Diffstat (limited to 'drivers')
1862 files changed, 64184 insertions, 64950 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig index 8befa53f43be..c7396659fdbe 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -228,4 +228,5 @@ source "drivers/interconnect/Kconfig" source "drivers/counter/Kconfig" +source "drivers/most/Kconfig" endmenu diff --git a/drivers/Makefile b/drivers/Makefile index 31cf17dee252..7646549a1e93 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -186,3 +186,4 @@ obj-$(CONFIG_SIOX) += siox/ obj-$(CONFIG_GNSS) += gnss/ obj-$(CONFIG_INTERCONNECT) += interconnect/ obj-$(CONFIG_COUNTER) += counter/ +obj-$(CONFIG_MOST) += most/ diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index cc57bab146b5..ce2730d61a8f 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -190,6 +190,30 @@ config ACPI_BUTTON To compile this driver as a module, choose M here: the module will be called button. +config ACPI_TINY_POWER_BUTTON + tristate "Tiny Power Button Driver" + depends on !ACPI_BUTTON + help + This driver provides a tiny alternative to the ACPI Button driver. + The tiny power button driver only handles the power button. Rather + than notifying userspace via the input layer or a netlink event, this + driver directly signals the init process to shut down. + + This driver is particularly suitable for cloud and VM environments, + which use a simulated power button to initiate a controlled poweroff, + but which may not want to run a separate userspace daemon to process + input events. + +config ACPI_TINY_POWER_BUTTON_SIGNAL + int "Tiny Power Button Signal" + depends on ACPI_TINY_POWER_BUTTON + default 38 + help + Default signal to send to init in response to the power button. + + Likely values here include 38 (SIGRTMIN+4) to power off, or 2 + (SIGINT) to simulate Ctrl+Alt+Del. + config ACPI_VIDEO tristate "Video" depends on X86 && BACKLIGHT_CLASS_DEVICE diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 33fdaf67454e..e81e1ebbfb32 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o obj-$(CONFIG_ACPI_AC) += ac.o obj-$(CONFIG_ACPI_BUTTON) += button.o +obj-$(CONFIG_ACPI_TINY_POWER_BUTTON) += tiny-power-button.o obj-$(CONFIG_ACPI_FAN) += fan.o obj-$(CONFIG_ACPI_VIDEO) += video.o obj-$(CONFIG_ACPI_TAD) += acpi_tad.o diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 829f37d36b9f..69d2db13886b 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -293,29 +293,30 @@ static int __init ac_do_not_check_pmic_quirk(const struct dmi_system_id *d) return 0; } +/* Please keep this list alphabetically sorted */ static const struct dmi_system_id ac_dmi_table[] __initconst = { { - /* Thinkpad e530 */ - .callback = thinkpad_e530_quirk, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"), + /* ECS EF20EA, AXP288 PMIC but uses separate fuel-gauge */ + .callback = ac_do_not_check_pmic_quirk, + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"), }, }, { - /* ECS EF20EA */ + /* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */ .callback = ac_do_not_check_pmic_quirk, .matches = { - DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "80XF"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), }, }, { - /* Lenovo Ideapad Miix 320 */ - .callback = ac_do_not_check_pmic_quirk, + /* Lenovo Thinkpad e530, see comment in acpi_ac_notify() */ + .callback = thinkpad_e530_quirk, .matches = { - DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, 
"80XF"), - DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"), }, }, {}, diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index db18df6cb330..dee999938213 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -306,11 +306,9 @@ static const struct lpss_device_desc bsw_spi_dev_desc = { .setup = lpss_deassert_reset, }; -#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } - static const struct x86_cpu_id lpss_cpu_ids[] = { - ICPU(INTEL_FAM6_ATOM_SILVERMONT), /* Valleyview, Bay Trail */ - ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */ + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL), {} }; diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 15c5b272e698..bc96457c9e25 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -943,7 +943,7 @@ acpi_video_init_brightness(struct acpi_video_device *device) int i, max_level = 0; unsigned long long level, level_old; struct acpi_video_device_brightness *br = NULL; - int result = -EINVAL; + int result; result = acpi_video_get_levels(device->dev, &br, &max_level); if (result) diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index b5516b04ffc0..6e9ec6e3fe47 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -55,12 +55,14 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat) } #endif +static bool acpi_no_watchdog; + static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void) { const struct acpi_table_wdat *wdat = NULL; acpi_status status; - if (acpi_disabled) + if (acpi_disabled || acpi_no_watchdog) return NULL; status = acpi_get_table(ACPI_SIG_WDAT, 0, @@ -88,6 +90,14 @@ bool acpi_has_watchdog(void) } EXPORT_SYMBOL_GPL(acpi_has_watchdog); +/* ACPI watchdog can be disabled on boot command line */ +static int __init disable_acpi_watchdog(char *str) +{ + acpi_no_watchdog = true; + return 1; +} +__setup("acpi_no_watchdog", disable_acpi_watchdog); + void __init acpi_watchdog_init(void) { const struct acpi_wdat_entry *entries; @@ -126,12 +136,11 @@ void __init acpi_watchdog_init(void) gas = &entries[i].register_region; res.start = gas->address; + res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1; if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { res.flags = IORESOURCE_MEM; - res.end = res.start + ALIGN(gas->access_width, 4) - 1; } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { res.flags = IORESOURCE_IO; - res.end = res.start + gas->access_width - 1; } else { pr_warn("Unsupported address space: %u\n", gas->space_id); diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h index ede4b9cc9e85..cf85d66da6e7 100644 --- a/drivers/acpi/acpica/acconvert.h +++ b/drivers/acpi/acpica/acconvert.h @@ -65,9 +65,7 @@ void cg_write_aml_comment(union acpi_parse_object *op); /* * cvparser */ -void -cv_init_file_tree(struct acpi_table_header *table, - u8 *aml_start, u32 aml_length); +void cv_init_file_tree(struct acpi_table_header *table, FILE * root_file); void cv_clear_op_comments(union acpi_parse_object *op); diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index 6ad0517553d5..ebf6453d0e21 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -101,7 +101,7 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void); acpi_status 
acpi_hw_enable_all_wakeup_gpes(void); -u8 acpi_hw_check_all_gpes(void); +u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number); acpi_status acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index 2269e10bc21b..168904ba3086 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -477,7 +477,7 @@ #define ASL_CV_PRINT_ONE_COMMENT(a,b,c,d) cv_print_one_comment_type (a,b,c,d); #define ASL_CV_PRINT_ONE_COMMENT_LIST(a,b) cv_print_one_comment_list (a,b); #define ASL_CV_FILE_HAS_SWITCHED(a) cv_file_has_switched(a) -#define ASL_CV_INIT_FILETREE(a,b,c) cv_init_file_tree(a,b,c); +#define ASL_CV_INIT_FILETREE(a,b) cv_init_file_tree(a,b); #else @@ -492,7 +492,7 @@ #define ASL_CV_PRINT_ONE_COMMENT(a,b,c,d) #define ASL_CV_PRINT_ONE_COMMENT_LIST(a,b) #define ASL_CV_FILE_HAS_SWITCHED(a) 0 -#define ASL_CV_INIT_FILETREE(a,b,c) +#define ASL_CV_INIT_FILETREE(a,b) #endif diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index 8c83d8c620dc..9efca54c51ac 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c @@ -130,7 +130,7 @@ static acpi_status acpi_ev_fixed_event_initialize(void) /* * Initialize the structure that keeps track of fixed event handlers and - * enable the fixed events. + * disable all of the fixed events. */ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { acpi_gbl_fixed_event_handlers[i].handler = NULL; @@ -265,4 +265,49 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event) handler) (acpi_gbl_fixed_event_handlers[event].context)); } +/******************************************************************************* + * + * FUNCTION: acpi_any_fixed_event_status_set + * + * PARAMETERS: None + * + * RETURN: TRUE or FALSE + * + * DESCRIPTION: Checks the PM status register for active fixed events + * + ******************************************************************************/ + +u32 acpi_any_fixed_event_status_set(void) +{ + acpi_status status; + u32 in_status; + u32 in_enable; + u32 i; + + status = acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &in_enable); + if (ACPI_FAILURE(status)) { + return (FALSE); + } + + status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &in_status); + if (ACPI_FAILURE(status)) { + return (FALSE); + } + + /* + * Check for all possible Fixed Events and dispatch those that are active + */ + for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { + + /* Both the status and enable bits must be on for this event */ + + if ((in_status & acpi_gbl_fixed_event_info[i].status_bit_mask) && + (in_enable & acpi_gbl_fixed_event_info[i].enable_bit_mask)) { + return (TRUE); + } + } + + return (FALSE); +} + #endif /* !ACPI_REDUCED_HARDWARE */ diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index f2de66bfd8a7..3be60673e461 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -799,17 +799,19 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes) * * FUNCTION: acpi_any_gpe_status_set * - * PARAMETERS: None + * PARAMETERS: gpe_skip_number - Number of the GPE to skip * * RETURN: Whether or not the status bit is set for any GPE * - * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any - * of them is set or FALSE otherwise. + * DESCRIPTION: Check the status bits of all enabled GPEs, except for the one + * represented by the "skip" argument, and return TRUE if any of + * them is set or FALSE otherwise. 
* ******************************************************************************/ -u32 acpi_any_gpe_status_set(void) +u32 acpi_any_gpe_status_set(u32 gpe_skip_number) { acpi_status status; + acpi_handle gpe_device; u8 ret; ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set); @@ -819,7 +821,12 @@ u32 acpi_any_gpe_status_set(void) return (FALSE); } - ret = acpi_hw_check_all_gpes(); + status = acpi_get_gpe_device(gpe_skip_number, &gpe_device); + if (ACPI_FAILURE(status)) { + gpe_device = NULL; + } + + ret = acpi_hw_check_all_gpes(gpe_device, gpe_skip_number); (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); return (ret); diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index f4c285c2f595..49c46d4dd070 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -444,12 +444,19 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, return (AE_OK); } +struct acpi_gpe_block_status_context { + struct acpi_gpe_register_info *gpe_skip_register_info; + u8 gpe_skip_mask; + u8 retval; +}; + /****************************************************************************** * * FUNCTION: acpi_hw_get_gpe_block_status * * PARAMETERS: gpe_xrupt_info - GPE Interrupt info * gpe_block - Gpe Block info + * context - GPE list walk context data * * RETURN: Success * @@ -460,12 +467,13 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, static acpi_status acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block, - void *ret_ptr) + void *context) { + struct acpi_gpe_block_status_context *c = context; struct acpi_gpe_register_info *gpe_register_info; u64 in_enable, in_status; acpi_status status; - u8 *ret = ret_ptr; + u8 ret_mask; u32 i; /* Examine each GPE Register within the block */ @@ -485,7 +493,11 @@ acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info, continue; } - *ret |= in_enable & in_status; + ret_mask = in_enable & in_status; + if (ret_mask && c->gpe_skip_register_info == gpe_register_info) { + ret_mask &= ~c->gpe_skip_mask; + } + c->retval |= ret_mask; } return (AE_OK); @@ -561,24 +573,41 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void) * * FUNCTION: acpi_hw_check_all_gpes * - * PARAMETERS: None + * PARAMETERS: gpe_skip_device - GPE device of the GPE to skip + * gpe_skip_number - Number of the GPE to skip * * RETURN: Combined status of all GPEs * - * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the + * DESCRIPTION: Check all enabled GPEs in all GPE blocks, except for the one + * represented by the "skip" arguments, and return TRUE if the + * status bit is set for at least one of them or FALSE otherwise. 
* ******************************************************************************/ -u8 acpi_hw_check_all_gpes(void) +u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number) { - u8 ret = 0; + struct acpi_gpe_block_status_context context = { + .gpe_skip_register_info = NULL, + .retval = 0, + }; + struct acpi_gpe_event_info *gpe_event_info; + acpi_cpu_flags flags; ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes); - (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret); + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_skip_device, + gpe_skip_number); + if (gpe_event_info) { + context.gpe_skip_register_info = gpe_event_info->register_info; + context.gpe_skip_mask = acpi_hw_get_gpe_register_bit(gpe_event_info); + } + + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); - return (ret != 0); + (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &context); + return (context.retval != 0); } #endif /* !ACPI_REDUCED_HARDWARE */ diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index 243a25add28f..317ae870336b 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -300,6 +300,18 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state) [ACPI_EVENT_POWER_BUTTON]. status_register_id, ACPI_CLEAR_STATUS); + /* Enable sleep button */ + + (void) + acpi_write_bit_register(acpi_gbl_fixed_event_info + [ACPI_EVENT_SLEEP_BUTTON]. + enable_register_id, ACPI_ENABLE_EVENT); + + (void) + acpi_write_bit_register(acpi_gbl_fixed_event_info + [ACPI_EVENT_SLEEP_BUTTON]. + status_register_id, ACPI_CLEAR_STATUS); + acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c index 370bbc867745..d4d26147610e 100644 --- a/drivers/acpi/acpica/nsnames.c +++ b/drivers/acpi/acpica/nsnames.c @@ -164,7 +164,7 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle, /* Build the path in the caller buffer */ (void)acpi_ns_build_normalized_path(node, buffer->pointer, - required_size, no_trailing); + (u32)required_size, no_trailing); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n", (char *)buffer->pointer, (u32) required_size)); @@ -315,7 +315,7 @@ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node, /* Build the path in the allocated buffer */ - (void)acpi_ns_build_normalized_path(node, name_buffer, size, + (void)acpi_ns_build_normalized_path(node, name_buffer, (u32)size, no_trailing); ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES, "%s: Path \"%s\"\n", @@ -346,7 +346,7 @@ char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope, char *full_path = NULL; char *external_path = NULL; char *prefix_path = NULL; - u32 prefix_path_length = 0; + acpi_size prefix_path_length = 0; /* If there is a prefix, get the pathname to it */ diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 984129dcaa0c..0e6aba81605b 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c @@ -516,7 +516,7 @@ acpi_status acpi_install_method(u8 *buffer) method_flags = *parser_state.aml++; aml_start = parser_state.aml; - aml_length = ACPI_PTR_DIFF(parser_state.pkg_end, aml_start); + aml_length = (u32)ACPI_PTR_DIFF(parser_state.pkg_end, aml_start); /* * Allocate resources up-front. 
We don't want to have to delete a new diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index f8403d480318..7490429ddbf6 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -202,14 +202,14 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_reallocate_root_table) * * PARAMETERS: signature - ACPI signature of needed table * instance - Which instance (for SSDTs) - * out_table_header - The pointer to the table header to fill + * out_table_header - The pointer to where the table header + * is returned * - * RETURN: Status and pointer to mapped table header + * RETURN: Status and a copy of the table header * - * DESCRIPTION: Finds an ACPI table header. - * - * NOTE: Caller is responsible in unmapping the header with - * acpi_os_unmap_memory + * DESCRIPTION: Finds and returns an ACPI table header. Caller provides the + * memory where a copy of the header is to be returned + * (fixed length). * ******************************************************************************/ acpi_status diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index 3e60bdac2200..bbec04c291d2 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c @@ -44,7 +44,7 @@ acpi_ut_get_element_length(u8 object_type, * * NOTE: We always allocate the worst-case object descriptor because * these objects are cached, and we want them to be - * one-size-satisifies-any-request. This in itself may not be + * one-size-satisfies-any-request. This in itself may not be * the most memory efficient, but the efficiency of the object * cache should more than make up for this! * diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 103acbbfcf9a..24c9642e8fc7 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -171,7 +171,7 @@ int ghes_estatus_pool_init(int num_ghes) * New allocation must be visible in all pgd before it can be found by * an NMI allocating from the pool. 
*/ - vmalloc_sync_all(); + vmalloc_sync_mappings(); rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1); if (rc) diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 111a407dcc77..366c389175d8 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -1365,19 +1365,19 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = { }, }, { - /* ECS EF20EA */ + /* ECS EF20EA, AXP288 PMIC but uses separate fuel-gauge */ .callback = battery_do_not_check_pmic_quirk, .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"), }, }, { - /* Lenovo Ideapad Miix 320 */ + /* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */ .callback = battery_do_not_check_pmic_quirk, .matches = { - DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80XF"), - DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "80XF"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), }, }, {}, diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index f6925f16c4a2..00112cf15322 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -30,17 +30,14 @@ #define ACPI_BUTTON_NOTIFY_STATUS 0x80 #define ACPI_BUTTON_SUBCLASS_POWER "power" -#define ACPI_BUTTON_HID_POWER "PNP0C0C" #define ACPI_BUTTON_DEVICE_NAME_POWER "Power Button" #define ACPI_BUTTON_TYPE_POWER 0x01 #define ACPI_BUTTON_SUBCLASS_SLEEP "sleep" -#define ACPI_BUTTON_HID_SLEEP "PNP0C0E" #define ACPI_BUTTON_DEVICE_NAME_SLEEP "Sleep Button" #define ACPI_BUTTON_TYPE_SLEEP 0x03 #define ACPI_BUTTON_SUBCLASS_LID "lid" -#define ACPI_BUTTON_HID_LID "PNP0C0D" #define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch" #define ACPI_BUTTON_TYPE_LID 0x05 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d1f1cf5d4bf0..4816df520f72 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -182,7 +182,6 @@ static bool boot_ec_is_ecdt = false; static struct workqueue_struct *ec_wq; static struct workqueue_struct *ec_query_wq; -static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */ static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ @@ -690,21 +689,9 @@ static void advance_transaction(struct acpi_ec *ec) wakeup = true; } goto out; - } else { - if (EC_FLAGS_QUERY_HANDSHAKE && - !(status & ACPI_EC_FLAG_SCI) && - (t->command == ACPI_EC_COMMAND_QUERY)) { - ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL); - t->rdata[t->ri++] = 0x00; - ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE); - ec_dbg_evt("Command(%s) completed by software", - acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); - wakeup = true; - } else if ((status & ACPI_EC_FLAG_IBF) == 0) { - acpi_ec_write_cmd(ec, t->command); - ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL); - } else - goto err; + } else if (!(status & ACPI_EC_FLAG_IBF)) { + acpi_ec_write_cmd(ec, t->command); + ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL); goto out; } err: @@ -1427,57 +1414,45 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval) return AE_CTRL_TERMINATE; } -static void install_gpe_event_handler(struct acpi_ec *ec) -{ - acpi_status status = - acpi_install_gpe_raw_handler(NULL, ec->gpe, - ACPI_GPE_EDGE_TRIGGERED, - &acpi_ec_gpe_handler, - ec); - if (ACPI_SUCCESS(status)) { - /* This is not fatal as we can poll EC events */ - 
set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags); - acpi_ec_leave_noirq(ec); - if (test_bit(EC_FLAGS_STARTED, &ec->flags) && - ec->reference_count >= 1) - acpi_ec_enable_gpe(ec, true); - } -} - -/* ACPI reduced hardware platforms use a GpioInt specified in _CRS. */ -static int install_gpio_irq_event_handler(struct acpi_ec *ec, - struct acpi_device *device) +static bool install_gpe_event_handler(struct acpi_ec *ec) { - int irq = acpi_dev_gpio_irq_get(device, 0); - int ret; - - if (irq < 0) - return irq; + acpi_status status; - ret = request_irq(irq, acpi_ec_irq_handler, IRQF_SHARED, - "ACPI EC", ec); + status = acpi_install_gpe_raw_handler(NULL, ec->gpe, + ACPI_GPE_EDGE_TRIGGERED, + &acpi_ec_gpe_handler, ec); + if (ACPI_FAILURE(status)) + return false; - /* - * Unlike the GPE case, we treat errors here as fatal, we'll only - * implement GPIO polling if we find a case that needs it. - */ - if (ret < 0) - return ret; + if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1) + acpi_ec_enable_gpe(ec, true); - ec->irq = irq; - set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags); - acpi_ec_leave_noirq(ec); + return true; +} - return 0; +static bool install_gpio_irq_event_handler(struct acpi_ec *ec) +{ + return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED, + "ACPI EC", ec) >= 0; } -/* - * Note: This function returns an error code only when the address space - * handler is not installed, which means "not able to handle - * transactions". +/** + * ec_install_handlers - Install service callbacks and register query methods. + * @ec: Target EC. + * @device: ACPI device object corresponding to @ec. + * + * Install a handler for the EC address space type unless it has been installed + * already. If @device is not NULL, also look for EC query methods in the + * namespace and register them, and install an event (either GPE or GPIO IRQ) + * handler for the EC, if possible. + * + * Return: + * -ENODEV if the address space handler cannot be installed, which means + * "unable to handle transactions", + * -EPROBE_DEFER if GPIO IRQ acquisition needs to be deferred, + * or 0 (success) otherwise. */ -static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, - bool handle_events) +static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device) { acpi_status status; @@ -1490,26 +1465,28 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, &acpi_ec_space_handler, NULL, ec); if (ACPI_FAILURE(status)) { - if (status == AE_NOT_FOUND) { - /* - * Maybe OS fails in evaluating the _REG - * object. The AE_NOT_FOUND error will be - * ignored and OS * continue to initialize - * EC. - */ - pr_err("Fail in evaluating the _REG object" - " of EC device. Broken bios is suspected.\n"); - } else { - acpi_ec_stop(ec, false); - return -ENODEV; - } + acpi_ec_stop(ec, false); + return -ENODEV; } set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); } - if (!handle_events) + if (!device) return 0; + if (ec->gpe < 0) { + /* ACPI reduced hardware platforms use a GpioInt from _CRS. */ + int irq = acpi_dev_gpio_irq_get(device, 0); + /* + * Bail out right away for deferred probing or complete the + * initialization regardless of any other errors. 
+ */ + if (irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + else if (irq >= 0) + ec->irq = irq; + } + if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) { /* Find and register all query methods */ acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1, @@ -1518,16 +1495,21 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags); } if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { - if (ec->gpe >= 0) { - install_gpe_event_handler(ec); - } else if (device) { - int ret = install_gpio_irq_event_handler(ec, device); - - if (ret) - return ret; - } else { /* No GPE and no GpioInt? */ - return -ENODEV; + bool ready = false; + + if (ec->gpe >= 0) + ready = install_gpe_event_handler(ec); + else if (ec->irq >= 0) + ready = install_gpio_irq_event_handler(ec); + + if (ready) { + set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags); + acpi_ec_leave_noirq(ec); } + /* + * Failures to install an event handler are not fatal, because + * the EC can be polled for events. + */ } /* EC is fully operational, allow queries */ acpi_ec_enable_event(ec); @@ -1574,61 +1556,46 @@ static void ec_remove_handlers(struct acpi_ec *ec) } } -static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, - bool handle_events) +static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device) { int ret; - ret = ec_install_handlers(ec, device, handle_events); + ret = ec_install_handlers(ec, device); if (ret) return ret; /* First EC capable of handling transactions */ - if (!first_ec) { + if (!first_ec) first_ec = ec; - acpi_handle_info(first_ec->handle, "Used as first EC\n"); - } - acpi_handle_info(ec->handle, - "GPE=0x%x, IRQ=%d, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", - ec->gpe, ec->irq, ec->command_addr, ec->data_addr); - return ret; -} + pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr, + ec->data_addr); -static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle) -{ - struct acpi_table_ecdt *ecdt_ptr; - acpi_status status; - acpi_handle handle; - - status = acpi_get_table(ACPI_SIG_ECDT, 1, - (struct acpi_table_header **)&ecdt_ptr); - if (ACPI_FAILURE(status)) - return false; - - status = acpi_get_handle(NULL, ecdt_ptr->id, &handle); - if (ACPI_FAILURE(status)) - return false; + if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { + if (ec->gpe >= 0) + pr_info("GPE=0x%x\n", ec->gpe); + else + pr_info("IRQ=%d\n", ec->irq); + } - *phandle = handle; - return true; + return ret; } static int acpi_ec_add(struct acpi_device *device) { - struct acpi_ec *ec = NULL; - bool dep_update = true; - acpi_status status; + struct acpi_ec *ec; int ret; strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_EC_CLASS); - if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) { - boot_ec_is_ecdt = true; + if ((boot_ec && boot_ec->handle == device->handle) || + !strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) { + /* Fast path: this device corresponds to the boot EC. */ ec = boot_ec; - dep_update = false; } else { + acpi_status status; + ec = acpi_ec_alloc(); if (!ec) return -ENOMEM; @@ -1636,12 +1603,11 @@ static int acpi_ec_add(struct acpi_device *device) status = ec_parse_device(device->handle, 0, ec, NULL); if (status != AE_CTRL_TERMINATE) { ret = -EINVAL; - goto err_alloc; + goto err; } if (boot_ec && ec->command_addr == boot_ec->command_addr && ec->data_addr == boot_ec->data_addr) { - boot_ec_is_ecdt = false; /* * Trust PNP0C09 namespace location rather than * ECDT ID. 
But trust ECDT GPE rather than _GPE @@ -1655,15 +1621,18 @@ static int acpi_ec_add(struct acpi_device *device) } } - ret = acpi_ec_setup(ec, device, true); + ret = acpi_ec_setup(ec, device); if (ret) - goto err_query; + goto err; if (ec == boot_ec) acpi_handle_info(boot_ec->handle, - "Boot %s EC used to handle transactions and events\n", + "Boot %s EC initialization complete\n", boot_ec_is_ecdt ? "ECDT" : "DSDT"); + acpi_handle_info(ec->handle, + "EC: Used to handle transactions and events\n"); + device->driver_data = ec; ret = !!request_region(ec->data_addr, 1, "EC data"); @@ -1671,19 +1640,16 @@ static int acpi_ec_add(struct acpi_device *device) ret = !!request_region(ec->command_addr, 1, "EC cmd"); WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); - if (dep_update) { - /* Reprobe devices depending on the EC */ - acpi_walk_dep_device_list(ec->handle); - } + /* Reprobe devices depending on the EC */ + acpi_walk_dep_device_list(ec->handle); + acpi_handle_debug(ec->handle, "enumerated.\n"); return 0; -err_query: - if (ec != boot_ec) - acpi_ec_remove_query_handlers(ec, true, 0); -err_alloc: +err: if (ec != boot_ec) acpi_ec_free(ec); + return ret; } @@ -1775,7 +1741,7 @@ void __init acpi_ec_dsdt_probe(void) * At this point, the GPE is not fully initialized, so do not to * handle the events. */ - ret = acpi_ec_setup(ec, NULL, false); + ret = acpi_ec_setup(ec, NULL); if (ret) { acpi_ec_free(ec); return; @@ -1788,52 +1754,43 @@ void __init acpi_ec_dsdt_probe(void) } /* - * If the DSDT EC is not functioning, we still need to prepare a fully - * functioning ECDT EC first in order to handle the events. - * https://bugzilla.kernel.org/show_bug.cgi?id=115021 + * acpi_ec_ecdt_start - Finalize the boot ECDT EC initialization. + * + * First, look for an ACPI handle for the boot ECDT EC if acpi_ec_add() has not + * found a matching object in the namespace. + * + * Next, in case the DSDT EC is not functioning, it is still necessary to + * provide a functional ECDT EC to handle events, so add an extra device object + * to represent it (see https://bugzilla.kernel.org/show_bug.cgi?id=115021). + * + * This is useful on platforms with valid ECDT and invalid DSDT EC settings, + * like ASUS X550ZE (see https://bugzilla.kernel.org/show_bug.cgi?id=196847). */ -static int __init acpi_ec_ecdt_start(void) +static void __init acpi_ec_ecdt_start(void) { + struct acpi_table_ecdt *ecdt_ptr; acpi_handle handle; + acpi_status status; - if (!boot_ec) - return -ENODEV; - /* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */ - if (!boot_ec_is_ecdt) - return -ENODEV; + /* Bail out if a matching EC has been found in the namespace. */ + if (!boot_ec || boot_ec->handle != ACPI_ROOT_OBJECT) + return; - /* - * At this point, the namespace and the GPE is initialized, so - * start to find the namespace objects and handle the events. - * - * Note: ec->handle can be valid if this function is called after - * acpi_ec_add(), hence the fast path. - */ - if (boot_ec->handle == ACPI_ROOT_OBJECT) { - if (!acpi_ec_ecdt_get_handle(&handle)) - return -ENODEV; - boot_ec->handle = handle; - } + /* Look up the object pointed to from the ECDT in the namespace. 
*/ + status = acpi_get_table(ACPI_SIG_ECDT, 1, + (struct acpi_table_header **)&ecdt_ptr); + if (ACPI_FAILURE(status)) + return; - /* Register to ACPI bus with PM ops attached */ - return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC); -} + status = acpi_get_handle(NULL, ecdt_ptr->id, &handle); + if (ACPI_FAILURE(status)) + return; -#if 0 -/* - * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not - * set, for which case, we complete the QR_EC without issuing it to the - * firmware. - * https://bugzilla.kernel.org/show_bug.cgi?id=82611 - * https://bugzilla.kernel.org/show_bug.cgi?id=97381 - */ -static int ec_flag_query_handshake(const struct dmi_system_id *id) -{ - pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n"); - EC_FLAGS_QUERY_HANDSHAKE = 1; - return 0; + boot_ec->handle = handle; + + /* Add a special ACPI device object to represent the boot EC. */ + acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC); } -#endif /* * On some hardware it is necessary to clear events accumulated by the EC during @@ -1962,7 +1919,7 @@ void __init acpi_ec_ecdt_probe(void) * At this point, the namespace is not initialized, so do not find * the namespace objects, or handle the events. */ - ret = acpi_ec_setup(ec, NULL, false); + ret = acpi_ec_setup(ec, NULL); if (ret) { acpi_ec_free(ec); return; @@ -2037,6 +1994,11 @@ void acpi_ec_set_gpe_wake_mask(u8 action) acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action); } +bool acpi_ec_other_gpes_active(void) +{ + return acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX); +} + bool acpi_ec_dispatch_gpe(void) { u32 ret; @@ -2160,14 +2122,13 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = { { }, }; -int __init acpi_ec_init(void) +void __init acpi_ec_init(void) { int result; - int ecdt_fail, dsdt_fail; result = acpi_ec_init_workqueues(); if (result) - return result; + return; /* * Disable EC wakeup on following systems to prevent periodic @@ -2178,16 +2139,10 @@ int __init acpi_ec_init(void) pr_debug("Disabling EC wakeup on suspend-to-idle\n"); } - /* Drivers must be started after acpi_ec_query_init() */ - dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); - /* - * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is - * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT - * settings but invalid DSDT settings. - * https://bugzilla.kernel.org/show_bug.cgi?id=196847 - */ - ecdt_fail = acpi_ec_ecdt_start(); - return ecdt_fail && dsdt_fail ? -ENODEV : 0; + /* Driver must be registered after acpi_ec_init_workqueues(). 
*/ + acpi_bus_register_driver(&acpi_ec_driver); + + acpi_ec_ecdt_start(); } /* EC driver currently not unloadable */ diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index aaf4e8f348cf..873e039ad4b7 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -276,29 +276,29 @@ static ssize_t show_state(struct device *dev, struct device_attribute *attr, cha int count; if (fps->control == 0xFFFFFFFF || fps->control > 100) - count = snprintf(buf, PAGE_SIZE, "not-defined:"); + count = scnprintf(buf, PAGE_SIZE, "not-defined:"); else - count = snprintf(buf, PAGE_SIZE, "%lld:", fps->control); + count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control); if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9) - count += snprintf(&buf[count], PAGE_SIZE, "not-defined:"); + count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:"); else - count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->trip_point); + count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->trip_point); if (fps->speed == 0xFFFFFFFF) - count += snprintf(&buf[count], PAGE_SIZE, "not-defined:"); + count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:"); else - count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->speed); + count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->speed); if (fps->noise_level == 0xFFFFFFFF) - count += snprintf(&buf[count], PAGE_SIZE, "not-defined:"); + count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:"); else - count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->noise_level * 100); + count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->noise_level * 100); if (fps->power == 0xFFFFFFFF) - count += snprintf(&buf[count], PAGE_SIZE, "not-defined\n"); + count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined\n"); else - count += snprintf(&buf[count], PAGE_SIZE, "%lld\n", fps->power); + count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld\n", fps->power); return count; } diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 3616daec650b..e387517d3354 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -190,7 +190,7 @@ extern struct acpi_ec *first_ec; /* External interfaces use first EC only, so remember */ typedef int (*acpi_ec_query_func) (void *data); -int acpi_ec_init(void); +void acpi_ec_init(void); void acpi_ec_ecdt_probe(void); void acpi_ec_dsdt_probe(void); void acpi_ec_block_transactions(void); @@ -202,6 +202,7 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit); #ifdef CONFIG_PM_SLEEP void acpi_ec_flush_work(void); +bool acpi_ec_other_gpes_active(void); bool acpi_ec_dispatch_gpe(void); #endif diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 41168c027a5a..762c5d50b8fe 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -1598,6 +1598,7 @@ void acpi_os_delete_lock(acpi_spinlock handle) */ acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp) + __acquires(lockp) { acpi_cpu_flags flags; spin_lock_irqsave(lockp, flags); @@ -1609,6 +1610,7 @@ acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp) */ void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags) + __releases(lockp) { spin_unlock_irqrestore(lockp, flags); } diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index d1e666ef3fcc..f92df2533e7e 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -153,7 +153,7 @@ static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word, buf[0] = '\0'; for (i = 0, entry = table; i < size; i++, 
entry++) if (word & entry->bit) - len += snprintf(buf + len, sizeof(buf) - len, "%s%s", + len += scnprintf(buf + len, sizeof(buf) - len, "%s%s", len ? " " : "", entry->desc); dev_info(&root->device->dev, "_OSC: %s [%s]\n", msg, buf); diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index 0e62ef265ce4..7892980b3ce4 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c @@ -22,14 +22,13 @@ ACPI_MODULE_NAME("sleep") static int acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) { - struct list_head *node, *next; + struct acpi_device *dev, *tmp; seq_printf(seq, "Device\tS-state\t Status Sysfs node\n"); mutex_lock(&acpi_device_lock); - list_for_each_safe(node, next, &acpi_wakeup_device_list) { - struct acpi_device *dev = - container_of(node, struct acpi_device, wakeup_list); + list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list, + wakeup_list) { struct acpi_device_physical_node *entry; if (!dev->wakeup.flags.valid) @@ -96,7 +95,7 @@ acpi_system_write_wakeup_device(struct file *file, const char __user * buffer, size_t count, loff_t * ppos) { - struct list_head *node, *next; + struct acpi_device *dev, *tmp; char strbuf[5]; char str[5] = ""; @@ -109,9 +108,8 @@ acpi_system_write_wakeup_device(struct file *file, sscanf(strbuf, "%s", str); mutex_lock(&acpi_device_lock); - list_for_each_safe(node, next, &acpi_wakeup_device_list) { - struct acpi_device *dev = - container_of(node, struct acpi_device, wakeup_list); + list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list, + wakeup_list) { if (!dev->wakeup.flags.valid) continue; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 152f7fc0b200..bb1ae400ec1f 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -982,10 +982,7 @@ static int acpi_s2idle_prepare_late(void) static void acpi_s2idle_sync(void) { - /* - * The EC driver uses the system workqueue and an additional special - * one, so those need to be flushed too. - */ + /* The EC driver uses special workqueues that need to be flushed. */ acpi_ec_flush_work(); acpi_os_wait_events_complete(); /* synchronize Notify handling */ } @@ -1006,19 +1003,27 @@ static bool acpi_s2idle_wake(void) return true; /* - * If there are no EC events to process and at least one of the - * other enabled GPEs is active, the wakeup is regarded as a - * genuine one. - * - * Note that the checks below must be carried out in this order - * to avoid returning prematurely due to a change of the EC GPE - * status bit from unset to set between the checks with the - * status bits of all the other GPEs unset. + * If the status bit of any enabled fixed event is set, the + * wakeup is regarded as valid. + */ + if (acpi_any_fixed_event_status_set()) + return true; + + /* + * If the status bit is set for any enabled GPE other than the + * EC one, the wakeup is regarded as a genuine one. */ - if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe()) + if (acpi_ec_other_gpes_active()) return true; /* + * If the EC GPE status bit has not been set, the wakeup is + * regarded as a spurious one. + */ + if (!acpi_ec_dispatch_gpe()) + return false; + + /* * Cancel the wakeup and process all pending events in case * there are any wakeup ones in there. 
* diff --git a/drivers/acpi/tiny-power-button.c b/drivers/acpi/tiny-power-button.c new file mode 100644 index 000000000000..6273d73c0b59 --- /dev/null +++ b/drivers/acpi/tiny-power-button.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/module.h> +#include <linux/sched/signal.h> +#include <linux/acpi.h> +#include <acpi/button.h> + +ACPI_MODULE_NAME("tiny-power-button"); +MODULE_AUTHOR("Josh Triplett"); +MODULE_DESCRIPTION("ACPI Tiny Power Button Driver"); +MODULE_LICENSE("GPL"); + +static int power_signal __read_mostly = CONFIG_ACPI_TINY_POWER_BUTTON_SIGNAL; +module_param(power_signal, int, 0644); +MODULE_PARM_DESC(power_signal, "Power button sends this signal to init"); + +static const struct acpi_device_id tiny_power_button_device_ids[] = { + { ACPI_BUTTON_HID_POWER, 0 }, + { ACPI_BUTTON_HID_POWERF, 0 }, + { "", 0 }, +}; +MODULE_DEVICE_TABLE(acpi, tiny_power_button_device_ids); + +static int acpi_noop_add_remove(struct acpi_device *device) +{ + return 0; +} + +static void acpi_tiny_power_button_notify(struct acpi_device *device, u32 event) +{ + kill_cad_pid(power_signal, 1); +} + +static struct acpi_driver acpi_tiny_power_button_driver = { + .name = "tiny-power-button", + .class = "tiny-power-button", + .ids = tiny_power_button_device_ids, + .ops = { + .add = acpi_noop_add_remove, + .remove = acpi_noop_add_remove, + .notify = acpi_tiny_power_button_notify, + }, +}; + +module_driver(acpi_tiny_power_button_driver, + acpi_bus_register_driver, + acpi_bus_unregister_driver); diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index 9614126bf56e..c28244df56a5 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c @@ -30,12 +30,10 @@ ACPI_MODULE_NAME("wakeup_devices") */ void acpi_enable_wakeup_devices(u8 sleep_state) { - struct list_head *node, *next; - - list_for_each_safe(node, next, &acpi_wakeup_device_list) { - struct acpi_device *dev = - container_of(node, struct acpi_device, wakeup_list); + struct acpi_device *dev, *tmp; + list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list, + wakeup_list) { if (!dev->wakeup.flags.valid || sleep_state > (u32) dev->wakeup.sleep_state || !(device_may_wakeup(&dev->dev) @@ -57,12 +55,10 @@ void acpi_enable_wakeup_devices(u8 sleep_state) */ void acpi_disable_wakeup_devices(u8 sleep_state) { - struct list_head *node, *next; - - list_for_each_safe(node, next, &acpi_wakeup_device_list) { - struct acpi_device *dev = - container_of(node, struct acpi_device, wakeup_list); + struct acpi_device *dev, *tmp; + list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list, + wakeup_list) { if (!dev->wakeup.flags.valid || sleep_state > (u32) dev->wakeup.sleep_state || !(device_may_wakeup(&dev->dev) @@ -79,13 +75,11 @@ void acpi_disable_wakeup_devices(u8 sleep_state) int __init acpi_wakeup_device_init(void) { - struct list_head *node, *next; + struct acpi_device *dev, *tmp; mutex_lock(&acpi_device_lock); - list_for_each_safe(node, next, &acpi_wakeup_device_list) { - struct acpi_device *dev = container_of(node, - struct acpi_device, - wakeup_list); + list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list, + wakeup_list) { if (device_can_wakeup(&dev->dev)) { /* Button GPEs are supposed to be always enabled. 
*/ acpi_enable_gpe(dev->wakeup.gpe_device, diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 697a6b12d6b9..bdc1ba00aee9 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -37,7 +37,7 @@ struct always_present_id { const char *uid; }; -#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } +#define X86_MATCH(model) X86_MATCH_INTEL_FAM6_MODEL(model, NULL) #define ENTRY(hid, uid, cpu_models, dmi...) { \ { { hid, }, {} }, \ @@ -51,29 +51,29 @@ static const struct always_present_id always_present_ids[] = { * Bay / Cherry Trail PWM directly poked by GPU driver in win10, * but Linux uses a separate PWM driver, harmless if not used. */ - ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT), {}), - ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}), + ENTRY("80860F09", "1", X86_MATCH(ATOM_SILVERMONT), {}), + ENTRY("80862288", "1", X86_MATCH(ATOM_AIRMONT), {}), /* Lenovo Yoga Book uses PWM2 for keyboard backlight control */ - ENTRY("80862289", "2", ICPU(INTEL_FAM6_ATOM_AIRMONT), { + ENTRY("80862289", "2", X86_MATCH(ATOM_AIRMONT), { DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), }), /* * The INT0002 device is necessary to clear wakeup interrupt sources * on Cherry Trail devices, without it we get nobody cared IRQ msgs. */ - ENTRY("INT0002", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}), + ENTRY("INT0002", "1", X86_MATCH(ATOM_AIRMONT), {}), /* * On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides * the touchscreen ACPI device until a certain time * after _SB.PCI0.GFX0.LCD.LCD1._ON gets called has passed * *and* _STA has been called at least 3 times since. */ - ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_L), { + ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"), }), - ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_L), { + ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"), }), @@ -89,19 +89,19 @@ static const struct always_present_id always_present_ids[] = { * was copy-pasted from the GPD win, so it has a disabled KIOX000A * node which we should not enable, thus we also check the BIOS date. 
*/ - ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), { + ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), DMI_MATCH(DMI_BIOS_DATE, "02/21/2017") }), - ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), { + ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), DMI_MATCH(DMI_BIOS_DATE, "03/20/2017") }), - ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), { + ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), diff --git a/drivers/android/binder.c b/drivers/android/binder.c index a6b2082c24f8..e47c8a4c83db 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -5228,6 +5228,7 @@ static int binder_open(struct inode *nodp, struct file *filp) binder_dev = container_of(filp->private_data, struct binder_device, miscdev); } + refcount_inc(&binder_dev->ref); proc->context = &binder_dev->context; binder_alloc_init(&proc->alloc); @@ -5405,6 +5406,7 @@ static int binder_node_release(struct binder_node *node, int refs) static void binder_deferred_release(struct binder_proc *proc) { struct binder_context *context = proc->context; + struct binder_device *device; struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; @@ -5421,6 +5423,12 @@ static void binder_deferred_release(struct binder_proc *proc) context->binder_context_mgr_node = NULL; } mutex_unlock(&context->context_mgr_node_lock); + device = container_of(proc->context, struct binder_device, context); + if (refcount_dec_and_test(&device->ref)) { + kfree(context->name); + kfree(device); + } + proc->context = NULL; binder_inner_proc_lock(proc); /* * Make sure proc stays alive after we @@ -6077,6 +6085,7 @@ static int __init init_binder_device(const char *name) binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; binder_device->miscdev.name = name; + refcount_set(&binder_device->ref, 1); binder_device->context.binder_context_mgr_uid = INVALID_UID; binder_device->context.name = name; mutex_init(&binder_device->context.context_mgr_node_lock); diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index ae991097d14d..283d3cb9c16e 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -8,6 +8,7 @@ #include <linux/list.h> #include <linux/miscdevice.h> #include <linux/mutex.h> +#include <linux/refcount.h> #include <linux/stddef.h> #include <linux/types.h> #include <linux/uidgid.h> @@ -33,6 +34,7 @@ struct binder_device { struct miscdevice miscdev; struct binder_context context; struct inode *binderfs_inode; + refcount_t ref; }; /** diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index e2580e5316a2..f303106b3362 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -154,6 +154,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode, if (!name) goto err; + refcount_set(&device->ref, 1); device->binderfs_inode = inode; device->context.binder_context_mgr_uid = INVALID_UID; device->context.name = name; @@ -257,8 +258,10 @@ static void binderfs_evict_inode(struct inode *inode) ida_free(&binderfs_minors, device->miscdev.minor); mutex_unlock(&binderfs_minors_mutex); - 
kfree(device->context.name); - kfree(device); + if (refcount_dec_and_test(&device->ref)) { + kfree(device->context.name); + kfree(device); + } } /** @@ -445,6 +448,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb) inode->i_uid = info->root_uid; inode->i_gid = info->root_gid; + refcount_set(&device->ref, 1); device->binderfs_inode = inode; device->miscdev.minor = minor; diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index a6beb2c5a692..05ecdce1b702 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -34,6 +34,12 @@ if ATA config ATA_NONSTANDARD bool +config SATA_HOST + bool + +config PATA_TIMINGS + bool + config ATA_VERBOSE_ERROR bool "Verbose ATA error reporting" default y @@ -45,9 +51,26 @@ config ATA_VERBOSE_ERROR If unsure, say Y. +config ATA_FORCE + bool "\"libata.force=\" kernel parameter support" if EXPERT + default y + help + This option adds support for "libata.force=" kernel parameter for + forcing configuration settings. + + For further information, please read + <file:Documentation/admin-guide/kernel-parameters.txt>. + + This option will enlarge the kernel by approx. 3KB. Disable it if + kernel size is more important than ability to override the default + configuration settings. + + If unsure, say Y. + config ATA_ACPI bool "ATA ACPI Support" depends on ACPI + select PATA_TIMINGS default y help This option adds support for ATA-related ACPI objects. @@ -73,6 +96,7 @@ config SATA_ZPODD config SATA_PMP bool "SATA Port Multiplier support" + depends on SATA_HOST default y help This option adds support for SATA Port Multipliers @@ -85,6 +109,7 @@ comment "Controllers with non-SFF native interface" config SATA_AHCI tristate "AHCI SATA support" depends on PCI + select SATA_HOST help This option enables support for AHCI Serial ATA. @@ -111,6 +136,7 @@ config SATA_MOBILE_LPM_POLICY config SATA_AHCI_PLATFORM tristate "Platform AHCI SATA support" + select SATA_HOST help This option enables support for Platform AHCI Serial ATA controllers. @@ -121,6 +147,7 @@ config AHCI_BRCM tristate "Broadcom AHCI SATA support" depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \ ARCH_BCM_63XX + select SATA_HOST help This option enables support for the AHCI SATA3 controller found on Broadcom SoC's. @@ -130,6 +157,7 @@ config AHCI_BRCM config AHCI_DA850 tristate "DaVinci DA850 AHCI SATA support" depends on ARCH_DAVINCI_DA850 + select SATA_HOST help This option enables support for the DaVinci DA850 SoC's onboard AHCI SATA. @@ -139,6 +167,7 @@ config AHCI_DA850 config AHCI_DM816 tristate "DaVinci DM816 AHCI SATA support" depends on ARCH_OMAP2PLUS + select SATA_HOST help This option enables support for the DaVinci DM816 SoC's onboard AHCI SATA controller. @@ -148,6 +177,7 @@ config AHCI_DM816 config AHCI_ST tristate "ST AHCI SATA support" depends on ARCH_STI + select SATA_HOST help This option enables support for ST AHCI SATA controller. @@ -157,6 +187,7 @@ config AHCI_IMX tristate "Freescale i.MX AHCI SATA support" depends on MFD_SYSCON && (ARCH_MXC || COMPILE_TEST) depends on (HWMON && (THERMAL || !THERMAL_OF)) || !HWMON + select SATA_HOST help This option enables support for the Freescale i.MX SoC's onboard AHCI SATA. @@ -166,6 +197,7 @@ config AHCI_IMX config AHCI_CEVA tristate "CEVA AHCI SATA support" depends on OF + select SATA_HOST help This option enables support for the CEVA AHCI SATA. It can be found on the Xilinx Zynq UltraScale+ MPSoC. 
@@ -176,6 +208,7 @@ config AHCI_MTK tristate "MediaTek AHCI SATA support" depends on ARCH_MEDIATEK select MFD_SYSCON + select SATA_HOST help This option enables support for the MediaTek SoC's onboard AHCI SATA controller. @@ -185,6 +218,7 @@ config AHCI_MTK config AHCI_MVEBU tristate "Marvell EBU AHCI SATA support" depends on ARCH_MVEBU + select SATA_HOST help This option enables support for the Marvebu EBU SoC's onboard AHCI SATA. @@ -203,6 +237,7 @@ config AHCI_OCTEON config AHCI_SUNXI tristate "Allwinner sunxi AHCI SATA support" depends on ARCH_SUNXI + select SATA_HOST help This option enables support for the Allwinner sunxi SoC's onboard AHCI SATA. @@ -212,6 +247,7 @@ config AHCI_SUNXI config AHCI_TEGRA tristate "NVIDIA Tegra AHCI SATA support" depends on ARCH_TEGRA + select SATA_HOST help This option enables support for the NVIDIA Tegra SoC's onboard AHCI SATA. @@ -221,12 +257,14 @@ config AHCI_TEGRA config AHCI_XGENE tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support" depends on PHY_XGENE + select SATA_HOST help This option enables support for APM X-Gene SoC SATA host controller. config AHCI_QORIQ tristate "Freescale QorIQ AHCI SATA support" depends on OF + select SATA_HOST help This option enables support for the Freescale QorIQ AHCI SoC's onboard AHCI SATA. @@ -236,6 +274,7 @@ config AHCI_QORIQ config SATA_FSL tristate "Freescale 3.0Gbps SATA support" depends on FSL_SOC + select SATA_HOST help This option enables support for Freescale 3.0Gbps SATA controller. It can be found on MPC837x and MPC8315. @@ -245,6 +284,7 @@ config SATA_FSL config SATA_GEMINI tristate "Gemini SATA bridge support" depends on ARCH_GEMINI || COMPILE_TEST + select SATA_HOST default ARCH_GEMINI help This enabled support for the FTIDE010 to SATA bridge @@ -255,6 +295,7 @@ config SATA_GEMINI config SATA_AHCI_SEATTLE tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support" depends on ARCH_SEATTLE + select SATA_HOST help This option enables support for AMD Seattle SATA host controller. @@ -263,12 +304,14 @@ config SATA_AHCI_SEATTLE config SATA_INIC162X tristate "Initio 162x SATA support (Very Experimental)" depends on PCI + select SATA_HOST help This option enables support for Initio 162x Serial ATA. config SATA_ACARD_AHCI tristate "ACard AHCI variant (ATP 8620)" depends on PCI + select SATA_HOST help This option enables support for Acard. @@ -277,6 +320,7 @@ config SATA_ACARD_AHCI config SATA_SIL24 tristate "Silicon Image 3124/3132 SATA support" depends on PCI + select SATA_HOST help This option enables support for Silicon Image 3124/3132 Serial ATA. @@ -317,6 +361,7 @@ config PDC_ADMA config PATA_OCTEON_CF tristate "OCTEON Boot Bus Compact Flash support" depends on CAVIUM_OCTEON_SOC + select PATA_TIMINGS help This option enables a polled compact flash driver for use with compact flash cards attached to the OCTEON boot bus. @@ -326,6 +371,7 @@ config PATA_OCTEON_CF config SATA_QSTOR tristate "Pacific Digital SATA QStor support" depends on PCI + select SATA_HOST help This option enables support for Pacific Digital Serial ATA QStor. @@ -334,6 +380,7 @@ config SATA_QSTOR config SATA_SX4 tristate "Promise SATA SX4 support (Experimental)" depends on PCI + select SATA_HOST help This option enables support for Promise Serial ATA SX4. 
@@ -357,6 +404,7 @@ comment "SATA SFF controllers with BMDMA" config ATA_PIIX tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support" depends on PCI + select SATA_HOST help This option enables support for ICH5/6/7/8 Serial ATA and support for PATA on the Intel ESB/ICH/PIIX3/PIIX4 series @@ -368,6 +416,7 @@ config SATA_DWC tristate "DesignWare Cores SATA support" depends on DMADEVICES select GENERIC_PHY + select SATA_HOST help This option enables support for the on-chip SATA controller of the AppliedMicro processor 460EX. @@ -398,6 +447,7 @@ config SATA_DWC_VDEBUG config SATA_HIGHBANK tristate "Calxeda Highbank SATA support" depends on ARCH_HIGHBANK || COMPILE_TEST + select SATA_HOST help This option enables support for the Calxeda Highbank SoC's onboard SATA. @@ -409,6 +459,7 @@ config SATA_MV depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \ ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST select GENERIC_PHY + select SATA_HOST help This option enables support for the Marvell Serial ATA family. Currently supports 88SX[56]0[48][01] PCI(-X) chips, @@ -419,6 +470,7 @@ config SATA_MV config SATA_NV tristate "NVIDIA SATA support" depends on PCI + select SATA_HOST help This option enables support for NVIDIA Serial ATA. @@ -427,6 +479,7 @@ config SATA_NV config SATA_PROMISE tristate "Promise SATA TX2/TX4 support" depends on PCI + select SATA_HOST help This option enables support for Promise Serial ATA TX2/TX4. @@ -435,6 +488,7 @@ config SATA_PROMISE config SATA_RCAR tristate "Renesas R-Car SATA support" depends on ARCH_RENESAS || COMPILE_TEST + select SATA_HOST help This option enables support for Renesas R-Car Serial ATA. @@ -443,6 +497,7 @@ config SATA_RCAR config SATA_SIL tristate "Silicon Image SATA support" depends on PCI + select SATA_HOST help This option enables support for Silicon Image Serial ATA. @@ -452,6 +507,7 @@ config SATA_SIS tristate "SiS 964/965/966/180 SATA support" depends on PCI select PATA_SIS + select SATA_HOST help This option enables support for SiS Serial ATA on SiS 964/965/966/180 and Parallel ATA on SiS 180. @@ -462,6 +518,7 @@ config SATA_SIS config SATA_SVW tristate "ServerWorks Frodo / Apple K2 SATA support" depends on PCI + select SATA_HOST help This option enables support for Broadcom/Serverworks/Apple K2 SATA support. @@ -471,6 +528,7 @@ config SATA_SVW config SATA_ULI tristate "ULi Electronics SATA support" depends on PCI + select SATA_HOST help This option enables support for ULi Electronics SATA. @@ -479,6 +537,7 @@ config SATA_ULI config SATA_VIA tristate "VIA SATA support" depends on PCI + select SATA_HOST help This option enables support for VIA Serial ATA. @@ -487,6 +546,7 @@ config SATA_VIA config SATA_VITESSE tristate "VITESSE VSC-7174 / INTEL 31244 SATA support" depends on PCI + select SATA_HOST help This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA. @@ -497,6 +557,7 @@ comment "PATA SFF controllers with BMDMA" config PATA_ALI tristate "ALi PATA support" depends on PCI + select PATA_TIMINGS help This option enables support for the ALi ATA interfaces found on the many ALi chipsets. @@ -506,6 +567,7 @@ config PATA_ALI config PATA_AMD tristate "AMD/NVidia PATA support" depends on PCI + select PATA_TIMINGS help This option enables support for the AMD and NVidia PATA interfaces found on the chipsets for Athlon/Athlon64. @@ -540,6 +602,7 @@ config PATA_ATIIXP config PATA_ATP867X tristate "ARTOP/Acard ATP867X PATA support" depends on PCI + select PATA_TIMINGS help This option enables support for ARTOP/Acard ATP867X PATA controllers. 
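The PATA entries above gain "select PATA_TIMINGS" for the same reason: the shared mode-timing helpers move out of libata-core.c into the new libata-pata-timings.c added at the end of this patch, and only drivers that actually compute cycle timings pull that object in. A short sketch of how such a driver consumes the helper; the wrapper and the 30 ns bus clock are illustrative, while the ata_timing_compute() call, its return convention, and the struct ata_timing fields are the real libata API shown in the code being moved:

#include <linux/libata.h>

/*
 * Hypothetical driver helper: quantize the device's current PIO mode to
 * 30 ns bus clocks.  On success, t->active, t->recover and t->cycle hold
 * clock counts a real driver would program into its timing registers.
 */
static int example_compute_pio_clocks(struct ata_device *adev,
				      struct ata_timing *t)
{
	/* T = 30 ns bus clock; UT is only used for UDMA modes */
	return ata_timing_compute(adev, adev->pio_mode, t, 30, 0);
}

Drivers with hardware minimums typically combine the result with ata_timing_merge(), the same field-wise maximum the compute path itself uses for EIDE override timings.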
@@ -549,6 +612,7 @@ config PATA_ATP867X config PATA_BK3710 tristate "Palmchip BK3710 PATA support" depends on ARCH_DAVINCI + select PATA_TIMINGS help This option enables support for the integrated IDE controller on the TI DaVinci SoC. @@ -558,6 +622,7 @@ config PATA_BK3710 config PATA_CMD64X tristate "CMD64x PATA support" depends on PCI + select PATA_TIMINGS help This option enables support for the CMD64x series chips except for the CMD640. @@ -603,6 +668,7 @@ config PATA_CS5536 config PATA_CYPRESS tristate "Cypress CY82C693 PATA support (Very Experimental)" depends on PCI + select PATA_TIMINGS help This option enables support for the Cypress/Contaq CY82C693 chipset found in some Alpha systems @@ -621,6 +687,7 @@ config PATA_EFAR config PATA_EP93XX tristate "Cirrus Logic EP93xx PATA support" depends on ARCH_EP93XX + select PATA_TIMINGS help This option enables support for the PATA controller in the Cirrus Logic EP9312 and EP9315 ARM CPU. @@ -685,6 +752,7 @@ config PATA_HPT3X3_DMA config PATA_ICSIDE tristate "Acorn ICS PATA support" depends on ARM && ARCH_ACORN + select PATA_TIMINGS help On Acorn systems, say Y here if you wish to use the ICS PATA interface card. This is not required for ICS partition support. @@ -693,6 +761,7 @@ config PATA_ICSIDE config PATA_IMX tristate "PATA support for Freescale iMX" depends on ARCH_MXC + select PATA_TIMINGS help This option enables support for the PATA host available on Freescale iMX SoCs. @@ -778,6 +847,7 @@ config PATA_NINJA32 config PATA_NS87415 tristate "Nat Semi NS87415 PATA support" depends on PCI + select PATA_TIMINGS help This option enables support for the National Semiconductor NS87415 PCI-IDE controller. @@ -902,6 +972,7 @@ config PATA_TRIFLEX config PATA_VIA tristate "VIA PATA support" depends on PCI + select PATA_TIMINGS help This option enables support for the VIA PATA interfaces found on the many VIA chipsets. @@ -935,6 +1006,7 @@ comment "PIO-only SFF controllers" config PATA_CMD640_PCI tristate "CMD640 PCI PATA support (Experimental)" depends on PCI + select PATA_TIMINGS help This option enables support for the CMD640 PCI IDE interface chip. Only the primary channel is currently @@ -1005,6 +1077,7 @@ config PATA_MPIIX config PATA_NS87410 tristate "Nat Semi NS87410 PATA support" depends on PCI + select PATA_TIMINGS help This option enables support for the National Semiconductor NS87410 PCI-IDE controller. @@ -1085,6 +1158,7 @@ config PATA_RZ1000 config PATA_SAMSUNG_CF tristate "Samsung SoC PATA support" depends on SAMSUNG_DEV_IDE + select PATA_TIMINGS help This option enables basic support for Samsung's S3C/S5P board PATA controllers via the new ATA layer @@ -1104,6 +1178,7 @@ comment "Generic fallback / legacy drivers" config PATA_ACPI tristate "ACPI firmware driver for PATA" depends on ATA_ACPI && ATA_BMDMA && PCI + select PATA_TIMINGS help This option enables an ACPI method driver which drives motherboard PATA controller interfaces through the ACPI @@ -1113,6 +1188,7 @@ config PATA_ACPI config ATA_GENERIC tristate "Generic ATA support" depends on PCI && ATA_BMDMA + select SATA_HOST help This option enables support for generic BIOS configured ATA controllers via the new ATA layer @@ -1122,6 +1198,7 @@ config ATA_GENERIC config PATA_LEGACY tristate "Legacy ISA PATA support (Experimental)" depends on (ISA || PCI) + select PATA_TIMINGS help This option enables support for ISA/VLB/PCI bus legacy PATA ports and allows them to be accessed via the new ATA layer. 
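The Kconfig side ends here. The rest of the patch keeps libata-core.c building in every configuration by pairing each newly optional implementation with empty static inline stubs, as done below for the CONFIG_ATA_FORCE block around ata_force_link_limits(), ata_force_xfermask() and ata_force_horkage(). The idiom, reduced to a skeleton with made-up names (CONFIG_EXAMPLE_FEATURE and example_ctx are placeholders):

struct example_ctx;

#ifdef CONFIG_EXAMPLE_FEATURE
static void example_apply_feature(struct example_ctx *ctx)
{
	/* real work, compiled only when the option is enabled */
}
#else
static inline void example_apply_feature(struct example_ctx *ctx) { }
#endif

Callers invoke the function unconditionally, so no #ifdefs leak into call sites and the disabled variant costs nothing at run time.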
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index d8cc2e04a6c7..b8aebfb14e82 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -123,7 +123,9 @@ obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o libata-y := libata-core.o libata-scsi.o libata-eh.o \ libata-transport.o libata-trace.o +libata-$(CONFIG_SATA_HOST) += libata-sata.o libata-$(CONFIG_ATA_SFF) += libata-sff.o libata-$(CONFIG_SATA_PMP) += libata-pmp.o libata-$(CONFIG_ATA_ACPI) += libata-acpi.o libata-$(CONFIG_SATA_ZPODD) += libata-zpodd.o +libata-$(CONFIG_PATA_TIMINGS) += libata-pata-timings.o diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 11ea1aff40db..ad0185c8dcee 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -40,6 +40,7 @@ enum { AHCI_PCI_BAR_STA2X11 = 0, AHCI_PCI_BAR_CAVIUM = 0, + AHCI_PCI_BAR_LOONGSON = 0, AHCI_PCI_BAR_ENMOTUS = 2, AHCI_PCI_BAR_CAVIUM_GEN5 = 4, AHCI_PCI_BAR_STANDARD = 5, @@ -245,6 +246,7 @@ static const struct ata_port_info ahci_port_info[] = { static const struct pci_device_id ahci_pci_tbl[] = { /* Intel */ + { PCI_VDEVICE(INTEL, 0x06d6), board_ahci }, /* Comet Lake PCH-H RAID */ { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */ { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */ { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */ @@ -401,6 +403,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */ + { PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */ + { PCI_VDEVICE(INTEL, 0xa386), board_ahci }, /* Comet Lake PCH-V RAID */ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */ @@ -589,6 +593,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { /* Enmotus */ { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, + /* Loongson */ + { PCI_VDEVICE(LOONGSON, 0x7a08), board_ahci }, + /* Generic, PCI class code for AHCI */ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, @@ -1680,6 +1687,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ahci_pci_bar = AHCI_PCI_BAR_CAVIUM; if (pdev->device == 0xa084) ahci_pci_bar = AHCI_PCI_BAR_CAVIUM_GEN5; + } else if (pdev->vendor == PCI_VENDOR_ID_LOONGSON) { + if (pdev->device == 0x7a08) + ahci_pci_bar = AHCI_PCI_BAR_LOONGSON; } /* acquire resources */ diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 42c8728f6117..beca5f91bb4c 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2,10 +2,6 @@ /* * libata-core.c - helper library for ATA * - * Maintained by: Tejun Heo <tj@kernel.org> - * Please ALWAYS copy linux-ide@vger.kernel.org - * on emails. - * * Copyright 2003-2004 Red Hat, Inc. All rights reserved. * Copyright 2003-2004 Jeff Garzik * @@ -22,6 +18,11 @@ * http://www.compactflash.org (CF) * http://www.qic.org (QIC157 - Tape and DSC) * http://www.ce-ata.org (CE-ATA: not supported) + * + * libata is essentially a library of internal helper functions for + * low-level ATA host controller drivers. As such, the API/ABI is + * likely to change as new drivers are added and updated. + * Do not depend on ABI/API stability. 
*/ #include <linux/kernel.h> @@ -56,6 +57,7 @@ #include <linux/leds.h> #include <linux/pm_runtime.h> #include <linux/platform_device.h> +#include <asm/setup.h> #define CREATE_TRACE_POINTS #include <trace/events/libata.h> @@ -63,11 +65,6 @@ #include "libata.h" #include "libata-transport.h" -/* debounce timing parameters in msecs { interval, duration, timeout } */ -const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; -const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; -const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; - const struct ata_port_operations ata_base_port_ops = { .prereset = ata_std_prereset, .postreset = ata_std_postreset, @@ -82,6 +79,7 @@ const struct ata_port_operations sata_port_ops = { .qc_defer = ata_std_qc_defer, .hardreset = sata_std_hardreset, }; +EXPORT_SYMBOL_GPL(sata_port_ops); static unsigned int ata_dev_init_params(struct ata_device *dev, u16 heads, u16 sectors); @@ -91,14 +89,15 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev); atomic_t ata_print_id = ATOMIC_INIT(0); +#ifdef CONFIG_ATA_FORCE struct ata_force_param { const char *name; - unsigned int cbl; - int spd_limit; + u8 cbl; + u8 spd_limit; unsigned long xfer_mask; unsigned int horkage_on; unsigned int horkage_off; - unsigned int lflags; + u16 lflags; }; struct ata_force_ent { @@ -110,10 +109,11 @@ struct ata_force_ent { static struct ata_force_ent *ata_force_tbl; static int ata_force_tbl_size; -static char ata_force_param_buf[PAGE_SIZE] __initdata; +static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata; /* param_buf is thrown away after initialization, disallow read */ module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0); MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)"); +#endif static int atapi_enabled = 1; module_param(atapi_enabled, int, 0444); @@ -224,6 +224,7 @@ struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap, return NULL; } +EXPORT_SYMBOL_GPL(ata_link_next); /** * ata_dev_next - device iteration helper @@ -277,6 +278,7 @@ struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link, goto next; return dev; } +EXPORT_SYMBOL_GPL(ata_dev_next); /** * ata_dev_phys_link - find physical link for a device @@ -303,6 +305,7 @@ struct ata_link *ata_dev_phys_link(struct ata_device *dev) return ap->slave_link; } +#ifdef CONFIG_ATA_FORCE /** * ata_force_cbl - force cable type according to libata.force * @ap: ATA port of interest @@ -483,6 +486,11 @@ static void ata_force_horkage(struct ata_device *dev) fe->param.name); } } +#else +static inline void ata_force_link_limits(struct ata_link *link) { } +static inline void ata_force_xfermask(struct ata_device *dev) { } +static inline void ata_force_horkage(struct ata_device *dev) { } +#endif /** * atapi_cmd_type - Determine ATAPI command type from SCSI opcode @@ -521,79 +529,7 @@ int atapi_cmd_type(u8 opcode) return ATAPI_MISC; } } - -/** - * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure - * @tf: Taskfile to convert - * @pmp: Port multiplier port - * @is_cmd: This FIS is for command - * @fis: Buffer into which data will output - * - * Converts a standard ATA taskfile to a Serial ATA - * FIS structure (Register - Host to Device). - * - * LOCKING: - * Inherited from caller. 
- */ -void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) -{ - fis[0] = 0x27; /* Register - Host to Device FIS */ - fis[1] = pmp & 0xf; /* Port multiplier number*/ - if (is_cmd) - fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ - - fis[2] = tf->command; - fis[3] = tf->feature; - - fis[4] = tf->lbal; - fis[5] = tf->lbam; - fis[6] = tf->lbah; - fis[7] = tf->device; - - fis[8] = tf->hob_lbal; - fis[9] = tf->hob_lbam; - fis[10] = tf->hob_lbah; - fis[11] = tf->hob_feature; - - fis[12] = tf->nsect; - fis[13] = tf->hob_nsect; - fis[14] = 0; - fis[15] = tf->ctl; - - fis[16] = tf->auxiliary & 0xff; - fis[17] = (tf->auxiliary >> 8) & 0xff; - fis[18] = (tf->auxiliary >> 16) & 0xff; - fis[19] = (tf->auxiliary >> 24) & 0xff; -} - -/** - * ata_tf_from_fis - Convert SATA FIS to ATA taskfile - * @fis: Buffer from which data will be input - * @tf: Taskfile to output - * - * Converts a serial ATA FIS structure to a standard ATA taskfile. - * - * LOCKING: - * Inherited from caller. - */ - -void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) -{ - tf->command = fis[2]; /* status */ - tf->feature = fis[3]; /* error */ - - tf->lbal = fis[4]; - tf->lbam = fis[5]; - tf->lbah = fis[6]; - tf->device = fis[7]; - - tf->hob_lbal = fis[8]; - tf->hob_lbam = fis[9]; - tf->hob_lbah = fis[10]; - - tf->nsect = fis[12]; - tf->hob_nsect = fis[13]; -} +EXPORT_SYMBOL_GPL(atapi_cmd_type); static const u8 ata_rw_cmds[] = { /* pio multi */ @@ -868,6 +804,7 @@ unsigned long ata_pack_xfermask(unsigned long pio_mask, ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) | ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); } +EXPORT_SYMBOL_GPL(ata_pack_xfermask); /** * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks @@ -923,6 +860,7 @@ u8 ata_xfer_mask2mode(unsigned long xfer_mask) return ent->base + highbit - ent->shift; return 0xff; } +EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); /** * ata_xfer_mode2mask - Find matching xfer_mask for XFER_* @@ -946,6 +884,7 @@ unsigned long ata_xfer_mode2mask(u8 xfer_mode) & ~((1 << ent->shift) - 1); return 0; } +EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); /** * ata_xfer_mode2shift - Find matching xfer_shift for XFER_* @@ -968,6 +907,7 @@ int ata_xfer_mode2shift(unsigned long xfer_mode) return ent->shift; return -1; } +EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); /** * ata_mode_string - convert xfer_mask to string @@ -1014,6 +954,7 @@ const char *ata_mode_string(unsigned long xfer_mask) return xfer_mode_str[highbit]; return "<n/a>"; } +EXPORT_SYMBOL_GPL(ata_mode_string); const char *sata_spd_string(unsigned int spd) { @@ -1094,6 +1035,7 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf) DPRINTK("unknown device\n"); return ATA_DEV_UNKNOWN; } +EXPORT_SYMBOL_GPL(ata_dev_classify); /** * ata_id_string - Convert IDENTIFY DEVICE page into string @@ -1130,6 +1072,7 @@ void ata_id_string(const u16 *id, unsigned char *s, len -= 2; } } +EXPORT_SYMBOL_GPL(ata_id_string); /** * ata_id_c_string - Convert IDENTIFY DEVICE page into C string @@ -1157,6 +1100,7 @@ void ata_id_c_string(const u16 *id, unsigned char *s, p--; *p = '\0'; } +EXPORT_SYMBOL_GPL(ata_id_c_string); static u64 ata_id_n_sectors(const u16 *id) { @@ -1514,6 +1458,7 @@ unsigned long ata_id_xfermask(const u16 *id) return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); } +EXPORT_SYMBOL_GPL(ata_id_xfermask); static void ata_qc_complete_internal(struct ata_queued_cmd *qc) { @@ -1771,6 +1716,7 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev) return 1; return 0; } 
+EXPORT_SYMBOL_GPL(ata_pio_need_iordy); /** * ata_pio_mask_no_iordy - Return the non IORDY mask @@ -1811,6 +1757,7 @@ unsigned int ata_do_dev_read_id(struct ata_device *dev, return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE, id, sizeof(id[0]) * ATA_ID_WORDS, 0); } +EXPORT_SYMBOL_GPL(ata_do_dev_read_id); /** * ata_dev_read_id - Read ID data from the specified device @@ -2265,6 +2212,8 @@ static int ata_dev_config_ncq(struct ata_device *dev, desc[0] = '\0'; return 0; } + if (!IS_ENABLED(CONFIG_SATA_HOST)) + return 0; if (dev->horkage & ATA_HORKAGE_NONCQ) { snprintf(desc, desc_sz, "NCQ (not used)"); return 0; @@ -2783,6 +2732,7 @@ int ata_cable_40wire(struct ata_port *ap) { return ATA_CBL_PATA40; } +EXPORT_SYMBOL_GPL(ata_cable_40wire); /** * ata_cable_80wire - return 80 wire cable type @@ -2796,6 +2746,7 @@ int ata_cable_80wire(struct ata_port *ap) { return ATA_CBL_PATA80; } +EXPORT_SYMBOL_GPL(ata_cable_80wire); /** * ata_cable_unknown - return unknown PATA cable. @@ -2808,6 +2759,7 @@ int ata_cable_unknown(struct ata_port *ap) { return ATA_CBL_PATA_UNK; } +EXPORT_SYMBOL_GPL(ata_cable_unknown); /** * ata_cable_ignore - return ignored PATA cable. @@ -2820,6 +2772,7 @@ int ata_cable_ignore(struct ata_port *ap) { return ATA_CBL_PATA_IGN; } +EXPORT_SYMBOL_GPL(ata_cable_ignore); /** * ata_cable_sata - return SATA cable type @@ -2832,6 +2785,7 @@ int ata_cable_sata(struct ata_port *ap) { return ATA_CBL_SATA; } +EXPORT_SYMBOL_GPL(ata_cable_sata); /** * ata_bus_probe - Reset and probe ATA bus @@ -3014,6 +2968,7 @@ struct ata_device *ata_dev_pair(struct ata_device *adev) return NULL; return pair; } +EXPORT_SYMBOL_GPL(ata_dev_pair); /** * sata_down_spd_limit - adjust SATA spd limit downward @@ -3095,252 +3050,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) return 0; } -static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) -{ - struct ata_link *host_link = &link->ap->link; - u32 limit, target, spd; - - limit = link->sata_spd_limit; - - /* Don't configure downstream link faster than upstream link. - * It doesn't speed up anything and some PMPs choke on such - * configuration. - */ - if (!ata_is_host_link(link) && host_link->sata_spd) - limit &= (1 << host_link->sata_spd) - 1; - - if (limit == UINT_MAX) - target = 0; - else - target = fls(limit); - - spd = (*scontrol >> 4) & 0xf; - *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); - - return spd != target; -} - -/** - * sata_set_spd_needed - is SATA spd configuration needed - * @link: Link in question - * - * Test whether the spd limit in SControl matches - * @link->sata_spd_limit. This function is used to determine - * whether hardreset is necessary to apply SATA spd - * configuration. - * - * LOCKING: - * Inherited from caller. - * - * RETURNS: - * 1 if SATA spd configuration is needed, 0 otherwise. - */ -static int sata_set_spd_needed(struct ata_link *link) -{ - u32 scontrol; - - if (sata_scr_read(link, SCR_CONTROL, &scontrol)) - return 1; - - return __sata_set_spd_needed(link, &scontrol); -} - -/** - * sata_set_spd - set SATA spd according to spd limit - * @link: Link to set SATA spd for - * - * Set SATA spd of @link according to sata_spd_limit. - * - * LOCKING: - * Inherited from caller. - * - * RETURNS: - * 0 if spd doesn't need to be changed, 1 if spd has been - * changed. Negative errno if SCR registers are inaccessible. 
- */ -int sata_set_spd(struct ata_link *link) -{ - u32 scontrol; - int rc; - - if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) - return rc; - - if (!__sata_set_spd_needed(link, &scontrol)) - return 0; - - if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) - return rc; - - return 1; -} - -/* - * This mode timing computation functionality is ported over from - * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik - */ -/* - * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). - * These were taken from ATA/ATAPI-6 standard, rev 0a, except - * for UDMA6, which is currently supported only by Maxtor drives. - * - * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. - */ - -static const struct ata_timing ata_timing[] = { -/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ - { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, - { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, - { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, - { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, - { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, - { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, - { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, - - { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, - { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, - { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, - - { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, - { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, - { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, - { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, - { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, - -/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ - { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, - { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, - { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, - { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, - { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, - { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, - { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, - - { 0xFF } -}; - -#define ENOUGH(v, unit) (((v)-1)/(unit)+1) -#define EZ(v, unit) ((v)?ENOUGH(((v) * 1000), unit):0) - -static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) -{ - q->setup = EZ(t->setup, T); - q->act8b = EZ(t->act8b, T); - q->rec8b = EZ(t->rec8b, T); - q->cyc8b = EZ(t->cyc8b, T); - q->active = EZ(t->active, T); - q->recover = EZ(t->recover, T); - q->dmack_hold = EZ(t->dmack_hold, T); - q->cycle = EZ(t->cycle, T); - q->udma = EZ(t->udma, UT); -} - -void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, - struct ata_timing *m, unsigned int what) -{ - if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); - if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); - if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); - if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); - if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); - if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); - if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); - if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); - if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); -} - -const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) -{ - const struct ata_timing *t = ata_timing; - - while (xfer_mode > t->mode) - t++; - - if (xfer_mode == t->mode) - return t; - - WARN_ONCE(true, "%s: unable to find 
timing for xfer_mode 0x%x\n", - __func__, xfer_mode); - - return NULL; -} - -int ata_timing_compute(struct ata_device *adev, unsigned short speed, - struct ata_timing *t, int T, int UT) -{ - const u16 *id = adev->id; - const struct ata_timing *s; - struct ata_timing p; - - /* - * Find the mode. - */ - - if (!(s = ata_timing_find_mode(speed))) - return -EINVAL; - - memcpy(t, s, sizeof(*s)); - - /* - * If the drive is an EIDE drive, it can tell us it needs extended - * PIO/MW_DMA cycle timing. - */ - - if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ - memset(&p, 0, sizeof(p)); - - if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) { - if (speed <= XFER_PIO_2) - p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; - else if ((speed <= XFER_PIO_4) || - (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) - p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; - } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) - p.cycle = id[ATA_ID_EIDE_DMA_MIN]; - - ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); - } - - /* - * Convert the timing to bus clock counts. - */ - - ata_timing_quantize(t, t, T, UT); - - /* - * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, - * S.M.A.R.T * and some other commands. We have to ensure that the - * DMA cycle timing is slower/equal than the fastest PIO timing. - */ - - if (speed > XFER_PIO_6) { - ata_timing_compute(adev, adev->pio_mode, &p, T, UT); - ata_timing_merge(&p, t, t, ATA_TIMING_ALL); - } - - /* - * Lengthen active & recovery time so that cycle time is correct. - */ - - if (t->act8b + t->rec8b < t->cyc8b) { - t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; - t->rec8b = t->cyc8b - t->act8b; - } - - if (t->active + t->recover < t->cycle) { - t->active += (t->cycle - (t->active + t->recover)) / 2; - t->recover = t->cycle - t->active; - } - - /* In a few cases quantisation may produce enough errors to - leave t->cycle too low for the sum of active and recovery - if so we must correct this */ - if (t->active + t->recover > t->cycle) - t->cycle = t->active + t->recover; - - return 0; -} - +#ifdef CONFIG_ATA_ACPI /** * ata_timing_cycle2mode - find xfer mode for the specified cycle duration * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. @@ -3391,6 +3101,7 @@ u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) return last_mode; } +#endif /** * ata_down_xfermask_limit - adjust dev xfer masks downward @@ -3662,6 +3373,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) *r_failed_dev = dev; return rc; } +EXPORT_SYMBOL_GPL(ata_do_set_mode); /** * ata_wait_ready - wait for link to become ready @@ -3771,216 +3483,7 @@ int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, return ata_wait_ready(link, deadline, check_ready); } - -/** - * sata_link_debounce - debounce SATA phy status - * @link: ATA link to debounce SATA phy status for - * @params: timing parameters { interval, duration, timeout } in msec - * @deadline: deadline jiffies for the operation - * - * Make sure SStatus of @link reaches stable state, determined by - * holding the same value where DET is not 1 for @duration polled - * every @interval, before @timeout. Timeout constraints the - * beginning of the stable state. Because DET gets stuck at 1 on - * some controllers after hot unplugging, this functions waits - * until timeout then returns 0 if DET is stable at 1. - * - * @timeout is further limited by @deadline. The sooner of the - * two is used. 
- * - * LOCKING: - * Kernel thread context (may sleep) - * - * RETURNS: - * 0 on success, -errno on failure. - */ -int sata_link_debounce(struct ata_link *link, const unsigned long *params, - unsigned long deadline) -{ - unsigned long interval = params[0]; - unsigned long duration = params[1]; - unsigned long last_jiffies, t; - u32 last, cur; - int rc; - - t = ata_deadline(jiffies, params[2]); - if (time_before(t, deadline)) - deadline = t; - - if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) - return rc; - cur &= 0xf; - - last = cur; - last_jiffies = jiffies; - - while (1) { - ata_msleep(link->ap, interval); - if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) - return rc; - cur &= 0xf; - - /* DET stable? */ - if (cur == last) { - if (cur == 1 && time_before(jiffies, deadline)) - continue; - if (time_after(jiffies, - ata_deadline(last_jiffies, duration))) - return 0; - continue; - } - - /* unstable, start over */ - last = cur; - last_jiffies = jiffies; - - /* Check deadline. If debouncing failed, return - * -EPIPE to tell upper layer to lower link speed. - */ - if (time_after(jiffies, deadline)) - return -EPIPE; - } -} - -/** - * sata_link_resume - resume SATA link - * @link: ATA link to resume SATA - * @params: timing parameters { interval, duration, timeout } in msec - * @deadline: deadline jiffies for the operation - * - * Resume SATA phy @link and debounce it. - * - * LOCKING: - * Kernel thread context (may sleep) - * - * RETURNS: - * 0 on success, -errno on failure. - */ -int sata_link_resume(struct ata_link *link, const unsigned long *params, - unsigned long deadline) -{ - int tries = ATA_LINK_RESUME_TRIES; - u32 scontrol, serror; - int rc; - - if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) - return rc; - - /* - * Writes to SControl sometimes get ignored under certain - * controllers (ata_piix SIDPR). Make sure DET actually is - * cleared. - */ - do { - scontrol = (scontrol & 0x0f0) | 0x300; - if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) - return rc; - /* - * Some PHYs react badly if SStatus is pounded - * immediately after resuming. Delay 200ms before - * debouncing. - */ - if (!(link->flags & ATA_LFLAG_NO_DB_DELAY)) - ata_msleep(link->ap, 200); - - /* is SControl restored correctly? */ - if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) - return rc; - } while ((scontrol & 0xf0f) != 0x300 && --tries); - - if ((scontrol & 0xf0f) != 0x300) { - ata_link_warn(link, "failed to resume link (SControl %X)\n", - scontrol); - return 0; - } - - if (tries < ATA_LINK_RESUME_TRIES) - ata_link_warn(link, "link resume succeeded after %d retries\n", - ATA_LINK_RESUME_TRIES - tries); - - if ((rc = sata_link_debounce(link, params, deadline))) - return rc; - - /* clear SError, some PHYs require this even for SRST to work */ - if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) - rc = sata_scr_write(link, SCR_ERROR, serror); - - return rc != -EINVAL ? rc : 0; -} - -/** - * sata_link_scr_lpm - manipulate SControl IPM and SPM fields - * @link: ATA link to manipulate SControl for - * @policy: LPM policy to configure - * @spm_wakeup: initiate LPM transition to active state - * - * Manipulate the IPM field of the SControl register of @link - * according to @policy. If @policy is ATA_LPM_MAX_POWER and - * @spm_wakeup is %true, the SPM field is manipulated to wake up - * the link. This function also clears PHYRDY_CHG before - * returning. - * - * LOCKING: - * EH context. - * - * RETURNS: - * 0 on success, -errno otherwise. 
- */ -int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, - bool spm_wakeup) -{ - struct ata_eh_context *ehc = &link->eh_context; - bool woken_up = false; - u32 scontrol; - int rc; - - rc = sata_scr_read(link, SCR_CONTROL, &scontrol); - if (rc) - return rc; - - switch (policy) { - case ATA_LPM_MAX_POWER: - /* disable all LPM transitions */ - scontrol |= (0x7 << 8); - /* initiate transition to active state */ - if (spm_wakeup) { - scontrol |= (0x4 << 12); - woken_up = true; - } - break; - case ATA_LPM_MED_POWER: - /* allow LPM to PARTIAL */ - scontrol &= ~(0x1 << 8); - scontrol |= (0x6 << 8); - break; - case ATA_LPM_MED_POWER_WITH_DIPM: - case ATA_LPM_MIN_POWER_WITH_PARTIAL: - case ATA_LPM_MIN_POWER: - if (ata_link_nr_enabled(link) > 0) - /* no restrictions on LPM transitions */ - scontrol &= ~(0x7 << 8); - else { - /* empty port, power off */ - scontrol &= ~0xf; - scontrol |= (0x1 << 2); - } - break; - default: - WARN_ON(1); - } - - rc = sata_scr_write(link, SCR_CONTROL, scontrol); - if (rc) - return rc; - - /* give the link time to transit out of LPM state */ - if (woken_up) - msleep(10); - - /* clear PHYRDY_CHG from SError */ - ehc->i.serror &= ~SERR_PHYRDY_CHG; - return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); -} +EXPORT_SYMBOL_GPL(ata_wait_after_reset); /** * ata_std_prereset - prepare for reset @@ -4026,118 +3529,7 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline) return 0; } - -/** - * sata_link_hardreset - reset link via SATA phy reset - * @link: link to reset - * @timing: timing parameters { interval, duration, timeout } in msec - * @deadline: deadline jiffies for the operation - * @online: optional out parameter indicating link onlineness - * @check_ready: optional callback to check link readiness - * - * SATA phy-reset @link using DET bits of SControl register. - * After hardreset, link readiness is waited upon using - * ata_wait_ready() if @check_ready is specified. LLDs are - * allowed to not specify @check_ready and wait itself after this - * function returns. Device classification is LLD's - * responsibility. - * - * *@online is set to one iff reset succeeded and @link is online - * after reset. - * - * LOCKING: - * Kernel thread context (may sleep) - * - * RETURNS: - * 0 on success, -errno otherwise. - */ -int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, - unsigned long deadline, - bool *online, int (*check_ready)(struct ata_link *)) -{ - u32 scontrol; - int rc; - - DPRINTK("ENTER\n"); - - if (online) - *online = false; - - if (sata_set_spd_needed(link)) { - /* SATA spec says nothing about how to reconfigure - * spd. To be on the safe side, turn off phy during - * reconfiguration. This works for at least ICH7 AHCI - * and Sil3124. - */ - if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) - goto out; - - scontrol = (scontrol & 0x0f0) | 0x304; - - if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) - goto out; - - sata_set_spd(link); - } - - /* issue phy wake/reset */ - if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) - goto out; - - scontrol = (scontrol & 0x0f0) | 0x301; - - if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) - goto out; - - /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 - * 10.4.2 says at least 1 ms. - */ - ata_msleep(link->ap, 1); - - /* bring link back */ - rc = sata_link_resume(link, timing, deadline); - if (rc) - goto out; - /* if link is offline nothing more to do */ - if (ata_phys_link_offline(link)) - goto out; - - /* Link is online. 
From this point, -ENODEV too is an error. */ - if (online) - *online = true; - - if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { - /* If PMP is supported, we have to do follow-up SRST. - * Some PMPs don't send D2H Reg FIS after hardreset if - * the first port is empty. Wait only for - * ATA_TMOUT_PMP_SRST_WAIT. - */ - if (check_ready) { - unsigned long pmp_deadline; - - pmp_deadline = ata_deadline(jiffies, - ATA_TMOUT_PMP_SRST_WAIT); - if (time_after(pmp_deadline, deadline)) - pmp_deadline = deadline; - ata_wait_ready(link, pmp_deadline, check_ready); - } - rc = -EAGAIN; - goto out; - } - - rc = 0; - if (check_ready) - rc = ata_wait_ready(link, deadline, check_ready); - out: - if (rc && rc != -EAGAIN) { - /* online is set iff link is online && reset succeeded */ - if (online) - *online = false; - ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); - } - DPRINTK("EXIT, rc=%d\n", rc); - return rc; -} +EXPORT_SYMBOL_GPL(ata_std_prereset); /** * sata_std_hardreset - COMRESET w/o waiting or classification @@ -4164,6 +3556,7 @@ int sata_std_hardreset(struct ata_link *link, unsigned int *class, rc = sata_link_hardreset(link, timing, deadline, &online, NULL); return online ? -EAGAIN : rc; } +EXPORT_SYMBOL_GPL(sata_std_hardreset); /** * ata_std_postreset - standard postreset callback @@ -4192,6 +3585,7 @@ void ata_std_postreset(struct ata_link *link, unsigned int *classes) DPRINTK("EXIT\n"); } +EXPORT_SYMBOL_GPL(ata_std_postreset); /** * ata_dev_same_device - Determine whether new ID matches configured device @@ -4979,11 +4373,13 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc) return ATA_DEFER_LINK; } +EXPORT_SYMBOL_GPL(ata_std_qc_defer); enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc) { return AC_ERR_OK; } +EXPORT_SYMBOL_GPL(ata_noop_qc_prep); /** * ata_sg_init - Associate command with scatter-gather table. @@ -5327,6 +4723,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc) __ata_qc_complete(qc); } } +EXPORT_SYMBOL_GPL(ata_qc_complete); /** * ata_qc_get_active - get bitmask of active qcs @@ -5353,64 +4750,6 @@ u64 ata_qc_get_active(struct ata_port *ap) EXPORT_SYMBOL_GPL(ata_qc_get_active); /** - * ata_qc_complete_multiple - Complete multiple qcs successfully - * @ap: port in question - * @qc_active: new qc_active mask - * - * Complete in-flight commands. This functions is meant to be - * called from low-level driver's interrupt routine to complete - * requests normally. ap->qc_active and @qc_active is compared - * and commands are completed accordingly. - * - * Always use this function when completing multiple NCQ commands - * from IRQ handlers instead of calling ata_qc_complete() - * multiple times to keep IRQ expect status properly in sync. - * - * LOCKING: - * spin_lock_irqsave(host lock) - * - * RETURNS: - * Number of completed commands on success, -errno otherwise. - */ -int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active) -{ - u64 done_mask, ap_qc_active = ap->qc_active; - int nr_done = 0; - - /* - * If the internal tag is set on ap->qc_active, then we care about - * bit0 on the passed in qc_active mask. Move that bit up to match - * the internal tag. 
- */ - if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) { - qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL; - qc_active ^= qc_active & 0x01; - } - - done_mask = ap_qc_active ^ qc_active; - - if (unlikely(done_mask & qc_active)) { - ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n", - ap->qc_active, qc_active); - return -EINVAL; - } - - while (done_mask) { - struct ata_queued_cmd *qc; - unsigned int tag = __ffs64(done_mask); - - qc = ata_qc_from_tag(ap, tag); - if (qc) { - ata_qc_complete(qc); - nr_done++; - } - done_mask &= ~(1ULL << tag); - } - - return nr_done; -} - -/** * ata_qc_issue - issue taskfile to device * @qc: command to issue to device * @@ -5486,111 +4825,6 @@ err: } /** - * sata_scr_valid - test whether SCRs are accessible - * @link: ATA link to test SCR accessibility for - * - * Test whether SCRs are accessible for @link. - * - * LOCKING: - * None. - * - * RETURNS: - * 1 if SCRs are accessible, 0 otherwise. - */ -int sata_scr_valid(struct ata_link *link) -{ - struct ata_port *ap = link->ap; - - return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; -} - -/** - * sata_scr_read - read SCR register of the specified port - * @link: ATA link to read SCR for - * @reg: SCR to read - * @val: Place to store read value - * - * Read SCR register @reg of @link into *@val. This function is - * guaranteed to succeed if @link is ap->link, the cable type of - * the port is SATA and the port implements ->scr_read. - * - * LOCKING: - * None if @link is ap->link. Kernel thread context otherwise. - * - * RETURNS: - * 0 on success, negative errno on failure. - */ -int sata_scr_read(struct ata_link *link, int reg, u32 *val) -{ - if (ata_is_host_link(link)) { - if (sata_scr_valid(link)) - return link->ap->ops->scr_read(link, reg, val); - return -EOPNOTSUPP; - } - - return sata_pmp_scr_read(link, reg, val); -} - -/** - * sata_scr_write - write SCR register of the specified port - * @link: ATA link to write SCR for - * @reg: SCR to write - * @val: value to write - * - * Write @val to SCR register @reg of @link. This function is - * guaranteed to succeed if @link is ap->link, the cable type of - * the port is SATA and the port implements ->scr_read. - * - * LOCKING: - * None if @link is ap->link. Kernel thread context otherwise. - * - * RETURNS: - * 0 on success, negative errno on failure. - */ -int sata_scr_write(struct ata_link *link, int reg, u32 val) -{ - if (ata_is_host_link(link)) { - if (sata_scr_valid(link)) - return link->ap->ops->scr_write(link, reg, val); - return -EOPNOTSUPP; - } - - return sata_pmp_scr_write(link, reg, val); -} - -/** - * sata_scr_write_flush - write SCR register of the specified port and flush - * @link: ATA link to write SCR for - * @reg: SCR to write - * @val: value to write - * - * This function is identical to sata_scr_write() except that this - * function performs flush after writing to the register. - * - * LOCKING: - * None if @link is ap->link. Kernel thread context otherwise. - * - * RETURNS: - * 0 on success, negative errno on failure. 
- */ -int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) -{ - if (ata_is_host_link(link)) { - int rc; - - if (sata_scr_valid(link)) { - rc = link->ap->ops->scr_write(link, reg, val); - if (rc == 0) - rc = link->ap->ops->scr_read(link, reg, &val); - return rc; - } - return -EOPNOTSUPP; - } - - return sata_pmp_scr_write(link, reg, val); -} - -/** * ata_phys_link_online - test whether the given link is online * @link: ATA link to test * @@ -5663,6 +4897,7 @@ bool ata_link_online(struct ata_link *link) return ata_phys_link_online(link) || (slave && ata_phys_link_online(slave)); } +EXPORT_SYMBOL_GPL(ata_link_online); /** * ata_link_offline - test whether the given link is offline @@ -5689,6 +4924,7 @@ bool ata_link_offline(struct ata_link *link) return ata_phys_link_offline(link) && (!slave || ata_phys_link_offline(slave)); } +EXPORT_SYMBOL_GPL(ata_link_offline); #ifdef CONFIG_PM static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, @@ -5875,6 +5111,7 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) host->dev->power.power_state = mesg; return 0; } +EXPORT_SYMBOL_GPL(ata_host_suspend); /** * ata_host_resume - resume host @@ -5886,6 +5123,7 @@ void ata_host_resume(struct ata_host *host) { host->dev->power.power_state = PMSG_ON; } +EXPORT_SYMBOL_GPL(ata_host_resume); #endif const struct device_type ata_port_type = { @@ -6105,6 +5343,7 @@ void ata_host_put(struct ata_host *host) { kref_put(&host->kref, ata_host_release); } +EXPORT_SYMBOL_GPL(ata_host_put); /** * ata_host_alloc - allocate and init basic ATA host resources @@ -6178,6 +5417,7 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports) kfree(host); return NULL; } +EXPORT_SYMBOL_GPL(ata_host_alloc); /** * ata_host_alloc_pinfo - alloc host and init with port_info array @@ -6226,68 +5466,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, return host; } - -/** - * ata_slave_link_init - initialize slave link - * @ap: port to initialize slave link for - * - * Create and initialize slave link for @ap. This enables slave - * link handling on the port. - * - * In libata, a port contains links and a link contains devices. - * There is single host link but if a PMP is attached to it, - * there can be multiple fan-out links. On SATA, there's usually - * a single device connected to a link but PATA and SATA - * controllers emulating TF based interface can have two - master - * and slave. - * - * However, there are a few controllers which don't fit into this - * abstraction too well - SATA controllers which emulate TF - * interface with both master and slave devices but also have - * separate SCR register sets for each device. These controllers - * need separate links for physical link handling - * (e.g. onlineness, link speed) but should be treated like a - * traditional M/S controller for everything else (e.g. command - * issue, softreset). - * - * slave_link is libata's way of handling this class of - * controllers without impacting core layer too much. For - * anything other than physical link handling, the default host - * link is used for both master and slave. For physical link - * handling, separate @ap->slave_link is used. All dirty details - * are implemented inside libata core layer. From LLD's POV, the - * only difference is that prereset, hardreset and postreset are - * called once more for the slave link, so the reset sequence - * looks like the following. 
- * - * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> - * softreset(M) -> postreset(M) -> postreset(S) - * - * Note that softreset is called only for the master. Softreset - * resets both M/S by definition, so SRST on master should handle - * both (the standard method will work just fine). - * - * LOCKING: - * Should be called before host is registered. - * - * RETURNS: - * 0 on success, -errno on failure. - */ -int ata_slave_link_init(struct ata_port *ap) -{ - struct ata_link *link; - - WARN_ON(ap->slave_link); - WARN_ON(ap->flags & ATA_FLAG_PMP); - - link = kzalloc(sizeof(*link), GFP_KERNEL); - if (!link) - return -ENOMEM; - - ata_link_init(ap, link, 1); - ap->slave_link = link; - return 0; -} +EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); static void ata_host_stop(struct device *gendev, void *res) { @@ -6436,6 +5615,7 @@ int ata_host_start(struct ata_host *host) devres_free(start_dr); return rc; } +EXPORT_SYMBOL_GPL(ata_host_start); /** * ata_sas_host_init - Initialize a host struct for sas (ipr, libsas) @@ -6454,6 +5634,7 @@ void ata_host_init(struct ata_host *host, struct device *dev, host->ops = ops; kref_init(&host->kref); } +EXPORT_SYMBOL_GPL(ata_host_init); void __ata_port_probe(struct ata_port *ap) { @@ -6609,6 +5790,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) return rc; } +EXPORT_SYMBOL_GPL(ata_host_register); /** * ata_host_activate - start host, request IRQ and register it @@ -6671,6 +5853,7 @@ int ata_host_activate(struct ata_host *host, int irq, return rc; } +EXPORT_SYMBOL_GPL(ata_host_activate); /** * ata_port_detach - Detach ATA port in preparation of device removal @@ -6746,6 +5929,7 @@ void ata_host_detach(struct ata_host *host) /* the host is dead now, dissociate ACPI */ ata_acpi_dissociate(host); } +EXPORT_SYMBOL_GPL(ata_host_detach); #ifdef CONFIG_PCI @@ -6766,6 +5950,7 @@ void ata_pci_remove_one(struct pci_dev *pdev) ata_host_detach(host); } +EXPORT_SYMBOL_GPL(ata_pci_remove_one); void ata_pci_shutdown_one(struct pci_dev *pdev) { @@ -6786,6 +5971,7 @@ void ata_pci_shutdown_one(struct pci_dev *pdev) ap->ops->port_stop(ap); } } +EXPORT_SYMBOL_GPL(ata_pci_shutdown_one); /* move to PCI subsystem */ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) @@ -6820,6 +6006,7 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) return (tmp == bits->val) ? 
1 : 0; } +EXPORT_SYMBOL_GPL(pci_test_config_bits); #ifdef CONFIG_PM void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) @@ -6830,6 +6017,7 @@ void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) if (mesg.event & PM_EVENT_SLEEP) pci_set_power_state(pdev, PCI_D3hot); } +EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); int ata_pci_device_do_resume(struct pci_dev *pdev) { @@ -6848,6 +6036,7 @@ int ata_pci_device_do_resume(struct pci_dev *pdev) pci_set_master(pdev); return 0; } +EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { @@ -6862,6 +6051,7 @@ int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) return 0; } +EXPORT_SYMBOL_GPL(ata_pci_device_suspend); int ata_pci_device_resume(struct pci_dev *pdev) { @@ -6873,8 +6063,8 @@ int ata_pci_device_resume(struct pci_dev *pdev) ata_host_resume(host); return rc; } +EXPORT_SYMBOL_GPL(ata_pci_device_resume); #endif /* CONFIG_PM */ - #endif /* CONFIG_PCI */ /** @@ -6896,7 +6086,9 @@ int ata_platform_remove_one(struct platform_device *pdev) return 0; } +EXPORT_SYMBOL_GPL(ata_platform_remove_one); +#ifdef CONFIG_ATA_FORCE static int __init ata_parse_force_one(char **cur, struct ata_force_ent *force_ent, const char **reason) @@ -7076,6 +6268,15 @@ static void __init ata_parse_force_param(void) ata_force_tbl_size = idx; } +static void ata_free_force_param(void) +{ + kfree(ata_force_tbl); +} +#else +static inline void ata_parse_force_param(void) { } +static inline void ata_free_force_param(void) { } +#endif + static int __init ata_init(void) { int rc; @@ -7084,7 +6285,7 @@ static int __init ata_init(void) rc = ata_sff_init(); if (rc) { - kfree(ata_force_tbl); + ata_free_force_param(); return rc; } @@ -7108,7 +6309,7 @@ static void __exit ata_exit(void) ata_release_transport(ata_scsi_transport_template); libata_transport_exit(); ata_sff_exit(); - kfree(ata_force_tbl); + ata_free_force_param(); } subsys_initcall(ata_init); @@ -7120,6 +6321,7 @@ int ata_ratelimit(void) { return __ratelimit(&ratelimit); } +EXPORT_SYMBOL_GPL(ata_ratelimit); /** * ata_msleep - ATA EH owner aware msleep @@ -7152,6 +6354,7 @@ void ata_msleep(struct ata_port *ap, unsigned int msecs) if (owns_eh) ata_eh_acquire(ap); } +EXPORT_SYMBOL_GPL(ata_msleep); /** * ata_wait_register - wait until register value changes @@ -7198,38 +6401,7 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, return tmp; } - -/** - * sata_lpm_ignore_phy_events - test if PHY event should be ignored - * @link: Link receiving the event - * - * Test whether the received PHY event has to be ignored or not. - * - * LOCKING: - * None: - * - * RETURNS: - * True if the event has to be ignored. 
- */ -bool sata_lpm_ignore_phy_events(struct ata_link *link) -{ - unsigned long lpm_timeout = link->last_lpm_change + - msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY); - - /* if LPM is enabled, PHYRDY doesn't mean anything */ - if (link->lpm_policy > ATA_LPM_MAX_POWER) - return true; - - /* ignore the first PHY event after the LPM policy changed - * as it is might be spurious - */ - if ((link->flags & ATA_LFLAG_CHANGED) && - time_before(jiffies, lpm_timeout)) - return true; - - return false; -} -EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events); +EXPORT_SYMBOL_GPL(ata_wait_register); /* * Dummy port_ops @@ -7251,10 +6423,12 @@ struct ata_port_operations ata_dummy_port_ops = { .sched_eh = ata_std_sched_eh, .end_eh = ata_std_end_eh, }; +EXPORT_SYMBOL_GPL(ata_dummy_port_ops); const struct ata_port_info ata_dummy_port_info = { .port_ops = &ata_dummy_port_ops, }; +EXPORT_SYMBOL_GPL(ata_dummy_port_info); /* * Utility print functions @@ -7322,127 +6496,3 @@ void ata_print_version(const struct device *dev, const char *version) dev_printk(KERN_DEBUG, dev, "version %s\n", version); } EXPORT_SYMBOL(ata_print_version); - -/* - * libata is essentially a library of internal helper functions for - * low-level ATA host controller drivers. As such, the API/ABI is - * likely to change as new drivers are added and updated. - * Do not depend on ABI/API stability. - */ -EXPORT_SYMBOL_GPL(sata_deb_timing_normal); -EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); -EXPORT_SYMBOL_GPL(sata_deb_timing_long); -EXPORT_SYMBOL_GPL(ata_base_port_ops); -EXPORT_SYMBOL_GPL(sata_port_ops); -EXPORT_SYMBOL_GPL(ata_dummy_port_ops); -EXPORT_SYMBOL_GPL(ata_dummy_port_info); -EXPORT_SYMBOL_GPL(ata_link_next); -EXPORT_SYMBOL_GPL(ata_dev_next); -EXPORT_SYMBOL_GPL(ata_std_bios_param); -EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); -EXPORT_SYMBOL_GPL(ata_host_init); -EXPORT_SYMBOL_GPL(ata_host_alloc); -EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); -EXPORT_SYMBOL_GPL(ata_slave_link_init); -EXPORT_SYMBOL_GPL(ata_host_start); -EXPORT_SYMBOL_GPL(ata_host_register); -EXPORT_SYMBOL_GPL(ata_host_activate); -EXPORT_SYMBOL_GPL(ata_host_detach); -EXPORT_SYMBOL_GPL(ata_sg_init); -EXPORT_SYMBOL_GPL(ata_qc_complete); -EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); -EXPORT_SYMBOL_GPL(atapi_cmd_type); -EXPORT_SYMBOL_GPL(ata_tf_to_fis); -EXPORT_SYMBOL_GPL(ata_tf_from_fis); -EXPORT_SYMBOL_GPL(ata_pack_xfermask); -EXPORT_SYMBOL_GPL(ata_unpack_xfermask); -EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); -EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); -EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); -EXPORT_SYMBOL_GPL(ata_mode_string); -EXPORT_SYMBOL_GPL(ata_id_xfermask); -EXPORT_SYMBOL_GPL(ata_do_set_mode); -EXPORT_SYMBOL_GPL(ata_std_qc_defer); -EXPORT_SYMBOL_GPL(ata_noop_qc_prep); -EXPORT_SYMBOL_GPL(ata_dev_disable); -EXPORT_SYMBOL_GPL(sata_set_spd); -EXPORT_SYMBOL_GPL(ata_wait_after_reset); -EXPORT_SYMBOL_GPL(sata_link_debounce); -EXPORT_SYMBOL_GPL(sata_link_resume); -EXPORT_SYMBOL_GPL(sata_link_scr_lpm); -EXPORT_SYMBOL_GPL(ata_std_prereset); -EXPORT_SYMBOL_GPL(sata_link_hardreset); -EXPORT_SYMBOL_GPL(sata_std_hardreset); -EXPORT_SYMBOL_GPL(ata_std_postreset); -EXPORT_SYMBOL_GPL(ata_dev_classify); -EXPORT_SYMBOL_GPL(ata_dev_pair); -EXPORT_SYMBOL_GPL(ata_ratelimit); -EXPORT_SYMBOL_GPL(ata_msleep); -EXPORT_SYMBOL_GPL(ata_wait_register); -EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); -EXPORT_SYMBOL_GPL(ata_scsi_slave_config); -EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); -EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); -EXPORT_SYMBOL_GPL(__ata_change_queue_depth); -EXPORT_SYMBOL_GPL(sata_scr_valid); 
-EXPORT_SYMBOL_GPL(sata_scr_read); -EXPORT_SYMBOL_GPL(sata_scr_write); -EXPORT_SYMBOL_GPL(sata_scr_write_flush); -EXPORT_SYMBOL_GPL(ata_link_online); -EXPORT_SYMBOL_GPL(ata_link_offline); -#ifdef CONFIG_PM -EXPORT_SYMBOL_GPL(ata_host_suspend); -EXPORT_SYMBOL_GPL(ata_host_resume); -#endif /* CONFIG_PM */ -EXPORT_SYMBOL_GPL(ata_id_string); -EXPORT_SYMBOL_GPL(ata_id_c_string); -EXPORT_SYMBOL_GPL(ata_do_dev_read_id); -EXPORT_SYMBOL_GPL(ata_scsi_simulate); - -EXPORT_SYMBOL_GPL(ata_pio_need_iordy); -EXPORT_SYMBOL_GPL(ata_timing_find_mode); -EXPORT_SYMBOL_GPL(ata_timing_compute); -EXPORT_SYMBOL_GPL(ata_timing_merge); -EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); - -#ifdef CONFIG_PCI -EXPORT_SYMBOL_GPL(pci_test_config_bits); -EXPORT_SYMBOL_GPL(ata_pci_shutdown_one); -EXPORT_SYMBOL_GPL(ata_pci_remove_one); -#ifdef CONFIG_PM -EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); -EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); -EXPORT_SYMBOL_GPL(ata_pci_device_suspend); -EXPORT_SYMBOL_GPL(ata_pci_device_resume); -#endif /* CONFIG_PM */ -#endif /* CONFIG_PCI */ - -EXPORT_SYMBOL_GPL(ata_platform_remove_one); - -EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); -EXPORT_SYMBOL_GPL(ata_ehi_push_desc); -EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); -EXPORT_SYMBOL_GPL(ata_port_desc); -#ifdef CONFIG_PCI -EXPORT_SYMBOL_GPL(ata_port_pbar_desc); -#endif /* CONFIG_PCI */ -EXPORT_SYMBOL_GPL(ata_port_schedule_eh); -EXPORT_SYMBOL_GPL(ata_link_abort); -EXPORT_SYMBOL_GPL(ata_port_abort); -EXPORT_SYMBOL_GPL(ata_port_freeze); -EXPORT_SYMBOL_GPL(sata_async_notification); -EXPORT_SYMBOL_GPL(ata_eh_freeze_port); -EXPORT_SYMBOL_GPL(ata_eh_thaw_port); -EXPORT_SYMBOL_GPL(ata_eh_qc_complete); -EXPORT_SYMBOL_GPL(ata_eh_qc_retry); -EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); -EXPORT_SYMBOL_GPL(ata_do_eh); -EXPORT_SYMBOL_GPL(ata_std_error_handler); - -EXPORT_SYMBOL_GPL(ata_cable_40wire); -EXPORT_SYMBOL_GPL(ata_cable_80wire); -EXPORT_SYMBOL_GPL(ata_cable_unknown); -EXPORT_SYMBOL_GPL(ata_cable_ignore); -EXPORT_SYMBOL_GPL(ata_cable_sata); -EXPORT_SYMBOL_GPL(ata_host_get); -EXPORT_SYMBOL_GPL(ata_host_put); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 3bfd9da58473..474c6c34fe02 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2,10 +2,6 @@ /* * libata-eh.c - libata error handling * - * Maintained by: Tejun Heo <tj@kernel.org> - * Please ALWAYS copy linux-ide@vger.kernel.org - * on emails. - * * Copyright 2006 Tejun Heo <htejun@gmail.com> * * libata documentation is available via 'make {ps|pdf}docs', @@ -184,6 +180,7 @@ void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) __ata_ehi_pushv_desc(ehi, fmt, args); va_end(args); } +EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); /** * ata_ehi_push_desc - push error description with separator @@ -207,6 +204,7 @@ void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) __ata_ehi_pushv_desc(ehi, fmt, args); va_end(args); } +EXPORT_SYMBOL_GPL(ata_ehi_push_desc); /** * ata_ehi_clear_desc - clean error description @@ -222,6 +220,7 @@ void ata_ehi_clear_desc(struct ata_eh_info *ehi) ehi->desc[0] = '\0'; ehi->desc_len = 0; } +EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); /** * ata_port_desc - append port description @@ -249,9 +248,9 @@ void ata_port_desc(struct ata_port *ap, const char *fmt, ...) 
__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); va_end(args); } +EXPORT_SYMBOL_GPL(ata_port_desc); #ifdef CONFIG_PCI - /** * ata_port_pbar_desc - append PCI BAR description * @ap: target ATA port @@ -288,7 +287,7 @@ void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, ata_port_desc(ap, "%s 0x%llx", name, start + (unsigned long long)offset); } - +EXPORT_SYMBOL_GPL(ata_port_pbar_desc); #endif /* CONFIG_PCI */ static int ata_lookup_timeout_table(u8 cmd) @@ -973,6 +972,7 @@ void ata_port_schedule_eh(struct ata_port *ap) /* see: ata_std_sched_eh, unless you know better */ ap->ops->sched_eh(ap); } +EXPORT_SYMBOL_GPL(ata_port_schedule_eh); static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) { @@ -1015,6 +1015,7 @@ int ata_link_abort(struct ata_link *link) { return ata_do_link_abort(link->ap, link); } +EXPORT_SYMBOL_GPL(ata_link_abort); /** * ata_port_abort - abort all qc's on the port @@ -1032,6 +1033,7 @@ int ata_port_abort(struct ata_port *ap) { return ata_do_link_abort(ap, NULL); } +EXPORT_SYMBOL_GPL(ata_port_abort); /** * __ata_port_freeze - freeze port @@ -1088,79 +1090,7 @@ int ata_port_freeze(struct ata_port *ap) return nr_aborted; } - -/** - * sata_async_notification - SATA async notification handler - * @ap: ATA port where async notification is received - * - * Handler to be called when async notification via SDB FIS is - * received. This function schedules EH if necessary. - * - * LOCKING: - * spin_lock_irqsave(host lock) - * - * RETURNS: - * 1 if EH is scheduled, 0 otherwise. - */ -int sata_async_notification(struct ata_port *ap) -{ - u32 sntf; - int rc; - - if (!(ap->flags & ATA_FLAG_AN)) - return 0; - - rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); - if (rc == 0) - sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); - - if (!sata_pmp_attached(ap) || rc) { - /* PMP is not attached or SNTF is not available */ - if (!sata_pmp_attached(ap)) { - /* PMP is not attached. Check whether ATAPI - * AN is configured. If so, notify media - * change. - */ - struct ata_device *dev = ap->link.device; - - if ((dev->class == ATA_DEV_ATAPI) && - (dev->flags & ATA_DFLAG_AN)) - ata_scsi_media_change_notify(dev); - return 0; - } else { - /* PMP is attached but SNTF is not available. - * ATAPI async media change notification is - * not used. The PMP must be reporting PHY - * status change, schedule EH. - */ - ata_port_schedule_eh(ap); - return 1; - } - } else { - /* PMP is attached and SNTF is available */ - struct ata_link *link; - - /* check and notify ATAPI AN */ - ata_for_each_link(link, ap, EDGE) { - if (!(sntf & (1 << link->pmp))) - continue; - - if ((link->device->class == ATA_DEV_ATAPI) && - (link->device->flags & ATA_DFLAG_AN)) - ata_scsi_media_change_notify(link->device); - } - - /* If PMP is reporting that PHY status of some - * downstream ports has changed, schedule EH. 
- */ - if (sntf & (1 << SATA_PMP_CTRL_PORT)) { - ata_port_schedule_eh(ap); - return 1; - } - - return 0; - } -} +EXPORT_SYMBOL_GPL(ata_port_freeze); /** * ata_eh_freeze_port - EH helper to freeze port @@ -1182,6 +1112,7 @@ void ata_eh_freeze_port(struct ata_port *ap) __ata_port_freeze(ap); spin_unlock_irqrestore(ap->lock, flags); } +EXPORT_SYMBOL_GPL(ata_eh_freeze_port); /** * ata_port_thaw_port - EH helper to thaw port @@ -1289,6 +1220,7 @@ void ata_dev_disable(struct ata_device *dev) */ ata_ering_clear(&dev->ering); } +EXPORT_SYMBOL_GPL(ata_dev_disable); /** * ata_eh_detach_dev - detach ATA device @@ -1420,62 +1352,6 @@ static const char *ata_err_string(unsigned int err_mask) } /** - * ata_eh_read_log_10h - Read log page 10h for NCQ error details - * @dev: Device to read log page 10h from - * @tag: Resulting tag of the failed command - * @tf: Resulting taskfile registers of the failed command - * - * Read log page 10h to obtain NCQ error details and clear error - * condition. - * - * LOCKING: - * Kernel thread context (may sleep). - * - * RETURNS: - * 0 on success, -errno otherwise. - */ -static int ata_eh_read_log_10h(struct ata_device *dev, - int *tag, struct ata_taskfile *tf) -{ - u8 *buf = dev->link->ap->sector_buf; - unsigned int err_mask; - u8 csum; - int i; - - err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1); - if (err_mask) - return -EIO; - - csum = 0; - for (i = 0; i < ATA_SECT_SIZE; i++) - csum += buf[i]; - if (csum) - ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n", - csum); - - if (buf[0] & 0x80) - return -ENOENT; - - *tag = buf[0] & 0x1f; - - tf->command = buf[2]; - tf->feature = buf[3]; - tf->lbal = buf[4]; - tf->lbam = buf[5]; - tf->lbah = buf[6]; - tf->device = buf[7]; - tf->hob_lbal = buf[8]; - tf->hob_lbam = buf[9]; - tf->hob_lbah = buf[10]; - tf->nsect = buf[12]; - tf->hob_nsect = buf[13]; - if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id)) - tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16]; - - return 0; -} - -/** * atapi_eh_tur - perform ATAPI TEST_UNIT_READY * @dev: target ATAPI device * @r_sense_key: out parameter for sense_key @@ -1659,80 +1535,6 @@ static void ata_eh_analyze_serror(struct ata_link *link) } /** - * ata_eh_analyze_ncq_error - analyze NCQ error - * @link: ATA link to analyze NCQ error for - * - * Read log page 10h, determine the offending qc and acquire - * error status TF. For NCQ device errors, all LLDDs have to do - * is setting AC_ERR_DEV in ehi->err_mask. This function takes - * care of the rest. - * - * LOCKING: - * Kernel thread context (may sleep). - */ -void ata_eh_analyze_ncq_error(struct ata_link *link) -{ - struct ata_port *ap = link->ap; - struct ata_eh_context *ehc = &link->eh_context; - struct ata_device *dev = link->device; - struct ata_queued_cmd *qc; - struct ata_taskfile tf; - int tag, rc; - - /* if frozen, we can't do much */ - if (ap->pflags & ATA_PFLAG_FROZEN) - return; - - /* is it NCQ device error? */ - if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) - return; - - /* has LLDD analyzed already? 
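
ata_eh_read_log_10h() (removed here, re-added in libata-sata.c further down) decodes the 512-byte NCQ Command Error log. An illustrative byte map of what the code consumes, written as a struct purely for readability (this is not a kernel structure; the driver indexes the raw sector buffer):

#include <linux/types.h>

struct ncq_error_log_10h {              /* illustrative only */
        __u8 flags;             /* bit 7: non-queued error (-> -ENOENT),
                                   bits 4:0: tag of the failed command */
        __u8 byte1;             /* not consumed above */
        __u8 status;            /* -> tf->command */
        __u8 error;             /* -> tf->feature */
        __u8 lbal, lbam, lbah, device;
        __u8 hob_lbal, hob_lbam, hob_lbah;
        __u8 byte11;            /* not consumed above */
        __u8 nsect, hob_nsect;
        __u8 aux[3];            /* bytes 14-16 -> tf->auxiliary
                                   (ZAC NCQ autosense only) */
        /* ... up to ATA_SECT_SIZE; the whole sector must sum to zero
           modulo 256, which is what the checksum loop verifies */
};
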
*/ - ata_qc_for_each_raw(ap, qc, tag) { - if (!(qc->flags & ATA_QCFLAG_FAILED)) - continue; - - if (qc->err_mask) - return; - } - - /* okay, this error is ours */ - memset(&tf, 0, sizeof(tf)); - rc = ata_eh_read_log_10h(dev, &tag, &tf); - if (rc) { - ata_link_err(link, "failed to read log page 10h (errno=%d)\n", - rc); - return; - } - - if (!(link->sactive & (1 << tag))) { - ata_link_err(link, "log page 10h reported inactive tag %d\n", - tag); - return; - } - - /* we've got the perpetrator, condemn it */ - qc = __ata_qc_from_tag(ap, tag); - memcpy(&qc->result_tf, &tf, sizeof(tf)); - qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; - qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; - if (dev->class == ATA_DEV_ZAC && - ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) { - char sense_key, asc, ascq; - - sense_key = (qc->result_tf.auxiliary >> 16) & 0xff; - asc = (qc->result_tf.auxiliary >> 8) & 0xff; - ascq = qc->result_tf.auxiliary & 0xff; - ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq); - ata_scsi_set_sense_information(dev, qc->scsicmd, - &qc->result_tf); - qc->flags |= ATA_QCFLAG_SENSE_VALID; - } - - ehc->i.err_mask &= ~AC_ERR_DEV; -} - -/** * ata_eh_analyze_tf - analyze taskfile of a failed qc * @qc: qc to analyze * @tf: Taskfile registers to analyze @@ -3436,7 +3238,8 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, int rc; /* if the link or host doesn't do LPM, noop */ - if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) + if (!IS_ENABLED(CONFIG_SATA_HOST) || + (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) return 0; /* @@ -4052,6 +3855,7 @@ void ata_std_error_handler(struct ata_port *ap) ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); } +EXPORT_SYMBOL_GPL(ata_std_error_handler); #ifdef CONFIG_PM /** diff --git a/drivers/ata/libata-pata-timings.c b/drivers/ata/libata-pata-timings.c new file mode 100644 index 000000000000..af341226cc64 --- /dev/null +++ b/drivers/ata/libata-pata-timings.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Helper library for PATA timings + * + * Copyright 2003-2004 Red Hat, Inc. All rights reserved. + * Copyright 2003-2004 Jeff Garzik + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/libata.h> + +/* + * This mode timing computation functionality is ported over from + * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik + */ +/* + * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). + * These were taken from ATA/ATAPI-6 standard, rev 0a, except + * for UDMA6, which is currently supported only by Maxtor drives. + * + * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
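
The IS_ENABLED(CONFIG_SATA_HOST) test added to ata_eh_set_lpm() above is the usual way to compile out a SATA-only path now that the SATA helpers can be left out of the build. The idiom in isolation (sketch, hypothetical function name):

#include <linux/libata.h>

static int example_set_lpm(struct ata_link *link)
{
        /* IS_ENABLED(CONFIG_SATA_HOST) is a compile-time 0 or 1 (1 for
         * =y or =m), so on PATA-only configurations the branch below is
         * constant-false and the optimizer drops the SATA-only call. */
        if (!IS_ENABLED(CONFIG_SATA_HOST))
                return 0;

        return sata_link_scr_lpm(link, ATA_LPM_MAX_POWER, false);
}
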
+ */ + +static const struct ata_timing ata_timing[] = { +/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ + { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, + { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, + { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, + { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, + { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, + { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, + { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, + + { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, + { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, + { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, + + { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, + { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, + { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, + { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, + { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, + +/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ + { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, + { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, + { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, + { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, + { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, + { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, + { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, + + { 0xFF } +}; + +#define ENOUGH(v, unit) (((v)-1)/(unit)+1) +#define EZ(v, unit) ((v)?ENOUGH(((v) * 1000), unit):0) + +static void ata_timing_quantize(const struct ata_timing *t, + struct ata_timing *q, int T, int UT) +{ + q->setup = EZ(t->setup, T); + q->act8b = EZ(t->act8b, T); + q->rec8b = EZ(t->rec8b, T); + q->cyc8b = EZ(t->cyc8b, T); + q->active = EZ(t->active, T); + q->recover = EZ(t->recover, T); + q->dmack_hold = EZ(t->dmack_hold, T); + q->cycle = EZ(t->cycle, T); + q->udma = EZ(t->udma, UT); +} + +void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, + struct ata_timing *m, unsigned int what) +{ + if (what & ATA_TIMING_SETUP) + m->setup = max(a->setup, b->setup); + if (what & ATA_TIMING_ACT8B) + m->act8b = max(a->act8b, b->act8b); + if (what & ATA_TIMING_REC8B) + m->rec8b = max(a->rec8b, b->rec8b); + if (what & ATA_TIMING_CYC8B) + m->cyc8b = max(a->cyc8b, b->cyc8b); + if (what & ATA_TIMING_ACTIVE) + m->active = max(a->active, b->active); + if (what & ATA_TIMING_RECOVER) + m->recover = max(a->recover, b->recover); + if (what & ATA_TIMING_DMACK_HOLD) + m->dmack_hold = max(a->dmack_hold, b->dmack_hold); + if (what & ATA_TIMING_CYCLE) + m->cycle = max(a->cycle, b->cycle); + if (what & ATA_TIMING_UDMA) + m->udma = max(a->udma, b->udma); +} +EXPORT_SYMBOL_GPL(ata_timing_merge); + +const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) +{ + const struct ata_timing *t = ata_timing; + + while (xfer_mode > t->mode) + t++; + + if (xfer_mode == t->mode) + return t; + + WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", + __func__, xfer_mode); + + return NULL; +} +EXPORT_SYMBOL_GPL(ata_timing_find_mode); + +int ata_timing_compute(struct ata_device *adev, unsigned short speed, + struct ata_timing *t, int T, int UT) +{ + const u16 *id = adev->id; + const struct ata_timing *s; + struct ata_timing p; + + /* + * Find the mode. + */ + s = ata_timing_find_mode(speed); + if (!s) + return -EINVAL; + + memcpy(t, s, sizeof(*s)); + + /* + * If the drive is an EIDE drive, it can tell us it needs extended + * PIO/MW_DMA cycle timing. 
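
ENOUGH() is a ceiling division and EZ() turns a nanosecond figure into bus-clock counts, with T (PIO/MWDMA) and UT (UDMA) being clock periods in picoseconds. A standalone arithmetic sketch of what ata_timing_quantize() produces for the XFER_PIO_4 row on a hypothetical 33 MHz controller clock:

#include <stdio.h>

/* same arithmetic as the kernel macros above; v in ns, unit in ps */
#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)                /* ceil(v/unit) */
#define EZ(v, unit)     ((v) ? ENOUGH((v) * 1000, unit) : 0)    /* ns -> clocks */

int main(void)
{
        int T = 30000;  /* hypothetical 33 MHz bus clock: ~30000 ps period */

        /* XFER_PIO_4 from the table above: setup 25, active 70,
         * recover 25, cycle 120 (all in ns) */
        printf("setup=%d active=%d recover=%d cycle=%d clocks\n",
               EZ(25, T), EZ(70, T), EZ(25, T), EZ(120, T));
        /* prints: setup=1 active=3 recover=1 cycle=4 */
        return 0;
}

Passing the period in picoseconds keeps the ceiling division exact for clocks whose period is not a whole number of nanoseconds.
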
+ */ + + if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ + memset(&p, 0, sizeof(p)); + + if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) { + if (speed <= XFER_PIO_2) + p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; + else if ((speed <= XFER_PIO_4) || + (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) + p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; + } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) + p.cycle = id[ATA_ID_EIDE_DMA_MIN]; + + ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); + } + + /* + * Convert the timing to bus clock counts. + */ + + ata_timing_quantize(t, t, T, UT); + + /* + * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, + * S.M.A.R.T * and some other commands. We have to ensure that the + * DMA cycle timing is slower/equal than the fastest PIO timing. + */ + + if (speed > XFER_PIO_6) { + ata_timing_compute(adev, adev->pio_mode, &p, T, UT); + ata_timing_merge(&p, t, t, ATA_TIMING_ALL); + } + + /* + * Lengthen active & recovery time so that cycle time is correct. + */ + + if (t->act8b + t->rec8b < t->cyc8b) { + t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; + t->rec8b = t->cyc8b - t->act8b; + } + + if (t->active + t->recover < t->cycle) { + t->active += (t->cycle - (t->active + t->recover)) / 2; + t->recover = t->cycle - t->active; + } + + /* + * In a few cases quantisation may produce enough errors to + * leave t->cycle too low for the sum of active and recovery + * if so we must correct this. + */ + if (t->active + t->recover > t->cycle) + t->cycle = t->active + t->recover; + + return 0; +} +EXPORT_SYMBOL_GPL(ata_timing_compute); diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c new file mode 100644 index 000000000000..c16423e44525 --- /dev/null +++ b/drivers/ata/libata-sata.c @@ -0,0 +1,1483 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SATA specific part of ATA helper library + * + * Copyright 2003-2004 Red Hat, Inc. All rights reserved. + * Copyright 2003-2004 Jeff Garzik + * Copyright 2006 Tejun Heo <htejun@gmail.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <linux/libata.h> + +#include "libata.h" +#include "libata-transport.h" + +/* debounce timing parameters in msecs { interval, duration, timeout } */ +const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; +EXPORT_SYMBOL_GPL(sata_deb_timing_normal); +const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; +EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); +const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; +EXPORT_SYMBOL_GPL(sata_deb_timing_long); + +/** + * sata_scr_valid - test whether SCRs are accessible + * @link: ATA link to test SCR accessibility for + * + * Test whether SCRs are accessible for @link. + * + * LOCKING: + * None. + * + * RETURNS: + * 1 if SCRs are accessible, 0 otherwise. + */ +int sata_scr_valid(struct ata_link *link) +{ + struct ata_port *ap = link->ap; + + return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; +} +EXPORT_SYMBOL_GPL(sata_scr_valid); + +/** + * sata_scr_read - read SCR register of the specified port + * @link: ATA link to read SCR for + * @reg: SCR to read + * @val: Place to store read value + * + * Read SCR register @reg of @link into *@val. This function is + * guaranteed to succeed if @link is ap->link, the cable type of + * the port is SATA and the port implements ->scr_read. + * + * LOCKING: + * None if @link is ap->link. Kernel thread context otherwise. 
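
The SCR accessors introduced here (sata_scr_valid(), sata_scr_read(), and the write variants below) are the building blocks for the rest of this file. A small sketch of the most common read pattern, checking the SStatus DET field for an established device connection (hypothetical helper):

#include <linux/libata.h>

static bool example_link_up(struct ata_link *link)
{
        u32 sstatus;

        if (sata_scr_read(link, SCR_STATUS, &sstatus))
                return false;           /* SCRs not accessible */

        /* DET (bits 3:0) == 3: device present, PHY communication up */
        return (sstatus & 0xf) == 0x3;
}
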
+ * + * RETURNS: + * 0 on success, negative errno on failure. + */ +int sata_scr_read(struct ata_link *link, int reg, u32 *val) +{ + if (ata_is_host_link(link)) { + if (sata_scr_valid(link)) + return link->ap->ops->scr_read(link, reg, val); + return -EOPNOTSUPP; + } + + return sata_pmp_scr_read(link, reg, val); +} +EXPORT_SYMBOL_GPL(sata_scr_read); + +/** + * sata_scr_write - write SCR register of the specified port + * @link: ATA link to write SCR for + * @reg: SCR to write + * @val: value to write + * + * Write @val to SCR register @reg of @link. This function is + * guaranteed to succeed if @link is ap->link, the cable type of + * the port is SATA and the port implements ->scr_read. + * + * LOCKING: + * None if @link is ap->link. Kernel thread context otherwise. + * + * RETURNS: + * 0 on success, negative errno on failure. + */ +int sata_scr_write(struct ata_link *link, int reg, u32 val) +{ + if (ata_is_host_link(link)) { + if (sata_scr_valid(link)) + return link->ap->ops->scr_write(link, reg, val); + return -EOPNOTSUPP; + } + + return sata_pmp_scr_write(link, reg, val); +} +EXPORT_SYMBOL_GPL(sata_scr_write); + +/** + * sata_scr_write_flush - write SCR register of the specified port and flush + * @link: ATA link to write SCR for + * @reg: SCR to write + * @val: value to write + * + * This function is identical to sata_scr_write() except that this + * function performs flush after writing to the register. + * + * LOCKING: + * None if @link is ap->link. Kernel thread context otherwise. + * + * RETURNS: + * 0 on success, negative errno on failure. + */ +int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) +{ + if (ata_is_host_link(link)) { + int rc; + + if (sata_scr_valid(link)) { + rc = link->ap->ops->scr_write(link, reg, val); + if (rc == 0) + rc = link->ap->ops->scr_read(link, reg, &val); + return rc; + } + return -EOPNOTSUPP; + } + + return sata_pmp_scr_write(link, reg, val); +} +EXPORT_SYMBOL_GPL(sata_scr_write_flush); + +/** + * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure + * @tf: Taskfile to convert + * @pmp: Port multiplier port + * @is_cmd: This FIS is for command + * @fis: Buffer into which data will output + * + * Converts a standard ATA taskfile to a Serial ATA + * FIS structure (Register - Host to Device). + * + * LOCKING: + * Inherited from caller. + */ +void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) +{ + fis[0] = 0x27; /* Register - Host to Device FIS */ + fis[1] = pmp & 0xf; /* Port multiplier number*/ + if (is_cmd) + fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ + + fis[2] = tf->command; + fis[3] = tf->feature; + + fis[4] = tf->lbal; + fis[5] = tf->lbam; + fis[6] = tf->lbah; + fis[7] = tf->device; + + fis[8] = tf->hob_lbal; + fis[9] = tf->hob_lbam; + fis[10] = tf->hob_lbah; + fis[11] = tf->hob_feature; + + fis[12] = tf->nsect; + fis[13] = tf->hob_nsect; + fis[14] = 0; + fis[15] = tf->ctl; + + fis[16] = tf->auxiliary & 0xff; + fis[17] = (tf->auxiliary >> 8) & 0xff; + fis[18] = (tf->auxiliary >> 16) & 0xff; + fis[19] = (tf->auxiliary >> 24) & 0xff; +} +EXPORT_SYMBOL_GPL(ata_tf_to_fis); + +/** + * ata_tf_from_fis - Convert SATA FIS to ATA taskfile + * @fis: Buffer from which data will be input + * @tf: Taskfile to output + * + * Converts a serial ATA FIS structure to a standard ATA taskfile. + * + * LOCKING: + * Inherited from caller. 
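
ata_tf_to_fis() and ata_tf_from_fis() are the serializers an LLD uses around its command table and received-FIS area. A sketch of both directions (buffer names are hypothetical; AHCI-class drivers follow this pattern):

#include <linux/libata.h>

static void example_build_cmd_fis(struct ata_queued_cmd *qc, u8 *cmd_tbl)
{
        /* emits the 20-byte Register - Host to Device FIS: byte 0 becomes
         * 0x27, byte 1 carries the PMP number with bit 7 set for a
         * command FIS, later bytes mirror the taskfile fields */
        ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
}

static void example_read_result_tf(struct ata_queued_cmd *qc, const u8 *rx_fis)
{
        /* inverse direction: pick status/error/LBA/count back out of a
         * received Register - Device to Host FIS */
        ata_tf_from_fis(rx_fis, &qc->result_tf);
}
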
+ */ + +void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) +{ + tf->command = fis[2]; /* status */ + tf->feature = fis[3]; /* error */ + + tf->lbal = fis[4]; + tf->lbam = fis[5]; + tf->lbah = fis[6]; + tf->device = fis[7]; + + tf->hob_lbal = fis[8]; + tf->hob_lbam = fis[9]; + tf->hob_lbah = fis[10]; + + tf->nsect = fis[12]; + tf->hob_nsect = fis[13]; +} +EXPORT_SYMBOL_GPL(ata_tf_from_fis); + +/** + * sata_link_debounce - debounce SATA phy status + * @link: ATA link to debounce SATA phy status for + * @params: timing parameters { interval, duration, timeout } in msec + * @deadline: deadline jiffies for the operation + * + * Make sure SStatus of @link reaches stable state, determined by + * holding the same value where DET is not 1 for @duration polled + * every @interval, before @timeout. Timeout constraints the + * beginning of the stable state. Because DET gets stuck at 1 on + * some controllers after hot unplugging, this functions waits + * until timeout then returns 0 if DET is stable at 1. + * + * @timeout is further limited by @deadline. The sooner of the + * two is used. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int sata_link_debounce(struct ata_link *link, const unsigned long *params, + unsigned long deadline) +{ + unsigned long interval = params[0]; + unsigned long duration = params[1]; + unsigned long last_jiffies, t; + u32 last, cur; + int rc; + + t = ata_deadline(jiffies, params[2]); + if (time_before(t, deadline)) + deadline = t; + + if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) + return rc; + cur &= 0xf; + + last = cur; + last_jiffies = jiffies; + + while (1) { + ata_msleep(link->ap, interval); + if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) + return rc; + cur &= 0xf; + + /* DET stable? */ + if (cur == last) { + if (cur == 1 && time_before(jiffies, deadline)) + continue; + if (time_after(jiffies, + ata_deadline(last_jiffies, duration))) + return 0; + continue; + } + + /* unstable, start over */ + last = cur; + last_jiffies = jiffies; + + /* Check deadline. If debouncing failed, return + * -EPIPE to tell upper layer to lower link speed. + */ + if (time_after(jiffies, deadline)) + return -EPIPE; + } +} +EXPORT_SYMBOL_GPL(sata_link_debounce); + +/** + * sata_link_resume - resume SATA link + * @link: ATA link to resume SATA + * @params: timing parameters { interval, duration, timeout } in msec + * @deadline: deadline jiffies for the operation + * + * Resume SATA phy @link and debounce it. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int sata_link_resume(struct ata_link *link, const unsigned long *params, + unsigned long deadline) +{ + int tries = ATA_LINK_RESUME_TRIES; + u32 scontrol, serror; + int rc; + + if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) + return rc; + + /* + * Writes to SControl sometimes get ignored under certain + * controllers (ata_piix SIDPR). Make sure DET actually is + * cleared. + */ + do { + scontrol = (scontrol & 0x0f0) | 0x300; + if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) + return rc; + /* + * Some PHYs react badly if SStatus is pounded + * immediately after resuming. Delay 200ms before + * debouncing. + */ + if (!(link->flags & ATA_LFLAG_NO_DB_DELAY)) + ata_msleep(link->ap, 200); + + /* is SControl restored correctly? 
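
The sata_deb_timing_* tables at the top of this file are the { interval, duration, timeout } triples (in ms) expected by @params here. A sketch of how a reset path picks one and bounds it with a deadline (hypothetical wrapper):

#include <linux/libata.h>

static int example_debounce(struct ata_link *link, unsigned long deadline,
                            bool hotplug)
{
        const unsigned long *timing = hotplug ? sata_deb_timing_hotplug
                                              : sata_deb_timing_normal;

        /* returns 0 once SStatus.DET has held a stable value for
         * timing[1] ms (polled every timing[0] ms), giving up after
         * timing[2] ms or @deadline, whichever comes first */
        return sata_link_debounce(link, timing, deadline);
}
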
*/ + if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) + return rc; + } while ((scontrol & 0xf0f) != 0x300 && --tries); + + if ((scontrol & 0xf0f) != 0x300) { + ata_link_warn(link, "failed to resume link (SControl %X)\n", + scontrol); + return 0; + } + + if (tries < ATA_LINK_RESUME_TRIES) + ata_link_warn(link, "link resume succeeded after %d retries\n", + ATA_LINK_RESUME_TRIES - tries); + + if ((rc = sata_link_debounce(link, params, deadline))) + return rc; + + /* clear SError, some PHYs require this even for SRST to work */ + if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) + rc = sata_scr_write(link, SCR_ERROR, serror); + + return rc != -EINVAL ? rc : 0; +} +EXPORT_SYMBOL_GPL(sata_link_resume); + +/** + * sata_link_scr_lpm - manipulate SControl IPM and SPM fields + * @link: ATA link to manipulate SControl for + * @policy: LPM policy to configure + * @spm_wakeup: initiate LPM transition to active state + * + * Manipulate the IPM field of the SControl register of @link + * according to @policy. If @policy is ATA_LPM_MAX_POWER and + * @spm_wakeup is %true, the SPM field is manipulated to wake up + * the link. This function also clears PHYRDY_CHG before + * returning. + * + * LOCKING: + * EH context. + * + * RETURNS: + * 0 on success, -errno otherwise. + */ +int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, + bool spm_wakeup) +{ + struct ata_eh_context *ehc = &link->eh_context; + bool woken_up = false; + u32 scontrol; + int rc; + + rc = sata_scr_read(link, SCR_CONTROL, &scontrol); + if (rc) + return rc; + + switch (policy) { + case ATA_LPM_MAX_POWER: + /* disable all LPM transitions */ + scontrol |= (0x7 << 8); + /* initiate transition to active state */ + if (spm_wakeup) { + scontrol |= (0x4 << 12); + woken_up = true; + } + break; + case ATA_LPM_MED_POWER: + /* allow LPM to PARTIAL */ + scontrol &= ~(0x1 << 8); + scontrol |= (0x6 << 8); + break; + case ATA_LPM_MED_POWER_WITH_DIPM: + case ATA_LPM_MIN_POWER_WITH_PARTIAL: + case ATA_LPM_MIN_POWER: + if (ata_link_nr_enabled(link) > 0) + /* no restrictions on LPM transitions */ + scontrol &= ~(0x7 << 8); + else { + /* empty port, power off */ + scontrol &= ~0xf; + scontrol |= (0x1 << 2); + } + break; + default: + WARN_ON(1); + } + + rc = sata_scr_write(link, SCR_CONTROL, scontrol); + if (rc) + return rc; + + /* give the link time to transit out of LPM state */ + if (woken_up) + msleep(10); + + /* clear PHYRDY_CHG from SError */ + ehc->i.serror &= ~SERR_PHYRDY_CHG; + return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); +} +EXPORT_SYMBOL_GPL(sata_link_scr_lpm); + +static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) +{ + struct ata_link *host_link = &link->ap->link; + u32 limit, target, spd; + + limit = link->sata_spd_limit; + + /* Don't configure downstream link faster than upstream link. + * It doesn't speed up anything and some PMPs choke on such + * configuration. + */ + if (!ata_is_host_link(link) && host_link->sata_spd) + limit &= (1 << host_link->sata_spd) - 1; + + if (limit == UINT_MAX) + target = 0; + else + target = fls(limit); + + spd = (*scontrol >> 4) & 0xf; + *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); + + return spd != target; +} + +/** + * sata_set_spd_needed - is SATA spd configuration needed + * @link: Link in question + * + * Test whether the spd limit in SControl matches + * @link->sata_spd_limit. This function is used to determine + * whether hardreset is necessary to apply SATA spd + * configuration. + * + * LOCKING: + * Inherited from caller. 
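
The raw masks in sata_link_resume(), sata_link_scr_lpm() and sata_set_spd() above are easier to follow with the SControl field layout in mind; a commented sketch (these definitions are illustrative, not kernel macros):

/* SControl, as manipulated above:
 *
 *   bits  3:0  DET - device detection/initialization
 *                    (0x1 = issue COMRESET, 0x4 = put the PHY offline)
 *   bits  7:4  SPD - highest allowed interface speed (0 = no limit)
 *   bits 11:8  IPM - power-management transitions to disable
 *                    (bit 8 PARTIAL, bit 9 SLUMBER, bit 10 DevSleep;
 *                     0x7 disables them all)
 *   bits 15:12 SPM - power-management transition to initiate
 *                    (0x4 = wake to active)
 */
#define EXAMPLE_SCONTROL_DET    0x0000000fu
#define EXAMPLE_SCONTROL_SPD    0x000000f0u
#define EXAMPLE_SCONTROL_IPM    0x00000f00u
#define EXAMPLE_SCONTROL_SPM    0x0000f000u
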
+ * + * RETURNS: + * 1 if SATA spd configuration is needed, 0 otherwise. + */ +static int sata_set_spd_needed(struct ata_link *link) +{ + u32 scontrol; + + if (sata_scr_read(link, SCR_CONTROL, &scontrol)) + return 1; + + return __sata_set_spd_needed(link, &scontrol); +} + +/** + * sata_set_spd - set SATA spd according to spd limit + * @link: Link to set SATA spd for + * + * Set SATA spd of @link according to sata_spd_limit. + * + * LOCKING: + * Inherited from caller. + * + * RETURNS: + * 0 if spd doesn't need to be changed, 1 if spd has been + * changed. Negative errno if SCR registers are inaccessible. + */ +int sata_set_spd(struct ata_link *link) +{ + u32 scontrol; + int rc; + + if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) + return rc; + + if (!__sata_set_spd_needed(link, &scontrol)) + return 0; + + if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) + return rc; + + return 1; +} +EXPORT_SYMBOL_GPL(sata_set_spd); + +/** + * sata_link_hardreset - reset link via SATA phy reset + * @link: link to reset + * @timing: timing parameters { interval, duration, timeout } in msec + * @deadline: deadline jiffies for the operation + * @online: optional out parameter indicating link onlineness + * @check_ready: optional callback to check link readiness + * + * SATA phy-reset @link using DET bits of SControl register. + * After hardreset, link readiness is waited upon using + * ata_wait_ready() if @check_ready is specified. LLDs are + * allowed to not specify @check_ready and wait itself after this + * function returns. Device classification is LLD's + * responsibility. + * + * *@online is set to one iff reset succeeded and @link is online + * after reset. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * 0 on success, -errno otherwise. + */ +int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, + unsigned long deadline, + bool *online, int (*check_ready)(struct ata_link *)) +{ + u32 scontrol; + int rc; + + DPRINTK("ENTER\n"); + + if (online) + *online = false; + + if (sata_set_spd_needed(link)) { + /* SATA spec says nothing about how to reconfigure + * spd. To be on the safe side, turn off phy during + * reconfiguration. This works for at least ICH7 AHCI + * and Sil3124. + */ + if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) + goto out; + + scontrol = (scontrol & 0x0f0) | 0x304; + + if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) + goto out; + + sata_set_spd(link); + } + + /* issue phy wake/reset */ + if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) + goto out; + + scontrol = (scontrol & 0x0f0) | 0x301; + + if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) + goto out; + + /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 + * 10.4.2 says at least 1 ms. + */ + ata_msleep(link->ap, 1); + + /* bring link back */ + rc = sata_link_resume(link, timing, deadline); + if (rc) + goto out; + /* if link is offline nothing more to do */ + if (ata_phys_link_offline(link)) + goto out; + + /* Link is online. From this point, -ENODEV too is an error. */ + if (online) + *online = true; + + if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { + /* If PMP is supported, we have to do follow-up SRST. + * Some PMPs don't send D2H Reg FIS after hardreset if + * the first port is empty. Wait only for + * ATA_TMOUT_PMP_SRST_WAIT. 
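
A driver's ->hardreset hook is usually a thin wrapper around sata_link_hardreset(); the sketch below is roughly what libata's stock sata_std_hardreset() does, with the optional readiness callback left NULL as the kernel-doc above permits:

#include <linux/libata.h>

static int example_hardreset(struct ata_link *link, unsigned int *class,
                             unsigned long deadline)
{
        const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
        bool online;
        int rc;

        rc = sata_link_hardreset(link, timing, deadline, &online, NULL);

        /* device classification is left to a follow-up softreset */
        return online ? -EAGAIN : rc;
}
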
+ */ + if (check_ready) { + unsigned long pmp_deadline; + + pmp_deadline = ata_deadline(jiffies, + ATA_TMOUT_PMP_SRST_WAIT); + if (time_after(pmp_deadline, deadline)) + pmp_deadline = deadline; + ata_wait_ready(link, pmp_deadline, check_ready); + } + rc = -EAGAIN; + goto out; + } + + rc = 0; + if (check_ready) + rc = ata_wait_ready(link, deadline, check_ready); + out: + if (rc && rc != -EAGAIN) { + /* online is set iff link is online && reset succeeded */ + if (online) + *online = false; + ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); + } + DPRINTK("EXIT, rc=%d\n", rc); + return rc; +} +EXPORT_SYMBOL_GPL(sata_link_hardreset); + +/** + * ata_qc_complete_multiple - Complete multiple qcs successfully + * @ap: port in question + * @qc_active: new qc_active mask + * + * Complete in-flight commands. This functions is meant to be + * called from low-level driver's interrupt routine to complete + * requests normally. ap->qc_active and @qc_active is compared + * and commands are completed accordingly. + * + * Always use this function when completing multiple NCQ commands + * from IRQ handlers instead of calling ata_qc_complete() + * multiple times to keep IRQ expect status properly in sync. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * Number of completed commands on success, -errno otherwise. + */ +int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active) +{ + u64 done_mask, ap_qc_active = ap->qc_active; + int nr_done = 0; + + /* + * If the internal tag is set on ap->qc_active, then we care about + * bit0 on the passed in qc_active mask. Move that bit up to match + * the internal tag. + */ + if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) { + qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL; + qc_active ^= qc_active & 0x01; + } + + done_mask = ap_qc_active ^ qc_active; + + if (unlikely(done_mask & qc_active)) { + ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n", + ap->qc_active, qc_active); + return -EINVAL; + } + + while (done_mask) { + struct ata_queued_cmd *qc; + unsigned int tag = __ffs64(done_mask); + + qc = ata_qc_from_tag(ap, tag); + if (qc) { + ata_qc_complete(qc); + nr_done++; + } + done_mask &= ~(1ULL << tag); + } + + return nr_done; +} +EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); + +/** + * ata_slave_link_init - initialize slave link + * @ap: port to initialize slave link for + * + * Create and initialize slave link for @ap. This enables slave + * link handling on the port. + * + * In libata, a port contains links and a link contains devices. + * There is single host link but if a PMP is attached to it, + * there can be multiple fan-out links. On SATA, there's usually + * a single device connected to a link but PATA and SATA + * controllers emulating TF based interface can have two - master + * and slave. + * + * However, there are a few controllers which don't fit into this + * abstraction too well - SATA controllers which emulate TF + * interface with both master and slave devices but also have + * separate SCR register sets for each device. These controllers + * need separate links for physical link handling + * (e.g. onlineness, link speed) but should be treated like a + * traditional M/S controller for everything else (e.g. command + * issue, softreset). + * + * slave_link is libata's way of handling this class of + * controllers without impacting core layer too much. For + * anything other than physical link handling, the default host + * link is used for both master and slave. 
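
ata_qc_complete_multiple() wants the controller's current still-active tag mask and derives what completed since the last call. A sketch of the interrupt-side usage; the register read is hypothetical (AHCI-class hardware exposes this through its SACT/CI registers):

#include <linux/libata.h>

static u64 example_read_active_tags(struct ata_port *ap); /* hypothetical MMIO read */

static void example_ncq_irq(struct ata_port *ap)
{
        /* bit N set => tag N still in flight on the controller */
        u64 qc_active = example_read_active_tags(ap);

        /* completes every command whose bit has cleared, keeping
         * ap->qc_active and the IRQ-expect state in sync */
        ata_qc_complete_multiple(ap, qc_active);
}
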
For physical link + * handling, separate @ap->slave_link is used. All dirty details + * are implemented inside libata core layer. From LLD's POV, the + * only difference is that prereset, hardreset and postreset are + * called once more for the slave link, so the reset sequence + * looks like the following. + * + * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> + * softreset(M) -> postreset(M) -> postreset(S) + * + * Note that softreset is called only for the master. Softreset + * resets both M/S by definition, so SRST on master should handle + * both (the standard method will work just fine). + * + * LOCKING: + * Should be called before host is registered. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int ata_slave_link_init(struct ata_port *ap) +{ + struct ata_link *link; + + WARN_ON(ap->slave_link); + WARN_ON(ap->flags & ATA_FLAG_PMP); + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) + return -ENOMEM; + + ata_link_init(ap, link, 1); + ap->slave_link = link; + return 0; +} +EXPORT_SYMBOL_GPL(ata_slave_link_init); + +/** + * sata_lpm_ignore_phy_events - test if PHY event should be ignored + * @link: Link receiving the event + * + * Test whether the received PHY event has to be ignored or not. + * + * LOCKING: + * None: + * + * RETURNS: + * True if the event has to be ignored. + */ +bool sata_lpm_ignore_phy_events(struct ata_link *link) +{ + unsigned long lpm_timeout = link->last_lpm_change + + msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY); + + /* if LPM is enabled, PHYRDY doesn't mean anything */ + if (link->lpm_policy > ATA_LPM_MAX_POWER) + return true; + + /* ignore the first PHY event after the LPM policy changed + * as it is might be spurious + */ + if ((link->flags & ATA_LFLAG_CHANGED) && + time_before(jiffies, lpm_timeout)) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events); + +static const char *ata_lpm_policy_names[] = { + [ATA_LPM_UNKNOWN] = "max_performance", + [ATA_LPM_MAX_POWER] = "max_performance", + [ATA_LPM_MED_POWER] = "medium_power", + [ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm", + [ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial", + [ATA_LPM_MIN_POWER] = "min_power", +}; + +static ssize_t ata_scsi_lpm_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(device); + struct ata_port *ap = ata_shost_to_port(shost); + struct ata_link *link; + struct ata_device *dev; + enum ata_lpm_policy policy; + unsigned long flags; + + /* UNKNOWN is internal state, iterate from MAX_POWER */ + for (policy = ATA_LPM_MAX_POWER; + policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) { + const char *name = ata_lpm_policy_names[policy]; + + if (strncmp(name, buf, strlen(name)) == 0) + break; + } + if (policy == ARRAY_SIZE(ata_lpm_policy_names)) + return -EINVAL; + + spin_lock_irqsave(ap->lock, flags); + + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, &ap->link, ENABLED) { + if (dev->horkage & ATA_HORKAGE_NOLPM) { + count = -EOPNOTSUPP; + goto out_unlock; + } + } + } + + ap->target_lpm_policy = policy; + ata_port_schedule_eh(ap); +out_unlock: + spin_unlock_irqrestore(ap->lock, flags); + return count; +} + +static ssize_t ata_scsi_lpm_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + + if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names)) + return -EINVAL; + + return snprintf(buf, 
PAGE_SIZE, "%s\n", + ata_lpm_policy_names[ap->target_lpm_policy]); +} +DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR, + ata_scsi_lpm_show, ata_scsi_lpm_store); +EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy); + +static ssize_t ata_ncq_prio_enable_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(device); + struct ata_port *ap; + struct ata_device *dev; + bool ncq_prio_enable; + int rc = 0; + + ap = ata_shost_to_port(sdev->host); + + spin_lock_irq(ap->lock); + dev = ata_scsi_find_dev(ap, sdev); + if (!dev) { + rc = -ENODEV; + goto unlock; + } + + ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE; + +unlock: + spin_unlock_irq(ap->lock); + + return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable); +} + +static ssize_t ata_ncq_prio_enable_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct scsi_device *sdev = to_scsi_device(device); + struct ata_port *ap; + struct ata_device *dev; + long int input; + int rc; + + rc = kstrtol(buf, 10, &input); + if (rc) + return rc; + if ((input < 0) || (input > 1)) + return -EINVAL; + + ap = ata_shost_to_port(sdev->host); + dev = ata_scsi_find_dev(ap, sdev); + if (unlikely(!dev)) + return -ENODEV; + + spin_lock_irq(ap->lock); + if (input) + dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE; + else + dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE; + + dev->link->eh_info.action |= ATA_EH_REVALIDATE; + dev->link->eh_info.flags |= ATA_EHI_QUIET; + ata_port_schedule_eh(ap); + spin_unlock_irq(ap->lock); + + ata_port_wait_eh(ap); + + if (input) { + spin_lock_irq(ap->lock); + if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) { + dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE; + rc = -EIO; + } + spin_unlock_irq(ap->lock); + } + + return rc ? 
rc : len; +} + +DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR, + ata_ncq_prio_enable_show, ata_ncq_prio_enable_store); +EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable); + +struct device_attribute *ata_ncq_sdev_attrs[] = { + &dev_attr_unload_heads, + &dev_attr_ncq_prio_enable, + NULL +}; +EXPORT_SYMBOL_GPL(ata_ncq_sdev_attrs); + +static ssize_t +ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM)) + return ap->ops->em_store(ap, buf, count); + return -EINVAL; +} + +static ssize_t +ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + + if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM)) + return ap->ops->em_show(ap, buf); + return -EINVAL; +} +DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR, + ata_scsi_em_message_show, ata_scsi_em_message_store); +EXPORT_SYMBOL_GPL(dev_attr_em_message); + +static ssize_t +ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + + return snprintf(buf, 23, "%d\n", ap->em_message_type); +} +DEVICE_ATTR(em_message_type, S_IRUGO, + ata_scsi_em_message_type_show, NULL); +EXPORT_SYMBOL_GPL(dev_attr_em_message_type); + +static ssize_t +ata_scsi_activity_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); + + if (atadev && ap->ops->sw_activity_show && + (ap->flags & ATA_FLAG_SW_ACTIVITY)) + return ap->ops->sw_activity_show(atadev, buf); + return -EINVAL; +} + +static ssize_t +ata_scsi_activity_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); + enum sw_activity val; + int rc; + + if (atadev && ap->ops->sw_activity_store && + (ap->flags & ATA_FLAG_SW_ACTIVITY)) { + val = simple_strtoul(buf, NULL, 0); + switch (val) { + case OFF: case BLINK_ON: case BLINK_OFF: + rc = ap->ops->sw_activity_store(atadev, val); + if (!rc) + return count; + else + return rc; + } + } + return -EINVAL; +} +DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show, + ata_scsi_activity_store); +EXPORT_SYMBOL_GPL(dev_attr_sw_activity); + +/** + * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth + * @ap: ATA port to which the device change the queue depth + * @sdev: SCSI device to configure queue depth for + * @queue_depth: new queue depth + * + * libsas and libata have different approaches for associating a sdev to + * its ata_port. + * + */ +int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, + int queue_depth) +{ + struct ata_device *dev; + unsigned long flags; + + if (queue_depth < 1 || queue_depth == sdev->queue_depth) + return sdev->queue_depth; + + dev = ata_scsi_find_dev(ap, sdev); + if (!dev || !ata_dev_enabled(dev)) + return sdev->queue_depth; + + /* NCQ enabled? 
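
dev_attr_ncq_prio_enable and ata_ncq_sdev_attrs[] reach userspace through the SCSI host template of an NCQ-capable driver. A sketch of that wiring (names hypothetical; in-tree drivers normally get the same effect from the ATA_NCQ_SHT() helper macro):

#include <linux/libata.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template example_sht = {
        ATA_NCQ_SHT("example"),         /* fills in .sdev_attrs = ata_ncq_sdev_attrs,
                                         * .change_queue_depth = ata_scsi_change_queue_depth,
                                         * and the usual libata defaults */
        .can_queue      = ATA_MAX_QUEUE - 1,    /* one tag kept for internal cmds */
        .sg_tablesize   = LIBATA_MAX_PRD,
        .dma_boundary   = ATA_DMA_BOUNDARY,
};
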
*/ + spin_lock_irqsave(ap->lock, flags); + dev->flags &= ~ATA_DFLAG_NCQ_OFF; + if (queue_depth == 1 || !ata_ncq_enabled(dev)) { + dev->flags |= ATA_DFLAG_NCQ_OFF; + queue_depth = 1; + } + spin_unlock_irqrestore(ap->lock, flags); + + /* limit and apply queue depth */ + queue_depth = min(queue_depth, sdev->host->can_queue); + queue_depth = min(queue_depth, ata_id_queue_depth(dev->id)); + queue_depth = min(queue_depth, ATA_MAX_QUEUE); + + if (sdev->queue_depth == queue_depth) + return -EINVAL; + + return scsi_change_queue_depth(sdev, queue_depth); +} +EXPORT_SYMBOL_GPL(__ata_change_queue_depth); + +/** + * ata_scsi_change_queue_depth - SCSI callback for queue depth config + * @sdev: SCSI device to configure queue depth for + * @queue_depth: new queue depth + * + * This is libata standard hostt->change_queue_depth callback. + * SCSI will call into this callback when user tries to set queue + * depth via sysfs. + * + * LOCKING: + * SCSI layer (we don't care) + * + * RETURNS: + * Newly configured queue depth. + */ +int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth) +{ + struct ata_port *ap = ata_shost_to_port(sdev->host); + + return __ata_change_queue_depth(ap, sdev, queue_depth); +} +EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); + +/** + * port_alloc - Allocate port for a SAS attached SATA device + * @host: ATA host container for all SAS ports + * @port_info: Information from low-level host driver + * @shost: SCSI host that the scsi device is attached to + * + * LOCKING: + * PCI/etc. bus probe sem. + * + * RETURNS: + * ata_port pointer on success / NULL on failure. + */ + +struct ata_port *ata_sas_port_alloc(struct ata_host *host, + struct ata_port_info *port_info, + struct Scsi_Host *shost) +{ + struct ata_port *ap; + + ap = ata_port_alloc(host); + if (!ap) + return NULL; + + ap->port_no = 0; + ap->lock = &host->lock; + ap->pio_mask = port_info->pio_mask; + ap->mwdma_mask = port_info->mwdma_mask; + ap->udma_mask = port_info->udma_mask; + ap->flags |= port_info->flags; + ap->ops = port_info->port_ops; + ap->cbl = ATA_CBL_SATA; + + return ap; +} +EXPORT_SYMBOL_GPL(ata_sas_port_alloc); + +/** + * ata_sas_port_start - Set port up for dma. + * @ap: Port to initialize + * + * Called just after data structures for each port are + * initialized. + * + * May be used as the port_start() entry in ata_port_operations. + * + * LOCKING: + * Inherited from caller. + */ +int ata_sas_port_start(struct ata_port *ap) +{ + /* + * the port is marked as frozen at allocation time, but if we don't + * have new eh, we won't thaw it + */ + if (!ap->ops->error_handler) + ap->pflags &= ~ATA_PFLAG_FROZEN; + return 0; +} +EXPORT_SYMBOL_GPL(ata_sas_port_start); + +/** + * ata_port_stop - Undo ata_sas_port_start() + * @ap: Port to shut down + * + * May be used as the port_stop() entry in ata_port_operations. + * + * LOCKING: + * Inherited from caller. 
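
The ata_sas_* helpers in this stretch of the file exist so libsas can run a SATA device without registering a full ATA host. A hedged sketch of the intended ordering, pieced together from the kernel-doc here and just below (error handling simplified; the real caller is libsas):

#include <linux/libata.h>

static struct ata_port *example_sas_attach(struct ata_host *host,
                                           struct ata_port_info *pi,
                                           struct Scsi_Host *shost,
                                           struct device *parent)
{
        struct ata_port *ap;

        ap = ata_sas_port_alloc(host, pi, shost);       /* allocate the port */
        if (!ap)
                return NULL;

        if (ata_sas_port_init(ap) ||                    /* ->port_start(), print id */
            ata_sas_tport_add(parent, ap)) {            /* transport sysfs node */
                ata_sas_port_destroy(ap);               /* ->port_stop() + free */
                return NULL;
        }

        return ap;
}
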
+ */ + +void ata_sas_port_stop(struct ata_port *ap) +{ +} +EXPORT_SYMBOL_GPL(ata_sas_port_stop); + +/** + * ata_sas_async_probe - simply schedule probing and return + * @ap: Port to probe + * + * For batch scheduling of probe for sas attached ata devices, assumes + * the port has already been through ata_sas_port_init() + */ +void ata_sas_async_probe(struct ata_port *ap) +{ + __ata_port_probe(ap); +} +EXPORT_SYMBOL_GPL(ata_sas_async_probe); + +int ata_sas_sync_probe(struct ata_port *ap) +{ + return ata_port_probe(ap); +} +EXPORT_SYMBOL_GPL(ata_sas_sync_probe); + + +/** + * ata_sas_port_init - Initialize a SATA device + * @ap: SATA port to initialize + * + * LOCKING: + * PCI/etc. bus probe sem. + * + * RETURNS: + * Zero on success, non-zero on error. + */ + +int ata_sas_port_init(struct ata_port *ap) +{ + int rc = ap->ops->port_start(ap); + + if (rc) + return rc; + ap->print_id = atomic_inc_return(&ata_print_id); + return 0; +} +EXPORT_SYMBOL_GPL(ata_sas_port_init); + +int ata_sas_tport_add(struct device *parent, struct ata_port *ap) +{ + return ata_tport_add(parent, ap); +} +EXPORT_SYMBOL_GPL(ata_sas_tport_add); + +void ata_sas_tport_delete(struct ata_port *ap) +{ + ata_tport_delete(ap); +} +EXPORT_SYMBOL_GPL(ata_sas_tport_delete); + +/** + * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc + * @ap: SATA port to destroy + * + */ + +void ata_sas_port_destroy(struct ata_port *ap) +{ + if (ap->ops->port_stop) + ap->ops->port_stop(ap); + kfree(ap); +} +EXPORT_SYMBOL_GPL(ata_sas_port_destroy); + +/** + * ata_sas_slave_configure - Default slave_config routine for libata devices + * @sdev: SCSI device to configure + * @ap: ATA port to which SCSI device is attached + * + * RETURNS: + * Zero. + */ + +int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap) +{ + ata_scsi_sdev_config(sdev); + ata_scsi_dev_config(sdev, ap->link.device); + return 0; +} +EXPORT_SYMBOL_GPL(ata_sas_slave_configure); + +/** + * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device + * @cmd: SCSI command to be sent + * @ap: ATA port to which the command is being sent + * + * RETURNS: + * Return value from __ata_scsi_queuecmd() if @cmd can be queued, + * 0 otherwise. + */ + +int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap) +{ + int rc = 0; + + ata_scsi_dump_cdb(ap, cmd); + + if (likely(ata_dev_enabled(ap->link.device))) + rc = __ata_scsi_queuecmd(cmd, ap->link.device); + else { + cmd->result = (DID_BAD_TARGET << 16); + cmd->scsi_done(cmd); + } + return rc; +} +EXPORT_SYMBOL_GPL(ata_sas_queuecmd); + +int ata_sas_allocate_tag(struct ata_port *ap) +{ + unsigned int max_queue = ap->host->n_tags; + unsigned int i, tag; + + for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) { + tag = tag < max_queue ? tag : 0; + + /* the last tag is reserved for internal command. */ + if (ata_tag_internal(tag)) + continue; + + if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) { + ap->sas_last_tag = tag; + return tag; + } + } + return -1; +} + +void ata_sas_free_tag(unsigned int tag, struct ata_port *ap) +{ + clear_bit(tag, &ap->sas_tag_allocated); +} + +/** + * sata_async_notification - SATA async notification handler + * @ap: ATA port where async notification is received + * + * Handler to be called when async notification via SDB FIS is + * received. This function schedules EH if necessary. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * 1 if EH is scheduled, 0 otherwise. 
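
sata_async_notification() is what an LLD calls, under the host lock, when it sees a Set Device Bits FIS with the notification bit set. A sketch of the interrupt-side plumbing (the status bit name is hypothetical; the pattern follows AHCI-class drivers):

#include <linux/libata.h>

#define EXAMPLE_IRQ_SDB_NOTIFY  (1u << 3)       /* hypothetical status bit */

static void example_port_irq(struct ata_port *ap, u32 irq_stat)
{
        if (irq_stat & EXAMPLE_IRQ_SDB_NOTIFY) {
                /* schedules EH for PMP PHY events, or pushes an ATAPI
                 * media-change notification to the SCSI layer;
                 * returns 1 if EH was scheduled */
                sata_async_notification(ap);
        }
}
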
+ */ +int sata_async_notification(struct ata_port *ap) +{ + u32 sntf; + int rc; + + if (!(ap->flags & ATA_FLAG_AN)) + return 0; + + rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); + if (rc == 0) + sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); + + if (!sata_pmp_attached(ap) || rc) { + /* PMP is not attached or SNTF is not available */ + if (!sata_pmp_attached(ap)) { + /* PMP is not attached. Check whether ATAPI + * AN is configured. If so, notify media + * change. + */ + struct ata_device *dev = ap->link.device; + + if ((dev->class == ATA_DEV_ATAPI) && + (dev->flags & ATA_DFLAG_AN)) + ata_scsi_media_change_notify(dev); + return 0; + } else { + /* PMP is attached but SNTF is not available. + * ATAPI async media change notification is + * not used. The PMP must be reporting PHY + * status change, schedule EH. + */ + ata_port_schedule_eh(ap); + return 1; + } + } else { + /* PMP is attached and SNTF is available */ + struct ata_link *link; + + /* check and notify ATAPI AN */ + ata_for_each_link(link, ap, EDGE) { + if (!(sntf & (1 << link->pmp))) + continue; + + if ((link->device->class == ATA_DEV_ATAPI) && + (link->device->flags & ATA_DFLAG_AN)) + ata_scsi_media_change_notify(link->device); + } + + /* If PMP is reporting that PHY status of some + * downstream ports has changed, schedule EH. + */ + if (sntf & (1 << SATA_PMP_CTRL_PORT)) { + ata_port_schedule_eh(ap); + return 1; + } + + return 0; + } +} +EXPORT_SYMBOL_GPL(sata_async_notification); + +/** + * ata_eh_read_log_10h - Read log page 10h for NCQ error details + * @dev: Device to read log page 10h from + * @tag: Resulting tag of the failed command + * @tf: Resulting taskfile registers of the failed command + * + * Read log page 10h to obtain NCQ error details and clear error + * condition. + * + * LOCKING: + * Kernel thread context (may sleep). + * + * RETURNS: + * 0 on success, -errno otherwise. + */ +static int ata_eh_read_log_10h(struct ata_device *dev, + int *tag, struct ata_taskfile *tf) +{ + u8 *buf = dev->link->ap->sector_buf; + unsigned int err_mask; + u8 csum; + int i; + + err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1); + if (err_mask) + return -EIO; + + csum = 0; + for (i = 0; i < ATA_SECT_SIZE; i++) + csum += buf[i]; + if (csum) + ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n", + csum); + + if (buf[0] & 0x80) + return -ENOENT; + + *tag = buf[0] & 0x1f; + + tf->command = buf[2]; + tf->feature = buf[3]; + tf->lbal = buf[4]; + tf->lbam = buf[5]; + tf->lbah = buf[6]; + tf->device = buf[7]; + tf->hob_lbal = buf[8]; + tf->hob_lbam = buf[9]; + tf->hob_lbah = buf[10]; + tf->nsect = buf[12]; + tf->hob_nsect = buf[13]; + if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id)) + tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16]; + + return 0; +} + +/** + * ata_eh_analyze_ncq_error - analyze NCQ error + * @link: ATA link to analyze NCQ error for + * + * Read log page 10h, determine the offending qc and acquire + * error status TF. For NCQ device errors, all LLDDs have to do + * is setting AC_ERR_DEV in ehi->err_mask. This function takes + * care of the rest. + * + * LOCKING: + * Kernel thread context (may sleep). + */ +void ata_eh_analyze_ncq_error(struct ata_link *link) +{ + struct ata_port *ap = link->ap; + struct ata_eh_context *ehc = &link->eh_context; + struct ata_device *dev = link->device; + struct ata_queued_cmd *qc; + struct ata_taskfile tf; + int tag, rc; + + /* if frozen, we can't do much */ + if (ap->pflags & ATA_PFLAG_FROZEN) + return; + + /* is it NCQ device error? 
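
Per the kernel-doc above, a driver's entire obligation on an NCQ device error is to flag AC_ERR_DEV and hand the port to EH; ata_eh_analyze_ncq_error() then reads log page 10h and pins the failure on the right tag. A minimal sketch of the driver side of that contract (hypothetical function, called under the host lock):

#include <linux/libata.h>

static void example_ncq_device_error(struct ata_port *ap)
{
        struct ata_eh_info *ehi = &ap->link.eh_info;

        ehi->err_mask |= AC_ERR_DEV;
        ata_port_abort(ap);     /* fail the outstanding qcs, schedule EH */
}
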
*/ + if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) + return; + + /* has LLDD analyzed already? */ + ata_qc_for_each_raw(ap, qc, tag) { + if (!(qc->flags & ATA_QCFLAG_FAILED)) + continue; + + if (qc->err_mask) + return; + } + + /* okay, this error is ours */ + memset(&tf, 0, sizeof(tf)); + rc = ata_eh_read_log_10h(dev, &tag, &tf); + if (rc) { + ata_link_err(link, "failed to read log page 10h (errno=%d)\n", + rc); + return; + } + + if (!(link->sactive & (1 << tag))) { + ata_link_err(link, "log page 10h reported inactive tag %d\n", + tag); + return; + } + + /* we've got the perpetrator, condemn it */ + qc = __ata_qc_from_tag(ap, tag); + memcpy(&qc->result_tf, &tf, sizeof(tf)); + qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; + qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; + if (dev->class == ATA_DEV_ZAC && + ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) { + char sense_key, asc, ascq; + + sense_key = (qc->result_tf.auxiliary >> 16) & 0xff; + asc = (qc->result_tf.auxiliary >> 8) & 0xff; + ascq = qc->result_tf.auxiliary & 0xff; + ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq); + ata_scsi_set_sense_information(dev, qc->scsicmd, + &qc->result_tf); + qc->flags |= ATA_QCFLAG_SENSE_VALID; + } + + ehc->i.err_mask &= ~AC_ERR_DEV; +} +EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index eb2eb599e602..36e588d88b95 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2,10 +2,6 @@ /* * libata-scsi.c - helper library for ATA * - * Maintained by: Tejun Heo <tj@kernel.org> - * Please ALWAYS copy linux-ide@vger.kernel.org - * on emails. - * * Copyright 2003-2004 Red Hat, Inc. All rights reserved. * Copyright 2003-2004 Jeff Garzik * @@ -36,11 +32,12 @@ #include <linux/suspend.h> #include <asm/unaligned.h> #include <linux/ioprio.h> +#include <linux/of.h> #include "libata.h" #include "libata-transport.h" -#define ATA_SCSI_RBUF_SIZE 4096 +#define ATA_SCSI_RBUF_SIZE 576 static DEFINE_SPINLOCK(ata_scsi_rbuf_lock); static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE]; @@ -49,8 +46,6 @@ typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc); static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev); -static struct ata_device *ata_scsi_find_dev(struct ata_port *ap, - const struct scsi_device *scsidev); #define RW_RECOVERY_MPAGE 0x1 #define RW_RECOVERY_MPAGE_LEN 12 @@ -90,71 +85,6 @@ static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = { 0, 30 /* extended self test time, see 05-359r1 */ }; -static const char *ata_lpm_policy_names[] = { - [ATA_LPM_UNKNOWN] = "max_performance", - [ATA_LPM_MAX_POWER] = "max_performance", - [ATA_LPM_MED_POWER] = "medium_power", - [ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm", - [ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial", - [ATA_LPM_MIN_POWER] = "min_power", -}; - -static ssize_t ata_scsi_lpm_store(struct device *device, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct Scsi_Host *shost = class_to_shost(device); - struct ata_port *ap = ata_shost_to_port(shost); - struct ata_link *link; - struct ata_device *dev; - enum ata_lpm_policy policy; - unsigned long flags; - - /* UNKNOWN is internal state, iterate from MAX_POWER */ - for (policy = ATA_LPM_MAX_POWER; - policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) { - const char *name = ata_lpm_policy_names[policy]; - - if (strncmp(name, buf, strlen(name)) == 0) - break; - } - if (policy == 
ARRAY_SIZE(ata_lpm_policy_names)) - return -EINVAL; - - spin_lock_irqsave(ap->lock, flags); - - ata_for_each_link(link, ap, EDGE) { - ata_for_each_dev(dev, &ap->link, ENABLED) { - if (dev->horkage & ATA_HORKAGE_NOLPM) { - count = -EOPNOTSUPP; - goto out_unlock; - } - } - } - - ap->target_lpm_policy = policy; - ata_port_schedule_eh(ap); -out_unlock: - spin_unlock_irqrestore(ap->lock, flags); - return count; -} - -static ssize_t ata_scsi_lpm_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct ata_port *ap = ata_shost_to_port(shost); - - if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names)) - return -EINVAL; - - return snprintf(buf, PAGE_SIZE, "%s\n", - ata_lpm_policy_names[ap->target_lpm_policy]); -} -DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR, - ata_scsi_lpm_show, ata_scsi_lpm_store); -EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy); - static ssize_t ata_scsi_park_show(struct device *device, struct device_attribute *attr, char *buf) { @@ -258,83 +188,6 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR, ata_scsi_park_show, ata_scsi_park_store); EXPORT_SYMBOL_GPL(dev_attr_unload_heads); -static ssize_t ata_ncq_prio_enable_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct scsi_device *sdev = to_scsi_device(device); - struct ata_port *ap; - struct ata_device *dev; - bool ncq_prio_enable; - int rc = 0; - - ap = ata_shost_to_port(sdev->host); - - spin_lock_irq(ap->lock); - dev = ata_scsi_find_dev(ap, sdev); - if (!dev) { - rc = -ENODEV; - goto unlock; - } - - ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE; - -unlock: - spin_unlock_irq(ap->lock); - - return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable); -} - -static ssize_t ata_ncq_prio_enable_store(struct device *device, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct scsi_device *sdev = to_scsi_device(device); - struct ata_port *ap; - struct ata_device *dev; - long int input; - int rc; - - rc = kstrtol(buf, 10, &input); - if (rc) - return rc; - if ((input < 0) || (input > 1)) - return -EINVAL; - - ap = ata_shost_to_port(sdev->host); - dev = ata_scsi_find_dev(ap, sdev); - if (unlikely(!dev)) - return -ENODEV; - - spin_lock_irq(ap->lock); - if (input) - dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE; - else - dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE; - - dev->link->eh_info.action |= ATA_EH_REVALIDATE; - dev->link->eh_info.flags |= ATA_EHI_QUIET; - ata_port_schedule_eh(ap); - spin_unlock_irq(ap->lock); - - ata_port_wait_eh(ap); - - if (input) { - spin_lock_irq(ap->lock); - if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) { - dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE; - rc = -EIO; - } - spin_unlock_irq(ap->lock); - } - - return rc ? 
rc : len; -} - -DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR, - ata_ncq_prio_enable_show, ata_ncq_prio_enable_store); -EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable); - void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) { @@ -383,90 +236,8 @@ static void ata_scsi_set_invalid_parameter(struct ata_device *dev, field, 0xff, 0); } -static ssize_t -ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct ata_port *ap = ata_shost_to_port(shost); - if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM)) - return ap->ops->em_store(ap, buf, count); - return -EINVAL; -} - -static ssize_t -ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct ata_port *ap = ata_shost_to_port(shost); - - if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM)) - return ap->ops->em_show(ap, buf); - return -EINVAL; -} -DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR, - ata_scsi_em_message_show, ata_scsi_em_message_store); -EXPORT_SYMBOL_GPL(dev_attr_em_message); - -static ssize_t -ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct ata_port *ap = ata_shost_to_port(shost); - - return snprintf(buf, 23, "%d\n", ap->em_message_type); -} -DEVICE_ATTR(em_message_type, S_IRUGO, - ata_scsi_em_message_type_show, NULL); -EXPORT_SYMBOL_GPL(dev_attr_em_message_type); - -static ssize_t -ata_scsi_activity_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct scsi_device *sdev = to_scsi_device(dev); - struct ata_port *ap = ata_shost_to_port(sdev->host); - struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); - - if (atadev && ap->ops->sw_activity_show && - (ap->flags & ATA_FLAG_SW_ACTIVITY)) - return ap->ops->sw_activity_show(atadev, buf); - return -EINVAL; -} - -static ssize_t -ata_scsi_activity_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct scsi_device *sdev = to_scsi_device(dev); - struct ata_port *ap = ata_shost_to_port(sdev->host); - struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); - enum sw_activity val; - int rc; - - if (atadev && ap->ops->sw_activity_store && - (ap->flags & ATA_FLAG_SW_ACTIVITY)) { - val = simple_strtoul(buf, NULL, 0); - switch (val) { - case OFF: case BLINK_ON: case BLINK_OFF: - rc = ap->ops->sw_activity_store(atadev, val); - if (!rc) - return count; - else - return rc; - } - } - return -EINVAL; -} -DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show, - ata_scsi_activity_store); -EXPORT_SYMBOL_GPL(dev_attr_sw_activity); - struct device_attribute *ata_common_sdev_attrs[] = { &dev_attr_unload_heads, - &dev_attr_ncq_prio_enable, NULL }; EXPORT_SYMBOL_GPL(ata_common_sdev_attrs); @@ -499,6 +270,7 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, return 0; } +EXPORT_SYMBOL_GPL(ata_std_bios_param); /** * ata_scsi_unlock_native_capacity - unlock native capacity @@ -528,6 +300,7 @@ void ata_scsi_unlock_native_capacity(struct scsi_device *sdev) spin_unlock_irqrestore(ap->lock, flags); ata_port_wait_eh(ap); } +EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); /** * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl @@ -1215,7 +988,7 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc) scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, 
block); } -static void ata_scsi_sdev_config(struct scsi_device *sdev) +void ata_scsi_sdev_config(struct scsi_device *sdev) { sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; @@ -1255,8 +1028,7 @@ static int atapi_drain_needed(struct request *rq) return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC; } -static int ata_scsi_dev_config(struct scsi_device *sdev, - struct ata_device *dev) +int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) { struct request_queue *q = sdev->request_queue; @@ -1344,6 +1116,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev) return rc; } +EXPORT_SYMBOL_GPL(ata_scsi_slave_config); /** * ata_scsi_slave_destroy - SCSI device is about to be destroyed @@ -1383,71 +1156,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev) q->dma_drain_buffer = NULL; q->dma_drain_size = 0; } - -/** - * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth - * @ap: ATA port to which the device change the queue depth - * @sdev: SCSI device to configure queue depth for - * @queue_depth: new queue depth - * - * libsas and libata have different approaches for associating a sdev to - * its ata_port. - * - */ -int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, - int queue_depth) -{ - struct ata_device *dev; - unsigned long flags; - - if (queue_depth < 1 || queue_depth == sdev->queue_depth) - return sdev->queue_depth; - - dev = ata_scsi_find_dev(ap, sdev); - if (!dev || !ata_dev_enabled(dev)) - return sdev->queue_depth; - - /* NCQ enabled? */ - spin_lock_irqsave(ap->lock, flags); - dev->flags &= ~ATA_DFLAG_NCQ_OFF; - if (queue_depth == 1 || !ata_ncq_enabled(dev)) { - dev->flags |= ATA_DFLAG_NCQ_OFF; - queue_depth = 1; - } - spin_unlock_irqrestore(ap->lock, flags); - - /* limit and apply queue depth */ - queue_depth = min(queue_depth, sdev->host->can_queue); - queue_depth = min(queue_depth, ata_id_queue_depth(dev->id)); - queue_depth = min(queue_depth, ATA_MAX_QUEUE); - - if (sdev->queue_depth == queue_depth) - return -EINVAL; - - return scsi_change_queue_depth(sdev, queue_depth); -} - -/** - * ata_scsi_change_queue_depth - SCSI callback for queue depth config - * @sdev: SCSI device to configure queue depth for - * @queue_depth: new queue depth - * - * This is libata standard hostt->change_queue_depth callback. - * SCSI will call into this callback when user tries to set queue - * depth via sysfs. - * - * LOCKING: - * SCSI layer (we don't care) - * - * RETURNS: - * Newly configured queue depth. 
- */ -int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth) -{ - struct ata_port *ap = ata_shost_to_port(sdev->host); - - return __ata_change_queue_depth(ap, sdev, queue_depth); -} +EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); /** * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command @@ -2354,10 +2063,6 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf) */ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) { - struct ata_taskfile tf; - - memset(&tf, 0, sizeof(tf)); - rbuf[1] = 0x89; /* our page code */ rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */ rbuf[3] = (0x238 & 0xff); @@ -2366,14 +2071,14 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) memcpy(&rbuf[16], "libata ", 16); memcpy(&rbuf[32], DRV_VERSION, 4); - /* we don't store the ATA device signature, so we fake it */ - - tf.command = ATA_DRDY; /* really, this is Status reg */ - tf.lbal = 0x1; - tf.nsect = 0x1; - - ata_tf_to_fis(&tf, 0, 1, &rbuf[36]); /* TODO: PMP? */ rbuf[36] = 0x34; /* force D2H Reg FIS (34h) */ + rbuf[37] = (1 << 7); /* bit 7 indicates Command FIS */ + /* TODO: PMP? */ + + /* we don't store the ATA device signature, so we fake it */ + rbuf[38] = ATA_DRDY; /* really, this is Status reg */ + rbuf[40] = 0x1; + rbuf[48] = 0x1; rbuf[56] = ATA_CMD_ID_ATA; @@ -3089,7 +2794,7 @@ static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, * RETURNS: * Associated ATA device, or %NULL if not found. */ -static struct ata_device * +struct ata_device * ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev) { struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev); @@ -4299,8 +4004,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) * Prints the contents of a SCSI command via printk(). 
*/ -static inline void ata_scsi_dump_cdb(struct ata_port *ap, - struct scsi_cmnd *cmd) +void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd) { #ifdef ATA_VERBOSE_DEBUG struct scsi_device *scsidev = cmd->device; @@ -4312,8 +4016,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap, #endif } -static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, - struct ata_device *dev) +int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev) { u8 scsi_op = scmd->cmnd[0]; ata_xlat_func_t xlat_func; @@ -4407,6 +4110,7 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) return rc; } +EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); /** * ata_scsi_simulate - simulate SCSI command on ATA device @@ -4562,26 +4266,51 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) */ shost->max_host_blocked = 1; - rc = scsi_add_host_with_dma(ap->scsi_host, - &ap->tdev, ap->host->dev); + rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev); if (rc) - goto err_add; + goto err_alloc; } return 0; - err_add: - scsi_host_put(host->ports[i]->scsi_host); err_alloc: while (--i >= 0) { struct Scsi_Host *shost = host->ports[i]->scsi_host; + /* scsi_host_put() is in ata_devres_release() */ scsi_remove_host(shost); - scsi_host_put(shost); } return rc; } +#ifdef CONFIG_OF +static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap) +{ + struct scsi_device *sdev = dev->sdev; + struct device *d = ap->host->dev; + struct device_node *np = d->of_node; + struct device_node *child; + + for_each_available_child_of_node(np, child) { + int ret; + u32 val; + + ret = of_property_read_u32(child, "reg", &val); + if (ret) + continue; + if (val == dev->devno) { + dev_dbg(d, "found matching device node\n"); + sdev->sdev_gendev.of_node = child; + return; + } + } +} +#else +static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap) +{ +} +#endif + void ata_scsi_scan_host(struct ata_port *ap, int sync) { int tries = 5; @@ -4607,6 +4336,7 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync) NULL); if (!IS_ERR(sdev)) { dev->sdev = sdev; + ata_scsi_assign_ofnode(dev, ap); scsi_device_put(sdev); } else { dev->sdev = NULL; @@ -4929,214 +4659,3 @@ void ata_scsi_dev_rescan(struct work_struct *work) spin_unlock_irqrestore(ap->lock, flags); mutex_unlock(&ap->scsi_scan_mutex); } - -/** - * ata_sas_port_alloc - Allocate port for a SAS attached SATA device - * @host: ATA host container for all SAS ports - * @port_info: Information from low-level host driver - * @shost: SCSI host that the scsi device is attached to - * - * LOCKING: - * PCI/etc. bus probe sem. - * - * RETURNS: - * ata_port pointer on success / NULL on failure. - */ - -struct ata_port *ata_sas_port_alloc(struct ata_host *host, - struct ata_port_info *port_info, - struct Scsi_Host *shost) -{ - struct ata_port *ap; - - ap = ata_port_alloc(host); - if (!ap) - return NULL; - - ap->port_no = 0; - ap->lock = &host->lock; - ap->pio_mask = port_info->pio_mask; - ap->mwdma_mask = port_info->mwdma_mask; - ap->udma_mask = port_info->udma_mask; - ap->flags |= port_info->flags; - ap->ops = port_info->port_ops; - ap->cbl = ATA_CBL_SATA; - - return ap; -} -EXPORT_SYMBOL_GPL(ata_sas_port_alloc); - -/** - * ata_sas_port_start - Set port up for dma. - * @ap: Port to initialize - * - * Called just after data structures for each port are - * initialized. - * - * May be used as the port_start() entry in ata_port_operations. - * - * LOCKING: - * Inherited from caller. 
- */ -int ata_sas_port_start(struct ata_port *ap) -{ - /* - * the port is marked as frozen at allocation time, but if we don't - * have new eh, we won't thaw it - */ - if (!ap->ops->error_handler) - ap->pflags &= ~ATA_PFLAG_FROZEN; - return 0; -} -EXPORT_SYMBOL_GPL(ata_sas_port_start); - -/** - * ata_port_stop - Undo ata_sas_port_start() - * @ap: Port to shut down - * - * May be used as the port_stop() entry in ata_port_operations. - * - * LOCKING: - * Inherited from caller. - */ - -void ata_sas_port_stop(struct ata_port *ap) -{ -} -EXPORT_SYMBOL_GPL(ata_sas_port_stop); - -/** - * ata_sas_async_probe - simply schedule probing and return - * @ap: Port to probe - * - * For batch scheduling of probe for sas attached ata devices, assumes - * the port has already been through ata_sas_port_init() - */ -void ata_sas_async_probe(struct ata_port *ap) -{ - __ata_port_probe(ap); -} -EXPORT_SYMBOL_GPL(ata_sas_async_probe); - -int ata_sas_sync_probe(struct ata_port *ap) -{ - return ata_port_probe(ap); -} -EXPORT_SYMBOL_GPL(ata_sas_sync_probe); - - -/** - * ata_sas_port_init - Initialize a SATA device - * @ap: SATA port to initialize - * - * LOCKING: - * PCI/etc. bus probe sem. - * - * RETURNS: - * Zero on success, non-zero on error. - */ - -int ata_sas_port_init(struct ata_port *ap) -{ - int rc = ap->ops->port_start(ap); - - if (rc) - return rc; - ap->print_id = atomic_inc_return(&ata_print_id); - return 0; -} -EXPORT_SYMBOL_GPL(ata_sas_port_init); - -int ata_sas_tport_add(struct device *parent, struct ata_port *ap) -{ - return ata_tport_add(parent, ap); -} -EXPORT_SYMBOL_GPL(ata_sas_tport_add); - -void ata_sas_tport_delete(struct ata_port *ap) -{ - ata_tport_delete(ap); -} -EXPORT_SYMBOL_GPL(ata_sas_tport_delete); - -/** - * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc - * @ap: SATA port to destroy - * - */ - -void ata_sas_port_destroy(struct ata_port *ap) -{ - if (ap->ops->port_stop) - ap->ops->port_stop(ap); - kfree(ap); -} -EXPORT_SYMBOL_GPL(ata_sas_port_destroy); - -/** - * ata_sas_slave_configure - Default slave_config routine for libata devices - * @sdev: SCSI device to configure - * @ap: ATA port to which SCSI device is attached - * - * RETURNS: - * Zero. - */ - -int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap) -{ - ata_scsi_sdev_config(sdev); - ata_scsi_dev_config(sdev, ap->link.device); - return 0; -} -EXPORT_SYMBOL_GPL(ata_sas_slave_configure); - -/** - * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device - * @cmd: SCSI command to be sent - * @ap: ATA port to which the command is being sent - * - * RETURNS: - * Return value from __ata_scsi_queuecmd() if @cmd can be queued, - * 0 otherwise. - */ - -int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap) -{ - int rc = 0; - - ata_scsi_dump_cdb(ap, cmd); - - if (likely(ata_dev_enabled(ap->link.device))) - rc = __ata_scsi_queuecmd(cmd, ap->link.device); - else { - cmd->result = (DID_BAD_TARGET << 16); - cmd->scsi_done(cmd); - } - return rc; -} -EXPORT_SYMBOL_GPL(ata_sas_queuecmd); - -int ata_sas_allocate_tag(struct ata_port *ap) -{ - unsigned int max_queue = ap->host->n_tags; - unsigned int i, tag; - - for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) { - tag = tag < max_queue ? tag : 0; - - /* the last tag is reserved for internal command. 
*/ - if (ata_tag_internal(tag)) - continue; - - if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) { - ap->sas_last_tag = tag; - return tag; - } - } - return -1; -} - -void ata_sas_free_tag(unsigned int tag, struct ata_port *ap) -{ - clear_bit(tag, &ap->sas_tag_allocated); -} diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 038db94216a9..ae7189d1a568 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -2,10 +2,6 @@ /* * libata-sff.c - helper library for PCI IDE BMDMA * - * Maintained by: Tejun Heo <tj@kernel.org> - * Please ALWAYS copy linux-ide@vger.kernel.org - * on emails. - * * Copyright 2003-2006 Red Hat, Inc. All rights reserved. * Copyright 2003-2006 Jeff Garzik * diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index 12a505bb9c5b..6a40e3c6cf49 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -208,7 +208,7 @@ show_ata_port_##name(struct device *dev, \ { \ struct ata_port *ap = transport_class_to_port(dev); \ \ - return snprintf(buf, 20, format_string, cast ap->field); \ + return scnprintf(buf, 20, format_string, cast ap->field); \ } #define ata_port_simple_attr(field, name, format_string, type) \ @@ -479,7 +479,7 @@ show_ata_dev_##field(struct device *dev, \ { \ struct ata_device *ata_dev = transport_class_to_dev(dev); \ \ - return snprintf(buf, 20, format_string, cast ata_dev->field); \ + return scnprintf(buf, 20, format_string, cast ata_dev->field); \ } #define ata_dev_simple_attr(field, format_string, type) \ @@ -533,7 +533,7 @@ show_ata_dev_id(struct device *dev, if (ata_dev->class == ATA_DEV_PMP) return 0; for(i=0;i<ATA_ID_WORDS;i++) { - written += snprintf(buf+written, 20, "%04x%c", + written += scnprintf(buf+written, 20, "%04x%c", ata_dev->id[i], ((i+1) & 7) ? ' ' : '\n'); } @@ -552,7 +552,7 @@ show_ata_dev_gscr(struct device *dev, if (ata_dev->class != ATA_DEV_PMP) return 0; for(i=0;i<SATA_PMP_GSCR_DWORDS;i++) { - written += snprintf(buf+written, 20, "%08x%c", + written += scnprintf(buf+written, 20, "%08x%c", ata_dev->gscr[i], ((i+1) & 3) ? 
' ' : '\n'); } @@ -581,7 +581,7 @@ show_ata_dev_trim(struct device *dev, else mode = "unqueued"; - return snprintf(buf, 20, "%s\n", mode); + return scnprintf(buf, 20, "%s\n", mode); } static DEVICE_ATTR(trim, S_IRUGO, show_ata_dev_trim, NULL); diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index cd8090ad43e5..68cdd81d747c 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -37,7 +37,11 @@ extern int libata_noacpi; extern int libata_allow_tpm; extern const struct device_type ata_port_type; extern struct ata_link *ata_dev_phys_link(struct ata_device *dev); +#ifdef CONFIG_ATA_FORCE extern void ata_force_cbl(struct ata_port *ap); +#else +static inline void ata_force_cbl(struct ata_port *ap) { } +#endif extern u64 ata_tf_to_lba(const struct ata_taskfile *tf); extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf); extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag); @@ -87,6 +91,18 @@ extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log, #define to_ata_port(d) container_of(d, struct ata_port, tdev) +/* libata-sata.c */ +#ifdef CONFIG_SATA_HOST +int ata_sas_allocate_tag(struct ata_port *ap); +void ata_sas_free_tag(unsigned int tag, struct ata_port *ap); +#else +static inline int ata_sas_allocate_tag(struct ata_port *ap) +{ + return -EOPNOTSUPP; +} +static inline void ata_sas_free_tag(unsigned int tag, struct ata_port *ap) { } +#endif + /* libata-acpi.c */ #ifdef CONFIG_ATA_ACPI extern unsigned int ata_acpi_gtf_filter; @@ -112,6 +128,8 @@ static inline void ata_acpi_bind_dev(struct ata_device *dev) {} #endif /* libata-scsi.c */ +extern struct ata_device *ata_scsi_find_dev(struct ata_port *ap, + const struct scsi_device *scsidev); extern int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht); extern void ata_scsi_scan_host(struct ata_port *ap, int sync); @@ -128,9 +146,10 @@ extern void ata_scsi_dev_rescan(struct work_struct *work); extern int ata_bus_probe(struct ata_port *ap); extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, unsigned int id, u64 lun); -int ata_sas_allocate_tag(struct ata_port *ap); -void ata_sas_free_tag(unsigned int tag, struct ata_port *ap); - +void ata_scsi_sdev_config(struct scsi_device *sdev); +int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev); +void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd); +int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev); /* libata-eh.c */ extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd); diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index c451d7d1c817..8729f78cef5f 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -157,7 +157,6 @@ static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class, static void pdc_error_handler(struct ata_port *ap); static void pdc_post_internal_cmd(struct ata_queued_cmd *qc); static int pdc_pata_cable_detect(struct ata_port *ap); -static int pdc_sata_cable_detect(struct ata_port *ap); static struct scsi_host_template pdc_ata_sht = { ATA_BASE_SHT(DRV_NAME), @@ -183,7 +182,7 @@ static const struct ata_port_operations pdc_common_ops = { static struct ata_port_operations pdc_sata_ops = { .inherits = &pdc_common_ops, - .cable_detect = pdc_sata_cable_detect, + .cable_detect = ata_cable_sata, .freeze = pdc_sata_freeze, .thaw = pdc_sata_thaw, .scr_read = pdc_sata_scr_read, @@ -459,11 +458,6 @@ static int pdc_pata_cable_detect(struct ata_port *ap) return 
ATA_CBL_PATA80; } -static int pdc_sata_cable_detect(struct ata_port *ap) -{ - return ATA_CBL_SATA; -} - static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) { diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index 8db8c0fb5e2d..7af74fb450a0 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -91,7 +91,7 @@ #ifdef GENERAL_DEBUG #define PRINTK(args...) printk(args) #else -#define PRINTK(args...) +#define PRINTK(args...) do {} while (0) #endif /* GENERAL_DEBUG */ #ifdef EXTRA_DEBUG diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index b8313a04422d..48efa7a047f3 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -111,7 +111,7 @@ config CFAG12864B If unsure, say N. config CFAG12864B_RATE - int "Refresh rate (hertz)" + int "Refresh rate (hertz)" depends on CFAG12864B default "20" ---help--- @@ -329,7 +329,7 @@ config PANEL_LCD_PROTO config PANEL_LCD_PIN_E depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" - int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) " + int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) " range -17 17 default 14 ---help--- @@ -344,7 +344,7 @@ config PANEL_LCD_PIN_E config PANEL_LCD_PIN_RS depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" - int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) " + int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) " range -17 17 default 17 ---help--- @@ -359,7 +359,7 @@ config PANEL_LCD_PIN_RS config PANEL_LCD_PIN_RW depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" - int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) " + int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) " range -17 17 default 16 ---help--- @@ -374,7 +374,7 @@ config PANEL_LCD_PIN_RW config PANEL_LCD_PIN_SCL depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0" - int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) " + int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) " range -17 17 default 1 ---help--- @@ -389,7 +389,7 @@ config PANEL_LCD_PIN_SCL config PANEL_LCD_PIN_SDA depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0" - int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) " + int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) " range -17 17 default 2 ---help--- @@ -404,12 +404,12 @@ config PANEL_LCD_PIN_SDA config PANEL_LCD_PIN_BL depends on PANEL_PROFILE="0" && PANEL_LCD="1" - int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) " + int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) " range -17 17 default 0 ---help--- This describes the number of the parallel port pin to which the LCD 'BL' signal - has been connected. It can be : + has been connected. 
It can be : 0 : no connection (eg: connected to ground) 1..17 : directly connected to any of these pins on the DB25 plug diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index 874c259a8829..c0da3820454b 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -88,7 +88,7 @@ struct charlcd_priv { int len; } esc_seq; - unsigned long long drvdata[0]; + unsigned long long drvdata[]; }; #define charlcd_to_priv(p) container_of(p, struct charlcd_priv, lcd) diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c index efb928e25aef..1cce409ce5ca 100644 --- a/drivers/auxdisplay/img-ascii-lcd.c +++ b/drivers/auxdisplay/img-ascii-lcd.c @@ -356,7 +356,6 @@ static int img_ascii_lcd_probe(struct platform_device *pdev) const struct of_device_id *match; const struct img_ascii_lcd_config *cfg; struct img_ascii_lcd_ctx *ctx; - struct resource *res; int err; match = of_match_device(img_ascii_lcd_matches, &pdev->dev); @@ -378,8 +377,7 @@ static int img_ascii_lcd_probe(struct platform_device *pdev) &ctx->offset)) return -EINVAL; } else { - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->base = devm_ioremap_resource(&pdev->dev, res); + ctx->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->base)) return PTR_ERR(ctx->base); } diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 6119e11a9f95..e5d691cf824c 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -94,7 +94,7 @@ static void update_topology_flags_workfn(struct work_struct *work) update_topology = 0; } -static u32 capacity_scale; +static DEFINE_PER_CPU(u32, freq_factor) = 1; static u32 *raw_capacity; static int free_raw_capacity(void) @@ -108,17 +108,23 @@ static int free_raw_capacity(void) void topology_normalize_cpu_scale(void) { u64 capacity; + u64 capacity_scale; int cpu; if (!raw_capacity) return; - pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale); + capacity_scale = 1; for_each_possible_cpu(cpu) { - pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n", - cpu, raw_capacity[cpu]); - capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT) - / capacity_scale; + capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); + capacity_scale = max(capacity, capacity_scale); + } + + pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale); + for_each_possible_cpu(cpu) { + capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); + capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT, + capacity_scale); topology_set_cpu_scale(cpu, capacity); pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", cpu, topology_get_cpu_scale(cpu)); @@ -127,6 +133,7 @@ void topology_normalize_cpu_scale(void) bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) { + struct clk *cpu_clk; static bool cap_parsing_failed; int ret; u32 cpu_capacity; @@ -146,10 +153,22 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) return false; } } - capacity_scale = max(cpu_capacity, capacity_scale); raw_capacity[cpu] = cpu_capacity; pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n", cpu_node, raw_capacity[cpu]); + + /* + * Update freq_factor for calculating early boot cpu capacities. + * For non-clk CPU DVFS mechanism, there's no way to get the + * frequency value now, assuming they are running at the same + * frequency (by keeping the initial freq_factor value). 
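For a concrete feel for the arithmetic in topology_normalize_cpu_scale() above, a short worked sketch, assuming two CPUs with capacity-dmips-mhz values of 1024 and 512 clocked at 2 GHz and 1 GHz (so freq_factor is 2000000 and 1000000, in kHz); the numbers are illustrative only:

/*
 * Worked example (assumed numbers):
 *   cpu0: raw_capacity = 1024, freq_factor = 2000000  ->  product = 2048000000
 *   cpu1: raw_capacity =  512, freq_factor = 1000000  ->  product =  512000000
 * capacity_scale = max(products) = 2048000000
 *
 * After scaling by SCHED_CAPACITY_SHIFT (i.e. multiplying by 1024):
 *   cpu0: 2048000000 * 1024 / 2048000000 = 1024
 *   cpu1:  512000000 * 1024 / 2048000000 =  256
 */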
+ */ + cpu_clk = of_clk_get(cpu_node, 0); + if (!PTR_ERR_OR_ZERO(cpu_clk)) { + per_cpu(freq_factor, cpu) = + clk_get_rate(cpu_clk) / 1000; + clk_put(cpu_clk); + } } else { if (raw_capacity) { pr_err("cpu_capacity: missing %pOF raw capacity\n", @@ -188,11 +207,8 @@ init_cpu_capacity_callback(struct notifier_block *nb, cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus); - for_each_cpu(cpu, policy->related_cpus) { - raw_capacity[cpu] = topology_get_cpu_scale(cpu) * - policy->cpuinfo.max_freq / 1000UL; - capacity_scale = max(raw_capacity[cpu], capacity_scale); - } + for_each_cpu(cpu, policy->related_cpus) + per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000; if (cpumask_empty(cpus_to_visit)) { topology_normalize_cpu_scale(); @@ -281,7 +297,7 @@ static int __init get_cpu_for_node(struct device_node *node) static int __init parse_core(struct device_node *core, int package_id, int core_id) { - char name[10]; + char name[20]; bool leaf = true; int i = 0; int cpu; @@ -327,7 +343,7 @@ static int __init parse_core(struct device_node *core, int package_id, static int __init parse_cluster(struct device_node *cluster, int depth) { - char name[10]; + char name[20]; bool leaf = true; bool has_cores = false; struct device_node *c; diff --git a/drivers/base/component.c b/drivers/base/component.c index c7879f5ae2fb..e97704104784 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -528,7 +528,8 @@ static void component_unbind(struct component *component, { WARN_ON(!component->bound); - component->ops->unbind(component->dev, master->dev, data); + if (component->ops && component->ops->unbind) + component->ops->unbind(component->dev, master->dev, data); component->bound = false; /* Release all resources claimed in the binding of this component */ diff --git a/drivers/base/core.c b/drivers/base/core.c index 42a672456432..5e3cc1651c78 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -64,12 +64,12 @@ static inline void device_links_write_unlock(void) mutex_unlock(&device_links_lock); } -int device_links_read_lock(void) +int device_links_read_lock(void) __acquires(&device_links_srcu) { return srcu_read_lock(&device_links_srcu); } -void device_links_read_unlock(int idx) +void device_links_read_unlock(int idx) __releases(&device_links_srcu) { srcu_read_unlock(&device_links_srcu, idx); } @@ -523,9 +523,13 @@ static void device_link_add_missing_supplier_links(void) mutex_lock(&wfs_lock); list_for_each_entry_safe(dev, tmp, &wait_for_suppliers, - links.needs_suppliers) - if (!fwnode_call_int_op(dev->fwnode, add_links, dev)) + links.needs_suppliers) { + int ret = fwnode_call_int_op(dev->fwnode, add_links, dev); + if (!ret) list_del_init(&dev->links.needs_suppliers); + else if (ret != -ENODEV) + dev->links.need_for_probe = false; + } mutex_unlock(&wfs_lock); } @@ -718,6 +722,8 @@ static void __device_links_queue_sync_state(struct device *dev, { struct device_link *link; + if (!dev_has_sync_state(dev)) + return; if (dev->state_synced) return; @@ -745,25 +751,31 @@ static void __device_links_queue_sync_state(struct device *dev, /** * device_links_flush_sync_list - Call sync_state() on a list of devices * @list: List of devices to call sync_state() on + * @dont_lock_dev: Device for which lock is already held by the caller * * Calls sync_state() on all the devices that have been queued for it. This - * function is used in conjunction with __device_links_queue_sync_state(). + * function is used in conjunction with __device_links_queue_sync_state(). 
The + * @dont_lock_dev parameter is useful when this function is called from a + * context where a device lock is already held. */ -static void device_links_flush_sync_list(struct list_head *list) +static void device_links_flush_sync_list(struct list_head *list, + struct device *dont_lock_dev) { struct device *dev, *tmp; list_for_each_entry_safe(dev, tmp, list, links.defer_sync) { list_del_init(&dev->links.defer_sync); - device_lock(dev); + if (dev != dont_lock_dev) + device_lock(dev); if (dev->bus->sync_state) dev->bus->sync_state(dev); else if (dev->driver && dev->driver->sync_state) dev->driver->sync_state(dev); - device_unlock(dev); + if (dev != dont_lock_dev) + device_unlock(dev); put_device(dev); } @@ -801,7 +813,7 @@ void device_links_supplier_sync_state_resume(void) out: device_links_write_unlock(); - device_links_flush_sync_list(&sync_list); + device_links_flush_sync_list(&sync_list, NULL); } static int sync_state_resume_initcall(void) @@ -813,7 +825,7 @@ late_initcall(sync_state_resume_initcall); static void __device_links_supplier_defer_sync(struct device *sup) { - if (list_empty(&sup->links.defer_sync)) + if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup)) list_add_tail(&sup->links.defer_sync, &deferred_sync); } @@ -865,6 +877,11 @@ void device_links_driver_bound(struct device *dev) driver_deferred_probe_add(link->consumer); } + if (defer_sync_state_count) + __device_links_supplier_defer_sync(dev); + else + __device_links_queue_sync_state(dev, &sync_list); + list_for_each_entry(link, &dev->links.suppliers, c_node) { if (!(link->flags & DL_FLAG_MANAGED)) continue; @@ -883,7 +900,7 @@ void device_links_driver_bound(struct device *dev) device_links_write_unlock(); - device_links_flush_sync_list(&sync_list); + device_links_flush_sync_list(&sync_list, dev); } static void device_link_drop_managed(struct device_link *link) @@ -2328,6 +2345,31 @@ static int device_private_init(struct device *dev) return 0; } +static u32 fw_devlink_flags; +static int __init fw_devlink_setup(char *arg) +{ + if (!arg) + return -EINVAL; + + if (strcmp(arg, "off") == 0) { + fw_devlink_flags = 0; + } else if (strcmp(arg, "permissive") == 0) { + fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY; + } else if (strcmp(arg, "on") == 0) { + fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER; + } else if (strcmp(arg, "rpm") == 0) { + fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER | + DL_FLAG_PM_RUNTIME; + } + return 0; +} +early_param("fw_devlink", fw_devlink_setup); + +u32 fw_devlink_get_flags(void) +{ + return fw_devlink_flags; +} + /** * device_add - add device to device hierarchy. * @dev: device. 
@@ -2362,6 +2404,7 @@ int device_add(struct device *dev) struct class_interface *class_intf; int error = -EINVAL, fw_ret; struct kobject *glue_dir = NULL; + bool is_fwnode_dev = false; dev = get_device(dev); if (!dev) @@ -2459,8 +2502,10 @@ int device_add(struct device *dev) kobject_uevent(&dev->kobj, KOBJ_ADD); - if (dev->fwnode && !dev->fwnode->dev) + if (dev->fwnode && !dev->fwnode->dev) { dev->fwnode->dev = dev; + is_fwnode_dev = true; + } /* * Check if any of the other devices (consumers) have been waiting for @@ -2476,7 +2521,8 @@ int device_add(struct device *dev) */ device_link_add_missing_supplier_links(); - if (fwnode_has_op(dev->fwnode, add_links)) { + if (fw_devlink_flags && is_fwnode_dev && + fwnode_has_op(dev->fwnode, add_links)) { fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev); if (fw_ret == -ENODEV) device_link_wait_for_mandatory_supplier(dev); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 6265871a4af2..9a1c00fbbaef 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -55,7 +55,7 @@ static int cpu_subsys_online(struct device *dev) if (from_nid == NUMA_NO_NODE) return -ENODEV; - ret = cpu_up(cpuid); + ret = cpu_device_up(dev); /* * When hot adding memory to memoryless node and enabling a cpu * on the node, node number of the cpu may internally change. @@ -69,7 +69,7 @@ static int cpu_subsys_online(struct device *dev) static int cpu_subsys_offline(struct device *dev) { - return cpu_down(dev->id); + return cpu_device_down(dev); } void unregister_cpu(struct cpu *cpu) @@ -231,8 +231,7 @@ static struct cpu_attr cpu_attrs[] = { static ssize_t print_cpus_kernel_max(struct device *dev, struct device_attribute *attr, char *buf) { - int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1); - return n; + return sprintf(buf, "%d\n", NR_CPUS - 1); } static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL); @@ -258,13 +257,13 @@ static ssize_t print_cpus_offline(struct device *dev, buf[n++] = ','; if (nr_cpu_ids == total_cpus-1) - n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids); + n += scnprintf(&buf[n], len - n, "%u", nr_cpu_ids); else - n += snprintf(&buf[n], len - n, "%u-%d", + n += scnprintf(&buf[n], len - n, "%u-%d", nr_cpu_ids, total_cpus-1); } - n += snprintf(&buf[n], len - n, "\n"); + n += scnprintf(&buf[n], len - n, "\n"); return n; } static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL); @@ -272,7 +271,7 @@ static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL); static ssize_t print_cpus_isolated(struct device *dev, struct device_attribute *attr, char *buf) { - int n = 0, len = PAGE_SIZE-2; + int n; cpumask_var_t isolated; if (!alloc_cpumask_var(&isolated, GFP_KERNEL)) @@ -280,7 +279,7 @@ static ssize_t print_cpus_isolated(struct device *dev, cpumask_andnot(isolated, cpu_possible_mask, housekeeping_cpumask(HK_FLAG_DOMAIN)); - n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated)); + n = sprintf(buf, "%*pbl\n", cpumask_pr_args(isolated)); free_cpumask_var(isolated); @@ -292,11 +291,7 @@ static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL); static ssize_t print_cpus_nohz_full(struct device *dev, struct device_attribute *attr, char *buf) { - int n = 0, len = PAGE_SIZE-2; - - n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask)); - - return n; + return sprintf(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask)); } static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL); #endif diff --git a/drivers/base/dd.c b/drivers/base/dd.c index b25bcab2a26b..06ec0e851fa1 100644 --- 
a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -224,76 +224,52 @@ static int deferred_devs_show(struct seq_file *s, void *data) } DEFINE_SHOW_ATTRIBUTE(deferred_devs); -static int deferred_probe_timeout = -1; +#ifdef CONFIG_MODULES +/* + * In the case of modules, set the default probe timeout to + * 30 seconds to give userland some time to load needed modules + */ +int driver_deferred_probe_timeout = 30; +#else +/* In the case of !modules, no probe timeout needed */ +int driver_deferred_probe_timeout = -1; +#endif +EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout); + static int __init deferred_probe_timeout_setup(char *str) { int timeout; if (!kstrtoint(str, 10, &timeout)) - deferred_probe_timeout = timeout; + driver_deferred_probe_timeout = timeout; return 1; } __setup("deferred_probe_timeout=", deferred_probe_timeout_setup); -static int __driver_deferred_probe_check_state(struct device *dev) -{ - if (!initcalls_done) - return -EPROBE_DEFER; - - if (!deferred_probe_timeout) { - dev_WARN(dev, "deferred probe timeout, ignoring dependency"); - return -ETIMEDOUT; - } - - return 0; -} - /** * driver_deferred_probe_check_state() - Check deferred probe state * @dev: device to check * - * Returns -ENODEV if init is done and all built-in drivers have had a chance - * to probe (i.e. initcalls are done), -ETIMEDOUT if deferred probe debug - * timeout has expired, or -EPROBE_DEFER if none of those conditions are met. + * Return: + * -ENODEV if initcalls have completed and modules are disabled. + * -ETIMEDOUT if the deferred probe timeout was set and has expired + * and modules are enabled. + * -EPROBE_DEFER in other cases. * * Drivers or subsystems can opt-in to calling this function instead of directly * returning -EPROBE_DEFER. */ int driver_deferred_probe_check_state(struct device *dev) { - int ret; - - ret = __driver_deferred_probe_check_state(dev); - if (ret < 0) - return ret; - - dev_warn(dev, "ignoring dependency for device, assuming no driver"); - - return -ENODEV; -} - -/** - * driver_deferred_probe_check_state_continue() - check deferred probe state - * @dev: device to check - * - * Returns -ETIMEDOUT if deferred probe debug timeout has expired, or - * -EPROBE_DEFER otherwise. - * - * Drivers or subsystems can opt-in to calling this function instead of - * directly returning -EPROBE_DEFER. - * - * This is similar to driver_deferred_probe_check_state(), but it allows the - * subsystem to keep deferring probe after built-in drivers have had a chance - * to probe. One scenario where that is useful is if built-in drivers rely on - * resources that are provided by modular drivers. 
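As a usage illustration for the consolidated driver_deferred_probe_check_state() documented above, a minimal sketch of an error path in a hypothetical consumer driver; the "vdd" supply and the surrounding function are assumptions, not taken from the patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Hypothetical consumer: if the supply's driver has not probed yet, let the
 * deferred-probe core decide between -EPROBE_DEFER, -ETIMEDOUT and -ENODEV
 * (per the Return section above) instead of deferring forever. */
static int example_get_vdd(struct device *dev)
{
	struct regulator *vdd = devm_regulator_get(dev, "vdd");

	if (IS_ERR(vdd)) {
		if (PTR_ERR(vdd) == -EPROBE_DEFER)
			return driver_deferred_probe_check_state(dev);
		return PTR_ERR(vdd);
	}

	return 0;
}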
- */ -int driver_deferred_probe_check_state_continue(struct device *dev) -{ - int ret; + if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) { + dev_warn(dev, "ignoring dependency for device, assuming no driver"); + return -ENODEV; + } - ret = __driver_deferred_probe_check_state(dev); - if (ret < 0) - return ret; + if (!driver_deferred_probe_timeout) { + dev_WARN(dev, "deferred probe timeout, ignoring dependency"); + return -ETIMEDOUT; + } return -EPROBE_DEFER; } @@ -302,7 +278,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work) { struct device_private *private, *p; - deferred_probe_timeout = 0; + driver_deferred_probe_timeout = 0; driver_deferred_probe_trigger(); flush_work(&deferred_probe_work); @@ -336,9 +312,9 @@ static int deferred_probe_initcall(void) driver_deferred_probe_trigger(); flush_work(&deferred_probe_work); - if (deferred_probe_timeout > 0) { + if (driver_deferred_probe_timeout > 0) { schedule_delayed_work(&deferred_probe_timeout_work, - deferred_probe_timeout * HZ); + driver_deferred_probe_timeout * HZ); } return 0; } @@ -668,9 +644,10 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv) */ int driver_probe_done(void) { - pr_debug("%s: probe_count = %d\n", __func__, - atomic_read(&probe_count)); - if (atomic_read(&probe_count)) + int local_probe_count = atomic_read(&probe_count); + + pr_debug("%s: probe_count = %d\n", __func__, local_probe_count); + if (local_probe_count) return -EBUSY; return 0; } @@ -1222,7 +1199,7 @@ void driver_detach(struct device_driver *drv) spin_unlock(&drv->p->klist_devices.k_lock); break; } - dev_prv = list_entry(drv->p->klist_devices.k_list.prev, + dev_prv = list_last_entry(&drv->p->klist_devices.k_list, struct device_private, knode_driver.n_node); dev = dev_prv->device; diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile index 0b2dfa6259c9..e87843408fe6 100644 --- a/drivers/base/firmware_loader/Makefile +++ b/drivers/base/firmware_loader/Makefile @@ -5,5 +5,6 @@ obj-$(CONFIG_FW_LOADER_USER_HELPER) += fallback_table.o obj-$(CONFIG_FW_LOADER) += firmware_class.o firmware_class-objs := main.o firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o +firmware_class-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += fallback_platform.o obj-y += builtin/ diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c index 8704e1bae175..1e9c96e3ed63 100644 --- a/drivers/base/firmware_loader/fallback.c +++ b/drivers/base/firmware_loader/fallback.c @@ -525,7 +525,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, } retval = fw_sysfs_wait_timeout(fw_priv, timeout); - if (retval < 0) { + if (retval < 0 && retval != -ENOENT) { mutex_lock(&fw_lock); fw_load_abort(fw_sysfs); mutex_unlock(&fw_lock); diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h index 21063503e4ea..06f4577733a8 100644 --- a/drivers/base/firmware_loader/fallback.h +++ b/drivers/base/firmware_loader/fallback.h @@ -66,4 +66,14 @@ static inline void unregister_sysfs_loader(void) } #endif /* CONFIG_FW_LOADER_USER_HELPER */ +#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE +int firmware_fallback_platform(struct fw_priv *fw_priv, enum fw_opt opt_flags); +#else +static inline int firmware_fallback_platform(struct fw_priv *fw_priv, + enum fw_opt opt_flags) +{ + return -ENOENT; +} +#endif + #endif /* __FIRMWARE_FALLBACK_H */ diff --git a/drivers/base/firmware_loader/fallback_platform.c b/drivers/base/firmware_loader/fallback_platform.c new file 
mode 100644 index 000000000000..c88c745590fe --- /dev/null +++ b/drivers/base/firmware_loader/fallback_platform.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/efi_embedded_fw.h> +#include <linux/property.h> +#include <linux/security.h> +#include <linux/vmalloc.h> + +#include "fallback.h" +#include "firmware.h" + +int firmware_fallback_platform(struct fw_priv *fw_priv, enum fw_opt opt_flags) +{ + const u8 *data; + size_t size; + int rc; + + if (!(opt_flags & FW_OPT_FALLBACK_PLATFORM)) + return -ENOENT; + + rc = security_kernel_load_data(LOADING_FIRMWARE_EFI_EMBEDDED); + if (rc) + return rc; + + rc = efi_get_embedded_fw(fw_priv->fw_name, &data, &size); + if (rc) + return rc; /* rc == -ENOENT when the fw was not found */ + + fw_priv->data = vmalloc(size); + if (!fw_priv->data) + return -ENOMEM; + + memcpy(fw_priv->data, data, size); + fw_priv->size = size; + fw_state_done(fw_priv); + return 0; +} diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h index 8656e5239a80..25836a6afc9f 100644 --- a/drivers/base/firmware_loader/firmware.h +++ b/drivers/base/firmware_loader/firmware.h @@ -29,6 +29,9 @@ * firmware caching mechanism. * @FW_OPT_NOFALLBACK_SYSFS: Disable the sysfs fallback mechanism. Takes * precedence over &FW_OPT_UEVENT and &FW_OPT_USERHELPER. + * @FW_OPT_FALLBACK_PLATFORM: Enable fallback to device fw copy embedded in + * the platform's main firmware. If both this fallback and the sysfs + * fallback are enabled, then this fallback will be tried first. */ enum fw_opt { FW_OPT_UEVENT = BIT(0), @@ -37,6 +40,7 @@ enum fw_opt { FW_OPT_NO_WARN = BIT(3), FW_OPT_NOCACHE = BIT(4), FW_OPT_NOFALLBACK_SYSFS = BIT(5), + FW_OPT_FALLBACK_PLATFORM = BIT(6), }; enum fw_status { diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 57133a9dad09..76f79913916d 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -493,8 +493,10 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv, } fw_priv->size = 0; - rc = kernel_read_file_from_path(path, &buffer, &size, - msize, id); + + /* load firmware files from the mount namespace of init */ + rc = kernel_read_file_from_path_initns(path, &buffer, + &size, msize, id); if (rc) { if (rc != -ENOENT) dev_warn(device, "loading %s failed with error %d\n", @@ -776,6 +778,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name, fw_decompress_xz); #endif + if (ret == -ENOENT) + ret = firmware_fallback_platform(fw->priv, opt_flags); + if (ret) { if (!(opt_flags & FW_OPT_NO_WARN)) dev_warn(device, @@ -884,6 +889,30 @@ int request_firmware_direct(const struct firmware **firmware_p, EXPORT_SYMBOL_GPL(request_firmware_direct); /** + * firmware_request_platform() - request firmware with platform-fw fallback + * @firmware: pointer to firmware image + * @name: name of firmware file + * @device: device for which firmware is being loaded + * + * This function is similar in behaviour to request_firmware, except that if + * direct filesystem lookup fails, it will fallback to looking for a copy of the + * requested firmware embedded in the platform's main (e.g. UEFI) firmware. 
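A minimal usage sketch for firmware_request_platform(); the firmware file name and the surrounding helper function are assumptions made for illustration:

#include <linux/device.h>
#include <linux/firmware.h>

/* Hypothetical loader: try the regular firmware paths first, then fall back
 * to a copy embedded in the platform's main (e.g. UEFI) firmware. */
static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = firmware_request_platform(&fw, "example-touch.fw", dev);
	if (ret)
		return ret;

	/* ... program fw->data (fw->size bytes) into the hardware ... */

	release_firmware(fw);
	return 0;
}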
+ **/ +int firmware_request_platform(const struct firmware **firmware, + const char *name, struct device *device) +{ + int ret; + + /* Need to pin this module until return */ + __module_get(THIS_MODULE); + ret = _request_firmware(firmware, name, device, NULL, 0, + FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM); + module_put(THIS_MODULE); + return ret; +} +EXPORT_SYMBOL_GPL(firmware_request_platform); + +/** * firmware_request_cache() - cache firmware for suspend so resume can use it * @name: name of firmware file * @device: device for which firmware should be cached for diff --git a/drivers/base/memory.c b/drivers/base/memory.c index b9f474c11393..4086718f6876 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -97,30 +97,13 @@ static ssize_t phys_index_show(struct device *dev, } /* - * Show whether the memory block is likely to be offlineable (or is already - * offline). Once offline, the memory block could be removed. The return - * value does, however, not indicate that there is a way to remove the - * memory block. + * Legacy interface that we cannot remove. Always indicate "removable" + * with CONFIG_MEMORY_HOTREMOVE - bad heuristic. */ static ssize_t removable_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct memory_block *mem = to_memory_block(dev); - unsigned long pfn; - int ret = 1, i; - - if (mem->state != MEM_ONLINE) - goto out; - - for (i = 0; i < sections_per_block; i++) { - if (!present_section_nr(mem->start_section_nr + i)) - continue; - pfn = section_nr_to_pfn(mem->start_section_nr + i); - ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); - } - -out: - return sprintf(buf, "%d\n", ret); + return sprintf(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)); } /* diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 7fa654f1288b..5255550b7c34 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -63,6 +63,28 @@ EXPORT_SYMBOL_GPL(platform_get_resource); #ifdef CONFIG_HAS_IOMEM /** + * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a + * platform device and get resource + * + * @pdev: platform device to use both for memory resource lookup as well as + * resource management + * @index: resource index + * @res: optional output parameter to store a pointer to the obtained resource. 
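A short sketch of a platform driver probe using the helper described above; the register offset, size check and device are assumptions:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Hypothetical probe: map MEM resource 0 and keep the struct resource so
 * that its size can be validated afterwards. */
static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (resource_size(res) < 0x100)
		return -EINVAL;

	writel(0x1, base);	/* e.g. release the block from reset */
	return 0;
}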
+ */ +void __iomem * +devm_platform_get_and_ioremap_resource(struct platform_device *pdev, + unsigned int index, struct resource **res) +{ + struct resource *r; + + r = platform_get_resource(pdev, IORESOURCE_MEM, index); + if (res) + *res = r; + return devm_ioremap_resource(&pdev->dev, r); +} +EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource); + +/** * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform * device * @@ -73,10 +95,7 @@ EXPORT_SYMBOL_GPL(platform_get_resource); void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev, unsigned int index) { - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, index); - return devm_ioremap_resource(&pdev->dev, res); + return devm_platform_get_and_ioremap_resource(pdev, index, NULL); } EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource); @@ -363,10 +382,10 @@ static void setup_pdev_dma_masks(struct platform_device *pdev) { if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - if (!pdev->dma_mask) - pdev->dma_mask = DMA_BIT_MASK(32); - if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dma_mask; + if (!pdev->dev.dma_mask) { + pdev->platform_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &pdev->platform_dma_mask; + } }; /** @@ -662,20 +681,8 @@ struct platform_device *platform_device_register_full( pdev->dev.of_node_reused = pdevinfo->of_node_reused; if (pdevinfo->dma_mask) { - /* - * This memory isn't freed when the device is put, - * I don't have a nice idea for that though. Conceptually - * dma_mask in struct device should not be a pointer. - * See http://thread.gmane.org/gmane.linux.kernel.pci/9081 - */ - pdev->dev.dma_mask = - kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL); - if (!pdev->dev.dma_mask) - goto err; - - kmemleak_ignore(pdev->dev.dma_mask); - - *pdev->dev.dma_mask = pdevinfo->dma_mask; + pdev->platform_dma_mask = pdevinfo->dma_mask; + pdev->dev.dma_mask = &pdev->platform_dma_mask; pdev->dev.coherent_dma_mask = pdevinfo->dma_mask; } @@ -700,7 +707,6 @@ struct platform_device *platform_device_register_full( if (ret) { err: ACPI_COMPANION_SET(&pdev->dev, NULL); - kfree(pdev->dev.dma_mask); platform_device_put(pdev); return ERR_PTR(ret); } diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 959d6d5eb000..0a01df608849 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2653,7 +2653,7 @@ static int genpd_iterate_idle_states(struct device_node *dn, ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL); if (ret <= 0) - return ret; + return ret == -ENOENT ? 0 : ret; /* Loop over the phandles until all the requested entry is found */ of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) { diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 0e99a760aebd..6d1dee7051eb 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -40,6 +40,10 @@ typedef int (*pm_callback_t)(struct device *); +#define list_for_each_entry_rcu_locked(pos, head, member) \ + list_for_each_entry_rcu(pos, head, member, \ + device_links_read_lock_held()) + /* * The entries in the dpm_list list are in a depth first order, simply * because children are guaranteed to be discovered after parents, and @@ -266,7 +270,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async) * callbacks freeing the link objects for the links in the list we're * walking. 
*/ - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) + list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) if (READ_ONCE(link->status) != DL_STATE_DORMANT) dpm_wait(link->supplier, async); @@ -323,7 +327,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async) * continue instead of trying to continue in parallel with its * unregistration). */ - list_for_each_entry_rcu(link, &dev->links.consumers, s_node) + list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node) if (READ_ONCE(link->status) != DL_STATE_DORMANT) dpm_wait(link->consumer, async); @@ -1235,7 +1239,7 @@ static void dpm_superior_set_must_resume(struct device *dev) idx = device_links_read_lock(); - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) + list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) link->supplier->power.must_resume = true; device_links_read_unlock(idx); @@ -1695,7 +1699,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev) idx = device_links_read_lock(); - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { + list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) { spin_lock_irq(&link->supplier->power.lock); link->supplier->power.direct_complete = false; spin_unlock_irq(&link->supplier->power.lock); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 16134a69bf6f..99c7da112c95 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1087,29 +1087,47 @@ int __pm_runtime_resume(struct device *dev, int rpmflags) EXPORT_SYMBOL_GPL(__pm_runtime_resume); /** - * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter. + * pm_runtime_get_if_active - Conditionally bump up the device's usage counter. * @dev: Device to handle. * * Return -EINVAL if runtime PM is disabled for the device. * - * If that's not the case and if the device's runtime PM status is RPM_ACTIVE - * and the runtime PM usage counter is nonzero, increment the counter and - * return 1. Otherwise return 0 without changing the counter. + * Otherwise, if the device's runtime PM status is RPM_ACTIVE and either + * ign_usage_count is true or the device's usage_count is non-zero, increment + * the counter and return 1. Otherwise return 0 without changing the counter. + * + * If ign_usage_count is true, the function can be used to prevent suspending + * the device when its runtime PM status is RPM_ACTIVE. + * + * If ign_usage_count is false, the function can be used to prevent suspending + * the device when both its runtime PM status is RPM_ACTIVE and its usage_count + * is non-zero. + * + * The caller is responsible for putting the device's usage count when the + * return value is greater than zero. */ -int pm_runtime_get_if_in_use(struct device *dev) +int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) { unsigned long flags; int retval; spin_lock_irqsave(&dev->power.lock, flags); - retval = dev->power.disable_depth > 0 ?
-EINVAL : - dev->power.runtime_status == RPM_ACTIVE - && atomic_inc_not_zero(&dev->power.usage_count); + if (dev->power.disable_depth > 0) { + retval = -EINVAL; + } else if (dev->power.runtime_status != RPM_ACTIVE) { + retval = 0; + } else if (ign_usage_count) { + retval = 1; + atomic_inc(&dev->power.usage_count); + } else { + retval = atomic_inc_not_zero(&dev->power.usage_count); + } trace_rpm_usage_rcuidle(dev, 0); spin_unlock_irqrestore(&dev->power.lock, flags); + return retval; } -EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use); +EXPORT_SYMBOL_GPL(pm_runtime_get_if_active); /** * __pm_runtime_set_status - Set runtime PM status of a device. diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 27f3e60608e5..92073ac68473 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -24,6 +24,9 @@ suspend_state_t pm_suspend_target_state; #define pm_suspend_target_state (PM_SUSPEND_ON) #endif +#define list_for_each_entry_rcu_locked(pos, head, member) \ + list_for_each_entry_rcu(pos, head, member, \ + srcu_read_lock_held(&wakeup_srcu)) /* * If set, the suspend/hibernate code will abort transitions to a sleep state * if wakeup events are registered during or immediately before the transition. @@ -241,7 +244,9 @@ void wakeup_source_unregister(struct wakeup_source *ws) { if (ws) { wakeup_source_remove(ws); - wakeup_source_sysfs_remove(ws); + if (ws->dev) + wakeup_source_sysfs_remove(ws); + wakeup_source_destroy(ws); } } @@ -405,7 +410,7 @@ void device_wakeup_arm_wake_irqs(void) int srcuidx; srcuidx = srcu_read_lock(&wakeup_srcu); - list_for_each_entry_rcu(ws, &wakeup_sources, entry) + list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) dev_pm_arm_wake_irq(ws->wakeirq); srcu_read_unlock(&wakeup_srcu, srcuidx); } @@ -421,7 +426,7 @@ void device_wakeup_disarm_wake_irqs(void) int srcuidx; srcuidx = srcu_read_lock(&wakeup_srcu); - list_for_each_entry_rcu(ws, &wakeup_sources, entry) + list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) dev_pm_disarm_wake_irq(ws->wakeirq); srcu_read_unlock(&wakeup_srcu, srcuidx); } @@ -874,7 +879,7 @@ void pm_print_active_wakeup_sources(void) struct wakeup_source *last_activity_ws = NULL; srcuidx = srcu_read_lock(&wakeup_srcu); - list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) { if (ws->active) { pm_pr_dbg("active wakeup source: %s\n", ws->name); active = 1; @@ -1025,7 +1030,7 @@ void pm_wakep_autosleep_enabled(bool set) int srcuidx; srcuidx = srcu_read_lock(&wakeup_srcu); - list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) { spin_lock_irq(&ws->lock); if (ws->autosleep_enabled != set) { ws->autosleep_enabled = set; @@ -1104,7 +1109,7 @@ static void *wakeup_sources_stats_seq_start(struct seq_file *m, } *srcuidx = srcu_read_lock(&wakeup_srcu); - list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) { if (n-- <= 0) return ws; } diff --git a/drivers/base/property.c b/drivers/base/property.c index 511f6d7acdfe..5f35c0ccf5e0 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -566,6 +566,7 @@ const char *fwnode_get_name(const struct fwnode_handle *fwnode) { return fwnode_call_ptr_op(fwnode, get_name); } +EXPORT_SYMBOL_GPL(fwnode_get_name); /** * fwnode_get_name_prefix - Return the prefix of node for printing purposes diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 0b081dee1e95..de8d3543e8fe 100644 --- 
a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -608,6 +608,13 @@ static void software_node_release(struct kobject *kobj) { struct swnode *swnode = kobj_to_swnode(kobj); + if (swnode->parent) { + ida_simple_remove(&swnode->parent->child_ids, swnode->id); + list_del(&swnode->entry); + } else { + ida_simple_remove(&swnode_root_ids, swnode->id); + } + if (swnode->allocated) { property_entries_free(swnode->node->properties); kfree(swnode->node); @@ -773,13 +780,6 @@ void fwnode_remove_software_node(struct fwnode_handle *fwnode) if (!swnode) return; - if (swnode->parent) { - ida_simple_remove(&swnode->parent->child_ids, swnode->id); - list_del(&swnode->entry); - } else { - ida_simple_remove(&swnode_root_ids, swnode->id); - } - kobject_put(&swnode->kobj); } EXPORT_SYMBOL_GPL(fwnode_remove_software_node); diff --git a/drivers/block/Makefile b/drivers/block/Makefile index a53cc1e3a2d3..795facd8cf19 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -6,6 +6,9 @@ # Rewritten to use lists instead of if-statements. # +# needed for trace events +ccflags-y += -I$(src) + obj-$(CONFIG_MAC_FLOPPY) += swim3.o obj-$(CONFIG_BLK_DEV_SWIM) += swim_mod.o obj-$(CONFIG_BLK_DEV_FD) += floppy.o @@ -39,6 +42,9 @@ obj-$(CONFIG_ZRAM) += zram/ obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o null_blk-objs := null_blk_main.o +ifeq ($(CONFIG_BLK_DEV_ZONED), y) +null_blk-$(CONFIG_TRACING) += null_blk_trace.o +endif null_blk-$(CONFIG_BLK_DEV_ZONED) += null_blk_zoned.o skd-y := skd_main.o diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 7b32fb673375..a27804d71e12 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -87,9 +87,9 @@ static ssize_t aoedisk_show_netif(struct device *dev, if (*nd == NULL) return snprintf(page, PAGE_SIZE, "none\n"); for (p = page; nd < ne; nd++) - p += snprintf(p, PAGE_SIZE - (p-page), "%s%s", + p += scnprintf(p, PAGE_SIZE - (p-page), "%s%s", p == page ? 
"" : ",", (*nd)->name); - p += snprintf(p, PAGE_SIZE - (p-page), "\n"); + p += scnprintf(p, PAGE_SIZE - (p-page), "\n"); return p-page; } /* firmware version */ diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 220c5e18aba0..2fb25c348d53 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -381,12 +381,10 @@ static struct brd_device *brd_alloc(int i) spin_lock_init(&brd->brd_lock); INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC); - brd->brd_queue = blk_alloc_queue(GFP_KERNEL); + brd->brd_queue = blk_alloc_queue(brd_make_request, NUMA_NO_NODE); if (!brd->brd_queue) goto out_free_dev; - blk_queue_make_request(brd->brd_queue, brd_make_request); - /* This is so fdisk will align partitions on 4k, because of * direct_access API needing 4k alignment, returning a PFN * (This is only a problem on very small devices <= 4M, diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index a18155cdce41..c094c3c2c5d4 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2801,7 +2801,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig drbd_init_set_defaults(device); - q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE); + q = blk_alloc_queue(drbd_make_request, NUMA_NO_NODE); if (!q) goto out_no_q; device->rq_queue = q; @@ -2828,7 +2828,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig q->backing_dev_info->congested_fn = drbd_congested; q->backing_dev_info->congested_data = device; - blk_queue_make_request(q, drbd_make_request); blk_queue_write_cache(q, true, true); /* Setting the max_hw_sectors to an odd value of 8kibyte here This triggers a max_bio_size message upon first attach or connect */ @@ -3414,22 +3413,11 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev) * the meta-data super block. This function sets MD_DIRTY, and starts a * timer that ensures that within five seconds you have to call drbd_md_sync(). 
*/ -#ifdef DEBUG -void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func) -{ - if (!test_and_set_bit(MD_DIRTY, &device->flags)) { - mod_timer(&device->md_sync_timer, jiffies + HZ); - device->last_md_mark_dirty.line = line; - device->last_md_mark_dirty.func = func; - } -} -#else void drbd_md_mark_dirty(struct drbd_device *device) { if (!test_and_set_bit(MD_DIRTY, &device->flags)) mod_timer(&device->md_sync_timer, jiffies + 5*HZ); } -#endif void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local) { diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 79e216446030..c15e7083b13a 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -33,6 +33,7 @@ #include <linux/random.h> #include <linux/string.h> #include <linux/scatterlist.h> +#include <linux/part_stat.h> #include "drbd_int.h" #include "drbd_protocol.h" #include "drbd_req.h" diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index b7f605c6e231..0dc019da1f8d 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -22,6 +22,7 @@ #include <linux/random.h> #include <linux/string.h> #include <linux/scatterlist.h> +#include <linux/part_stat.h> #include "drbd_int.h" #include "drbd_protocol.h" diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index cd3612e4e2e1..c3daa64cb52c 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -171,7 +171,6 @@ static int print_unex = 1; #include <linux/kernel.h> #include <linux/timer.h> #include <linux/workqueue.h> -#define FDPATCHES #include <linux/fdreg.h> #include <linux/fd.h> #include <linux/hdreg.h> @@ -306,36 +305,26 @@ static bool initialized; /* reverse mapping from unit and fdc to drive */ #define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2)) -#define DP (&drive_params[current_drive]) -#define DRS (&drive_state[current_drive]) -#define DRWE (&write_errors[current_drive]) -#define FDCS (&fdc_state[fdc]) - -#define UDP (&drive_params[drive]) -#define UDRS (&drive_state[drive]) -#define UDRWE (&write_errors[drive]) -#define UFDCS (&fdc_state[FDC(drive)]) - #define PH_HEAD(floppy, head) (((((floppy)->stretch & 2) >> 1) ^ head) << 2) #define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH) -/* read/write */ -#define COMMAND (raw_cmd->cmd[0]) -#define DR_SELECT (raw_cmd->cmd[1]) -#define TRACK (raw_cmd->cmd[2]) -#define HEAD (raw_cmd->cmd[3]) -#define SECTOR (raw_cmd->cmd[4]) -#define SIZECODE (raw_cmd->cmd[5]) -#define SECT_PER_TRACK (raw_cmd->cmd[6]) -#define GAP (raw_cmd->cmd[7]) -#define SIZECODE2 (raw_cmd->cmd[8]) +/* read/write commands */ +#define COMMAND 0 +#define DR_SELECT 1 +#define TRACK 2 +#define HEAD 3 +#define SECTOR 4 +#define SIZECODE 5 +#define SECT_PER_TRACK 6 +#define GAP 7 +#define SIZECODE2 8 #define NR_RW 9 -/* format */ -#define F_SIZECODE (raw_cmd->cmd[2]) -#define F_SECT_PER_TRACK (raw_cmd->cmd[3]) -#define F_GAP (raw_cmd->cmd[4]) -#define F_FILL (raw_cmd->cmd[5]) +/* format commands */ +#define F_SIZECODE 2 +#define F_SECT_PER_TRACK 3 +#define F_GAP 4 +#define F_FILL 5 #define NR_F 6 /* @@ -351,14 +340,14 @@ static bool initialized; #define MAX_REPLIES 16 static unsigned char reply_buffer[MAX_REPLIES]; static int inr; /* size of reply buffer, when called from interrupt */ -#define ST0 (reply_buffer[0]) -#define ST1 (reply_buffer[1]) -#define ST2 (reply_buffer[2]) -#define ST3 (reply_buffer[0]) /* result of GETSTATUS */ -#define R_TRACK (reply_buffer[3]) -#define R_HEAD 
(reply_buffer[4]) -#define R_SECTOR (reply_buffer[5]) -#define R_SIZECODE (reply_buffer[6]) +#define ST0 0 +#define ST1 1 +#define ST2 2 +#define ST3 0 /* result of GETSTATUS */ +#define R_TRACK 3 +#define R_HEAD 4 +#define R_SECTOR 5 +#define R_SIZECODE 6 #define SEL_DLY (2 * HZ / 100) @@ -593,7 +582,7 @@ static int buffer_max = -1; /* fdc related variables, should end up in a struct */ static struct floppy_fdc_state fdc_state[N_FDC]; -static int fdc; /* current fdc */ +static int current_fdc; /* current fdc */ static struct workqueue_struct *floppy_wq; @@ -604,9 +593,19 @@ static unsigned char fsector_t; /* sector in track */ static unsigned char in_sector_offset; /* offset within physical sector, * expressed in units of 512 bytes */ +static inline unsigned char fdc_inb(int fdc, int reg) +{ + return fd_inb(fdc_state[fdc].address + reg); +} + +static inline void fdc_outb(unsigned char value, int fdc, int reg) +{ + fd_outb(value, fdc_state[fdc].address + reg); +} + static inline bool drive_no_geom(int drive) { - return !current_type[drive] && !ITYPE(UDRS->fd_device); + return !current_type[drive] && !ITYPE(drive_state[drive].fd_device); } #ifndef fd_eject @@ -630,7 +629,7 @@ static inline void set_debugt(void) static inline void debugt(const char *func, const char *msg) { - if (DP->flags & DEBUGT) + if (drive_params[current_drive].flags & DEBUGT) pr_info("%s:%s dtime=%lu\n", func, msg, jiffies - debugtimer); } #else @@ -683,10 +682,10 @@ static void __reschedule_timeout(int drive, const char *message) delay = 20UL * HZ; drive = 0; } else - delay = UDP->timeout; + delay = drive_params[drive].timeout; mod_delayed_work(floppy_wq, &fd_timeout, delay); - if (UDP->flags & FD_DEBUG) + if (drive_params[drive].flags & FD_DEBUG) DPRINT("reschedule timeout %s\n", message); timeout_message = message; } @@ -740,33 +739,37 @@ static int disk_change(int drive) { int fdc = FDC(drive); - if (time_before(jiffies, UDRS->select_date + UDP->select_delay)) + if (time_before(jiffies, drive_state[drive].select_date + drive_params[drive].select_delay)) DPRINT("WARNING disk change called early\n"); - if (!(FDCS->dor & (0x10 << UNIT(drive))) || - (FDCS->dor & 3) != UNIT(drive) || fdc != FDC(drive)) { + if (!(fdc_state[fdc].dor & (0x10 << UNIT(drive))) || + (fdc_state[fdc].dor & 3) != UNIT(drive) || fdc != FDC(drive)) { DPRINT("probing disk change on unselected drive\n"); DPRINT("drive=%d fdc=%d dor=%x\n", drive, FDC(drive), - (unsigned int)FDCS->dor); + (unsigned int)fdc_state[fdc].dor); } - debug_dcl(UDP->flags, + debug_dcl(drive_params[drive].flags, "checking disk change line for drive %d\n", drive); - debug_dcl(UDP->flags, "jiffies=%lu\n", jiffies); - debug_dcl(UDP->flags, "disk change line=%x\n", fd_inb(FD_DIR) & 0x80); - debug_dcl(UDP->flags, "flags=%lx\n", UDRS->flags); - - if (UDP->flags & FD_BROKEN_DCL) - return test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); - if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80) { - set_bit(FD_VERIFY_BIT, &UDRS->flags); + debug_dcl(drive_params[drive].flags, "jiffies=%lu\n", jiffies); + debug_dcl(drive_params[drive].flags, "disk change line=%x\n", + fdc_inb(fdc, FD_DIR) & 0x80); + debug_dcl(drive_params[drive].flags, "flags=%lx\n", + drive_state[drive].flags); + + if (drive_params[drive].flags & FD_BROKEN_DCL) + return test_bit(FD_DISK_CHANGED_BIT, + &drive_state[drive].flags); + if ((fdc_inb(fdc, FD_DIR) ^ drive_params[drive].flags) & 0x80) { + set_bit(FD_VERIFY_BIT, &drive_state[drive].flags); /* verify write protection */ - if (UDRS->maxblock) /* mark it changed */ - 
set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); + if (drive_state[drive].maxblock) /* mark it changed */ + set_bit(FD_DISK_CHANGED_BIT, + &drive_state[drive].flags); /* invalidate its geometry */ - if (UDRS->keep_data >= 0) { - if ((UDP->flags & FTD_MSG) && + if (drive_state[drive].keep_data >= 0) { + if ((drive_params[drive].flags & FTD_MSG) && current_type[drive] != NULL) DPRINT("Disk type is undefined after disk change\n"); current_type[drive] = NULL; @@ -775,8 +778,8 @@ static int disk_change(int drive) return 1; } else { - UDRS->last_checked = jiffies; - clear_bit(FD_DISK_NEWCHANGE_BIT, &UDRS->flags); + drive_state[drive].last_checked = jiffies; + clear_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[drive].flags); } return 0; } @@ -799,26 +802,26 @@ static int set_dor(int fdc, char mask, char data) unsigned char newdor; unsigned char olddor; - if (FDCS->address == -1) + if (fdc_state[fdc].address == -1) return -1; - olddor = FDCS->dor; + olddor = fdc_state[fdc].dor; newdor = (olddor & mask) | data; if (newdor != olddor) { unit = olddor & 0x3; if (is_selected(olddor, unit) && !is_selected(newdor, unit)) { drive = REVDRIVE(fdc, unit); - debug_dcl(UDP->flags, + debug_dcl(drive_params[drive].flags, "calling disk change from set_dor\n"); disk_change(drive); } - FDCS->dor = newdor; - fd_outb(newdor, FD_DOR); + fdc_state[fdc].dor = newdor; + fdc_outb(newdor, fdc, FD_DOR); unit = newdor & 0x3; if (!is_selected(olddor, unit) && is_selected(newdor, unit)) { drive = REVDRIVE(fdc, unit); - UDRS->select_date = jiffies; + drive_state[drive].select_date = jiffies; } } return olddor; @@ -826,11 +829,12 @@ static int set_dor(int fdc, char mask, char data) static void twaddle(void) { - if (DP->select_delay) + if (drive_params[current_drive].select_delay) return; - fd_outb(FDCS->dor & ~(0x10 << UNIT(current_drive)), FD_DOR); - fd_outb(FDCS->dor, FD_DOR); - DRS->select_date = jiffies; + fdc_outb(fdc_state[current_fdc].dor & ~(0x10 << UNIT(current_drive)), + current_fdc, FD_DOR); + fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR); + drive_state[current_drive].select_date = jiffies; } /* @@ -841,34 +845,38 @@ static void reset_fdc_info(int mode) { int drive; - FDCS->spec1 = FDCS->spec2 = -1; - FDCS->need_configure = 1; - FDCS->perp_mode = 1; - FDCS->rawcmd = 0; + fdc_state[current_fdc].spec1 = fdc_state[current_fdc].spec2 = -1; + fdc_state[current_fdc].need_configure = 1; + fdc_state[current_fdc].perp_mode = 1; + fdc_state[current_fdc].rawcmd = 0; for (drive = 0; drive < N_DRIVE; drive++) - if (FDC(drive) == fdc && (mode || UDRS->track != NEED_1_RECAL)) - UDRS->track = NEED_2_RECAL; + if (FDC(drive) == current_fdc && + (mode || drive_state[drive].track != NEED_1_RECAL)) + drive_state[drive].track = NEED_2_RECAL; } /* selects the fdc and drive, and enables the fdc's input/dma. 
*/ static void set_fdc(int drive) { + unsigned int new_fdc = current_fdc; + if (drive >= 0 && drive < N_DRIVE) { - fdc = FDC(drive); + new_fdc = FDC(drive); current_drive = drive; } - if (fdc != 1 && fdc != 0) { + if (new_fdc >= N_FDC) { pr_info("bad fdc value\n"); return; } - set_dor(fdc, ~0, 8); + current_fdc = new_fdc; + set_dor(current_fdc, ~0, 8); #if N_FDC > 1 - set_dor(1 - fdc, ~8, 0); + set_dor(1 - current_fdc, ~8, 0); #endif - if (FDCS->rawcmd == 2) + if (fdc_state[current_fdc].rawcmd == 2) reset_fdc_info(1); - if (fd_inb(FD_STATUS) != STATUS_READY) - FDCS->reset = 1; + if (fdc_inb(current_fdc, FD_STATUS) != STATUS_READY) + fdc_state[current_fdc].reset = 1; } /* locks the driver */ @@ -921,19 +929,19 @@ static void floppy_off(unsigned int drive) unsigned long volatile delta; int fdc = FDC(drive); - if (!(FDCS->dor & (0x10 << UNIT(drive)))) + if (!(fdc_state[fdc].dor & (0x10 << UNIT(drive)))) return; del_timer(motor_off_timer + drive); /* make spindle stop in a position which minimizes spinup time * next time */ - if (UDP->rps) { - delta = jiffies - UDRS->first_read_date + HZ - - UDP->spindown_offset; - delta = ((delta * UDP->rps) % HZ) / UDP->rps; + if (drive_params[drive].rps) { + delta = jiffies - drive_state[drive].first_read_date + HZ - + drive_params[drive].spindown_offset; + delta = ((delta * drive_params[drive].rps) % HZ) / drive_params[drive].rps; motor_off_timer[drive].expires = - jiffies + UDP->spindown - delta; + jiffies + drive_params[drive].spindown - delta; } add_timer(motor_off_timer + drive); } @@ -949,20 +957,20 @@ static void scandrives(void) int drive; int saved_drive; - if (DP->select_delay) + if (drive_params[current_drive].select_delay) return; saved_drive = current_drive; for (i = 0; i < N_DRIVE; i++) { drive = (saved_drive + i + 1) % N_DRIVE; - if (UDRS->fd_ref == 0 || UDP->select_delay != 0) + if (drive_state[drive].fd_ref == 0 || drive_params[drive].select_delay != 0) continue; /* skip closed drives */ set_fdc(drive); - if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) & + if (!(set_dor(current_fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) & (0x10 << UNIT(drive)))) /* switch the motor off again, if it was off to * begin with */ - set_dor(fdc, ~(0x10 << UNIT(drive)), 0); + set_dor(current_fdc, ~(0x10 << UNIT(drive)), 0); } set_fdc(saved_drive); } @@ -1008,7 +1016,8 @@ static void cancel_activity(void) * transfer */ static void fd_watchdog(void) { - debug_dcl(DP->flags, "calling disk change from watchdog\n"); + debug_dcl(drive_params[current_drive].flags, + "calling disk change from watchdog\n"); if (disk_change(current_drive)) { DPRINT("disk removed during i/o\n"); @@ -1032,7 +1041,7 @@ static void main_command_interrupt(void) static int fd_wait_for_completion(unsigned long expires, void (*function)(void)) { - if (FDCS->reset) { + if (fdc_state[current_fdc].reset) { reset_fdc(); /* do the reset during sleep to win time * if we don't need to sleep, it's a good * occasion anyways */ @@ -1060,13 +1069,13 @@ static void setup_DMA(void) pr_cont("%x,", raw_cmd->cmd[i]); pr_cont("\n"); cont->done(0); - FDCS->reset = 1; + fdc_state[current_fdc].reset = 1; return; } if (((unsigned long)raw_cmd->kernel_data) % 512) { pr_info("non aligned address: %p\n", raw_cmd->kernel_data); cont->done(0); - FDCS->reset = 1; + fdc_state[current_fdc].reset = 1; return; } f = claim_dma_lock(); @@ -1074,10 +1083,11 @@ static void setup_DMA(void) #ifdef fd_dma_setup if (fd_dma_setup(raw_cmd->kernel_data, raw_cmd->length, (raw_cmd->flags & FD_RAW_READ) ? 
- DMA_MODE_READ : DMA_MODE_WRITE, FDCS->address) < 0) { + DMA_MODE_READ : DMA_MODE_WRITE, + fdc_state[current_fdc].address) < 0) { release_dma_lock(f); cont->done(0); - FDCS->reset = 1; + fdc_state[current_fdc].reset = 1; return; } release_dma_lock(f); @@ -1088,7 +1098,7 @@ static void setup_DMA(void) DMA_MODE_READ : DMA_MODE_WRITE); fd_set_dma_addr(raw_cmd->kernel_data); fd_set_dma_count(raw_cmd->length); - virtual_dma_port = FDCS->address; + virtual_dma_port = fdc_state[current_fdc].address; fd_enable_dma(); release_dma_lock(f); #endif @@ -1102,18 +1112,18 @@ static int wait_til_ready(void) int status; int counter; - if (FDCS->reset) + if (fdc_state[current_fdc].reset) return -1; for (counter = 0; counter < 10000; counter++) { - status = fd_inb(FD_STATUS); + status = fdc_inb(current_fdc, FD_STATUS); if (status & STATUS_READY) return status; } if (initialized) { - DPRINT("Getstatus times out (%x) on fdc %d\n", status, fdc); + DPRINT("Getstatus times out (%x) on fdc %d\n", status, current_fdc); show_floppy(); } - FDCS->reset = 1; + fdc_state[current_fdc].reset = 1; return -1; } @@ -1126,17 +1136,17 @@ static int output_byte(char byte) return -1; if (is_ready_state(status)) { - fd_outb(byte, FD_DATA); + fdc_outb(byte, current_fdc, FD_DATA); output_log[output_log_pos].data = byte; output_log[output_log_pos].status = status; output_log[output_log_pos].jiffies = jiffies; output_log_pos = (output_log_pos + 1) % OLOGSIZE; return 0; } - FDCS->reset = 1; + fdc_state[current_fdc].reset = 1; if (initialized) { DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n", - byte, fdc, status); + byte, current_fdc, status); show_floppy(); } return -1; @@ -1159,16 +1169,16 @@ static int result(void) return i; } if (status == (STATUS_DIR | STATUS_READY | STATUS_BUSY)) - reply_buffer[i] = fd_inb(FD_DATA); + reply_buffer[i] = fdc_inb(current_fdc, FD_DATA); else break; } if (initialized) { DPRINT("get result error. 
Fdc=%d Last status=%x Read bytes=%d\n", - fdc, status, i); + current_fdc, status, i); show_floppy(); } - FDCS->reset = 1; + fdc_state[current_fdc].reset = 1; return -1; } @@ -1205,7 +1215,7 @@ static void perpendicular_mode(void) default: DPRINT("Invalid data rate for perpendicular mode!\n"); cont->done(0); - FDCS->reset = 1; + fdc_state[current_fdc].reset = 1; /* * convenient way to return to * redo without too much hassle @@ -1216,12 +1226,12 @@ static void perpendicular_mode(void) } else perp_mode = 0; - if (FDCS->perp_mode == perp_mode) + if (fdc_state[current_fdc].perp_mode == perp_mode) return; - if (FDCS->version >= FDC_82077_ORIG) { + if (fdc_state[current_fdc].version >= FDC_82077_ORIG) { output_byte(FD_PERPENDICULAR); output_byte(perp_mode); - FDCS->perp_mode = perp_mode; + fdc_state[current_fdc].perp_mode = perp_mode; } else if (perp_mode) { DPRINT("perpendicular mode not supported by this FDC.\n"); } @@ -1276,9 +1286,10 @@ static void fdc_specify(void) int hlt_max_code = 0x7f; int hut_max_code = 0xf; - if (FDCS->need_configure && FDCS->version >= FDC_82072A) { + if (fdc_state[current_fdc].need_configure && + fdc_state[current_fdc].version >= FDC_82072A) { fdc_configure(); - FDCS->need_configure = 0; + fdc_state[current_fdc].need_configure = 0; } switch (raw_cmd->rate & 0x03) { @@ -1287,7 +1298,7 @@ static void fdc_specify(void) break; case 1: dtr = 300; - if (FDCS->version >= FDC_82078) { + if (fdc_state[current_fdc].version >= FDC_82078) { /* chose the default rate table, not the one * where 1 = 2 Mbps */ output_byte(FD_DRIVESPEC); @@ -1302,27 +1313,30 @@ static void fdc_specify(void) break; } - if (FDCS->version >= FDC_82072) { + if (fdc_state[current_fdc].version >= FDC_82072) { scale_dtr = dtr; hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */ hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */ } /* Convert step rate from microseconds to milliseconds and 4 bits */ - srt = 16 - DIV_ROUND_UP(DP->srt * scale_dtr / 1000, NOMINAL_DTR); + srt = 16 - DIV_ROUND_UP(drive_params[current_drive].srt * scale_dtr / 1000, + NOMINAL_DTR); if (slow_floppy) srt = srt / 4; SUPBOUND(srt, 0xf); INFBOUND(srt, 0); - hlt = DIV_ROUND_UP(DP->hlt * scale_dtr / 2, NOMINAL_DTR); + hlt = DIV_ROUND_UP(drive_params[current_drive].hlt * scale_dtr / 2, + NOMINAL_DTR); if (hlt < 0x01) hlt = 0x01; else if (hlt > 0x7f) hlt = hlt_max_code; - hut = DIV_ROUND_UP(DP->hut * scale_dtr / 16, NOMINAL_DTR); + hut = DIV_ROUND_UP(drive_params[current_drive].hut * scale_dtr / 16, + NOMINAL_DTR); if (hut < 0x1) hut = 0x1; else if (hut > 0xf) @@ -1332,11 +1346,12 @@ static void fdc_specify(void) spec2 = (hlt << 1) | (use_virtual_dma & 1); /* If these parameters did not change, just return with success */ - if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) { + if (fdc_state[current_fdc].spec1 != spec1 || + fdc_state[current_fdc].spec2 != spec2) { /* Go ahead and set spec1 and spec2 */ output_byte(FD_SPECIFY); - output_byte(FDCS->spec1 = spec1); - output_byte(FDCS->spec2 = spec2); + output_byte(fdc_state[current_fdc].spec1 = spec1); + output_byte(fdc_state[current_fdc].spec2 = spec2); } } /* fdc_specify */ @@ -1347,52 +1362,55 @@ static void fdc_specify(void) static int fdc_dtr(void) { /* If data rate not already set to desired value, set it. 
*/ - if ((raw_cmd->rate & 3) == FDCS->dtr) + if ((raw_cmd->rate & 3) == fdc_state[current_fdc].dtr) return 0; /* Set dtr */ - fd_outb(raw_cmd->rate & 3, FD_DCR); + fdc_outb(raw_cmd->rate & 3, current_fdc, FD_DCR); /* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB) * need a stabilization period of several milliseconds to be * enforced after data rate changes before R/W operations. * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies) */ - FDCS->dtr = raw_cmd->rate & 3; + fdc_state[current_fdc].dtr = raw_cmd->rate & 3; return fd_wait_for_completion(jiffies + 2UL * HZ / 100, floppy_ready); } /* fdc_dtr */ static void tell_sector(void) { pr_cont(": track %d, head %d, sector %d, size %d", - R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE); + reply_buffer[R_TRACK], reply_buffer[R_HEAD], + reply_buffer[R_SECTOR], + reply_buffer[R_SIZECODE]); } /* tell_sector */ static void print_errors(void) { DPRINT(""); - if (ST0 & ST0_ECE) { + if (reply_buffer[ST0] & ST0_ECE) { pr_cont("Recalibrate failed!"); - } else if (ST2 & ST2_CRC) { + } else if (reply_buffer[ST2] & ST2_CRC) { pr_cont("data CRC error"); tell_sector(); - } else if (ST1 & ST1_CRC) { + } else if (reply_buffer[ST1] & ST1_CRC) { pr_cont("CRC error"); tell_sector(); - } else if ((ST1 & (ST1_MAM | ST1_ND)) || - (ST2 & ST2_MAM)) { + } else if ((reply_buffer[ST1] & (ST1_MAM | ST1_ND)) || + (reply_buffer[ST2] & ST2_MAM)) { if (!probing) { pr_cont("sector not found"); tell_sector(); } else pr_cont("probe failed..."); - } else if (ST2 & ST2_WC) { /* seek error */ + } else if (reply_buffer[ST2] & ST2_WC) { /* seek error */ pr_cont("wrong cylinder"); - } else if (ST2 & ST2_BC) { /* cylinder marked as bad */ + } else if (reply_buffer[ST2] & ST2_BC) { /* cylinder marked as bad */ pr_cont("bad cylinder"); } else { pr_cont("unknown error. 
ST[0..2] are: 0x%x 0x%x 0x%x", - ST0, ST1, ST2); + reply_buffer[ST0], reply_buffer[ST1], + reply_buffer[ST2]); tell_sector(); } pr_cont("\n"); @@ -1411,33 +1429,35 @@ static int interpret_errors(void) if (inr != 7) { DPRINT("-- FDC reply error\n"); - FDCS->reset = 1; + fdc_state[current_fdc].reset = 1; return 1; } /* check IC to find cause of interrupt */ - switch (ST0 & ST0_INTR) { + switch (reply_buffer[ST0] & ST0_INTR) { case 0x40: /* error occurred during command execution */ - if (ST1 & ST1_EOC) + if (reply_buffer[ST1] & ST1_EOC) return 0; /* occurs with pseudo-DMA */ bad = 1; - if (ST1 & ST1_WP) { + if (reply_buffer[ST1] & ST1_WP) { DPRINT("Drive is write protected\n"); - clear_bit(FD_DISK_WRITABLE_BIT, &DRS->flags); + clear_bit(FD_DISK_WRITABLE_BIT, + &drive_state[current_drive].flags); cont->done(0); bad = 2; - } else if (ST1 & ST1_ND) { - set_bit(FD_NEED_TWADDLE_BIT, &DRS->flags); - } else if (ST1 & ST1_OR) { - if (DP->flags & FTD_MSG) + } else if (reply_buffer[ST1] & ST1_ND) { + set_bit(FD_NEED_TWADDLE_BIT, + &drive_state[current_drive].flags); + } else if (reply_buffer[ST1] & ST1_OR) { + if (drive_params[current_drive].flags & FTD_MSG) DPRINT("Over/Underrun - retrying\n"); bad = 0; - } else if (*errors >= DP->max_errors.reporting) { + } else if (*errors >= drive_params[current_drive].max_errors.reporting) { print_errors(); } - if (ST2 & ST2_WC || ST2 & ST2_BC) + if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC) /* wrong cylinder => recal */ - DRS->track = NEED_2_RECAL; + drive_state[current_drive].track = NEED_2_RECAL; return bad; case 0x80: /* invalid command given */ DPRINT("Invalid FDC command given!\n"); @@ -1470,13 +1490,13 @@ static void setup_rw_floppy(void) flags |= FD_RAW_INTR; if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)) { - ready_date = DRS->spinup_date + DP->spinup; + ready_date = drive_state[current_drive].spinup_date + drive_params[current_drive].spinup; /* If spinup will take a long time, rerun scandrives * again just before spinup completion. Beware that * after scandrives, we must again wait for selection. 
*/ - if (time_after(ready_date, jiffies + DP->select_delay)) { - ready_date -= DP->select_delay; + if (time_after(ready_date, jiffies + drive_params[current_drive].select_delay)) { + ready_date -= drive_params[current_drive].select_delay; function = floppy_start; } else function = setup_rw_floppy; @@ -1519,44 +1539,52 @@ static int blind_seek; static void seek_interrupt(void) { debugt(__func__, ""); - if (inr != 2 || (ST0 & 0xF8) != 0x20) { + if (inr != 2 || (reply_buffer[ST0] & 0xF8) != 0x20) { DPRINT("seek failed\n"); - DRS->track = NEED_2_RECAL; + drive_state[current_drive].track = NEED_2_RECAL; cont->error(); cont->redo(); return; } - if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek) { - debug_dcl(DP->flags, + if (drive_state[current_drive].track >= 0 && + drive_state[current_drive].track != reply_buffer[ST1] && + !blind_seek) { + debug_dcl(drive_params[current_drive].flags, "clearing NEWCHANGE flag because of effective seek\n"); - debug_dcl(DP->flags, "jiffies=%lu\n", jiffies); - clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags); + debug_dcl(drive_params[current_drive].flags, "jiffies=%lu\n", + jiffies); + clear_bit(FD_DISK_NEWCHANGE_BIT, + &drive_state[current_drive].flags); /* effective seek */ - DRS->select_date = jiffies; + drive_state[current_drive].select_date = jiffies; } - DRS->track = ST1; + drive_state[current_drive].track = reply_buffer[ST1]; floppy_ready(); } static void check_wp(void) { - if (test_bit(FD_VERIFY_BIT, &DRS->flags)) { + if (test_bit(FD_VERIFY_BIT, &drive_state[current_drive].flags)) { /* check write protection */ output_byte(FD_GETSTATUS); output_byte(UNIT(current_drive)); if (result() != 1) { - FDCS->reset = 1; + fdc_state[current_fdc].reset = 1; return; } - clear_bit(FD_VERIFY_BIT, &DRS->flags); - clear_bit(FD_NEED_TWADDLE_BIT, &DRS->flags); - debug_dcl(DP->flags, + clear_bit(FD_VERIFY_BIT, &drive_state[current_drive].flags); + clear_bit(FD_NEED_TWADDLE_BIT, + &drive_state[current_drive].flags); + debug_dcl(drive_params[current_drive].flags, "checking whether disk is write protected\n"); - debug_dcl(DP->flags, "wp=%x\n", ST3 & 0x40); - if (!(ST3 & 0x40)) - set_bit(FD_DISK_WRITABLE_BIT, &DRS->flags); + debug_dcl(drive_params[current_drive].flags, "wp=%x\n", + reply_buffer[ST3] & 0x40); + if (!(reply_buffer[ST3] & 0x40)) + set_bit(FD_DISK_WRITABLE_BIT, + &drive_state[current_drive].flags); else - clear_bit(FD_DISK_WRITABLE_BIT, &DRS->flags); + clear_bit(FD_DISK_WRITABLE_BIT, + &drive_state[current_drive].flags); } } @@ -1566,32 +1594,34 @@ static void seek_floppy(void) blind_seek = 0; - debug_dcl(DP->flags, "calling disk change from %s\n", __func__); + debug_dcl(drive_params[current_drive].flags, + "calling disk change from %s\n", __func__); - if (!test_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags) && + if (!test_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags) && disk_change(current_drive) && (raw_cmd->flags & FD_RAW_NEED_DISK)) { /* the media changed flag should be cleared after the seek. * If it isn't, this means that there is really no disk in * the drive. 
*/ - set_bit(FD_DISK_CHANGED_BIT, &DRS->flags); + set_bit(FD_DISK_CHANGED_BIT, + &drive_state[current_drive].flags); cont->done(0); cont->redo(); return; } - if (DRS->track <= NEED_1_RECAL) { + if (drive_state[current_drive].track <= NEED_1_RECAL) { recalibrate_floppy(); return; - } else if (test_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags) && + } else if (test_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags) && (raw_cmd->flags & FD_RAW_NEED_DISK) && - (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) { + (drive_state[current_drive].track <= NO_TRACK || drive_state[current_drive].track == raw_cmd->track)) { /* we seek to clear the media-changed condition. Does anybody * know a more elegant way, which works on all drives? */ if (raw_cmd->track) track = raw_cmd->track - 1; else { - if (DP->flags & FD_SILENT_DCL_CLEAR) { - set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0); + if (drive_params[current_drive].flags & FD_SILENT_DCL_CLEAR) { + set_dor(current_fdc, ~(0x10 << UNIT(current_drive)), 0); blind_seek = 1; raw_cmd->flags |= FD_RAW_NEED_SEEK; } @@ -1599,7 +1629,7 @@ static void seek_floppy(void) } } else { check_wp(); - if (raw_cmd->track != DRS->track && + if (raw_cmd->track != drive_state[current_drive].track && (raw_cmd->flags & FD_RAW_NEED_SEEK)) track = raw_cmd->track; else { @@ -1622,9 +1652,9 @@ static void recal_interrupt(void) { debugt(__func__, ""); if (inr != 2) - FDCS->reset = 1; - else if (ST0 & ST0_ECE) { - switch (DRS->track) { + fdc_state[current_fdc].reset = 1; + else if (reply_buffer[ST0] & ST0_ECE) { + switch (drive_state[current_drive].track) { case NEED_1_RECAL: debugt(__func__, "need 1 recal"); /* after a second recalibrate, we still haven't @@ -1642,11 +1672,12 @@ static void recal_interrupt(void) * not to move at recalibration is to * be already at track 0.) Clear the * new change flag */ - debug_dcl(DP->flags, + debug_dcl(drive_params[current_drive].flags, "clearing NEWCHANGE flag because of second recalibrate\n"); - clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags); - DRS->select_date = jiffies; + clear_bit(FD_DISK_NEWCHANGE_BIT, + &drive_state[current_drive].flags); + drive_state[current_drive].select_date = jiffies; /* fall through */ default: debugt(__func__, "default"); @@ -1656,11 +1687,11 @@ static void recal_interrupt(void) * track 0, this might mean that we * started beyond track 80. Try * again. */ - DRS->track = NEED_1_RECAL; + drive_state[current_drive].track = NEED_1_RECAL; break; } } else - DRS->track = ST1; + drive_state[current_drive].track = reply_buffer[ST1]; floppy_ready(); } @@ -1690,20 +1721,20 @@ irqreturn_t floppy_interrupt(int irq, void *dev_id) release_dma_lock(f); do_floppy = NULL; - if (fdc >= N_FDC || FDCS->address == -1) { + if (current_fdc >= N_FDC || fdc_state[current_fdc].address == -1) { /* we don't even know which FDC is the culprit */ pr_info("DOR0=%x\n", fdc_state[0].dor); - pr_info("floppy interrupt on bizarre fdc %d\n", fdc); + pr_info("floppy interrupt on bizarre fdc %d\n", current_fdc); pr_info("handler=%ps\n", handler); is_alive(__func__, "bizarre fdc"); return IRQ_NONE; } - FDCS->reset = 0; + fdc_state[current_fdc].reset = 0; /* We have to clear the reset flag here, because apparently on boxes * with level triggered interrupts (PS/2, Sparc, ...), it is needed to - * emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the - * emission of the SENSEI's. + * emit SENSEI's to clear the interrupt line. And fdc_state[fdc].reset + * blocks the emission of the SENSEI's. 
* It is OK to emit floppy commands because we are in an interrupt * handler here, and thus we have to fear no interference of other * activity. @@ -1722,11 +1753,11 @@ irqreturn_t floppy_interrupt(int irq, void *dev_id) if (do_print) print_result("sensei", inr); max_sensei--; - } while ((ST0 & 0x83) != UNIT(current_drive) && + } while ((reply_buffer[ST0] & 0x83) != UNIT(current_drive) && inr == 2 && max_sensei); } if (!handler) { - FDCS->reset = 1; + fdc_state[current_fdc].reset = 1; return IRQ_NONE; } schedule_bh(handler); @@ -1752,7 +1783,7 @@ static void reset_interrupt(void) { debugt(__func__, ""); result(); /* get the status ready for set_fdc */ - if (FDCS->reset) { + if (fdc_state[current_fdc].reset) { pr_info("reset set in interrupt, calling %ps\n", cont->error); cont->error(); /* a reset just after a reset. BAD! */ } @@ -1768,7 +1799,7 @@ static void reset_fdc(void) unsigned long flags; do_floppy = reset_interrupt; - FDCS->reset = 0; + fdc_state[current_fdc].reset = 0; reset_fdc_info(0); /* Pseudo-DMA may intercept 'reset finished' interrupt. */ @@ -1778,12 +1809,13 @@ static void reset_fdc(void) fd_disable_dma(); release_dma_lock(flags); - if (FDCS->version >= FDC_82072A) - fd_outb(0x80 | (FDCS->dtr & 3), FD_STATUS); + if (fdc_state[current_fdc].version >= FDC_82072A) + fdc_outb(0x80 | (fdc_state[current_fdc].dtr & 3), + current_fdc, FD_STATUS); else { - fd_outb(FDCS->dor & ~0x04, FD_DOR); + fdc_outb(fdc_state[current_fdc].dor & ~0x04, current_fdc, FD_DOR); udelay(FD_RESET_DELAY); - fd_outb(FDCS->dor, FD_DOR); + fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR); } } @@ -1810,7 +1842,7 @@ static void show_floppy(void) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, reply_buffer, resultsize, true); - pr_info("status=%x\n", fd_inb(FD_STATUS)); + pr_info("status=%x\n", fdc_inb(current_fdc, FD_STATUS)); pr_info("fdc_busy=%lu\n", fdc_busy); if (do_floppy) pr_info("do_floppy=%ps\n", do_floppy); @@ -1847,7 +1879,7 @@ static void floppy_shutdown(struct work_struct *arg) if (initialized) DPRINT("floppy timeout called\n"); - FDCS->reset = 1; + fdc_state[current_fdc].reset = 1; if (cont) { cont->done(0); cont->redo(); /* this will recall reset when needed */ @@ -1867,29 +1899,29 @@ static int start_motor(void (*function)(void)) mask = 0xfc; data = UNIT(current_drive); if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)) { - if (!(FDCS->dor & (0x10 << UNIT(current_drive)))) { + if (!(fdc_state[current_fdc].dor & (0x10 << UNIT(current_drive)))) { set_debugt(); /* no read since this drive is running */ - DRS->first_read_date = 0; + drive_state[current_drive].first_read_date = 0; /* note motor start time if motor is not yet running */ - DRS->spinup_date = jiffies; + drive_state[current_drive].spinup_date = jiffies; data |= (0x10 << UNIT(current_drive)); } - } else if (FDCS->dor & (0x10 << UNIT(current_drive))) + } else if (fdc_state[current_fdc].dor & (0x10 << UNIT(current_drive))) mask &= ~(0x10 << UNIT(current_drive)); /* starts motor and selects floppy */ del_timer(motor_off_timer + current_drive); - set_dor(fdc, mask, data); + set_dor(current_fdc, mask, data); /* wait_for_completion also schedules reset if needed. 
*/ - return fd_wait_for_completion(DRS->select_date + DP->select_delay, + return fd_wait_for_completion(drive_state[current_drive].select_date + drive_params[current_drive].select_delay, function); } static void floppy_ready(void) { - if (FDCS->reset) { + if (fdc_state[current_fdc].reset) { reset_fdc(); return; } @@ -1898,9 +1930,10 @@ static void floppy_ready(void) if (fdc_dtr()) return; - debug_dcl(DP->flags, "calling disk change from floppy_ready\n"); + debug_dcl(drive_params[current_drive].flags, + "calling disk change from floppy_ready\n"); if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) && - disk_change(current_drive) && !DP->select_delay) + disk_change(current_drive) && !drive_params[current_drive].select_delay) twaddle(); /* this clears the dcl on certain * drive/controller combinations */ @@ -1929,8 +1962,9 @@ static void floppy_start(void) reschedule_timeout(current_reqD, "floppy start"); scandrives(); - debug_dcl(DP->flags, "setting NEWCHANGE in floppy_start\n"); - set_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags); + debug_dcl(drive_params[current_drive].flags, + "setting NEWCHANGE in floppy_start\n"); + set_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags); floppy_ready(); } @@ -1988,7 +2022,7 @@ static int wait_til_done(void (*handler)(void), bool interruptible) return -EINTR; } - if (FDCS->reset) + if (fdc_state[current_fdc].reset) command_status = FD_COMMAND_ERROR; if (command_status == FD_COMMAND_OKAY) ret = 0; @@ -2029,14 +2063,14 @@ static int next_valid_format(void) { int probed_format; - probed_format = DRS->probed_format; + probed_format = drive_state[current_drive].probed_format; while (1) { - if (probed_format >= 8 || !DP->autodetect[probed_format]) { - DRS->probed_format = 0; + if (probed_format >= 8 || !drive_params[current_drive].autodetect[probed_format]) { + drive_state[current_drive].probed_format = 0; return 1; } - if (floppy_type[DP->autodetect[probed_format]].sect) { - DRS->probed_format = probed_format; + if (floppy_type[drive_params[current_drive].autodetect[probed_format]].sect) { + drive_state[current_drive].probed_format = probed_format; return 0; } probed_format++; @@ -2048,23 +2082,23 @@ static void bad_flp_intr(void) int err_count; if (probing) { - DRS->probed_format++; + drive_state[current_drive].probed_format++; if (!next_valid_format()) return; } err_count = ++(*errors); - INFBOUND(DRWE->badness, err_count); - if (err_count > DP->max_errors.abort) + INFBOUND(write_errors[current_drive].badness, err_count); + if (err_count > drive_params[current_drive].max_errors.abort) cont->done(0); - if (err_count > DP->max_errors.reset) - FDCS->reset = 1; - else if (err_count > DP->max_errors.recal) - DRS->track = NEED_2_RECAL; + if (err_count > drive_params[current_drive].max_errors.reset) + fdc_state[current_fdc].reset = 1; + else if (err_count > drive_params[current_drive].max_errors.recal) + drive_state[current_drive].track = NEED_2_RECAL; } static void set_floppy(int drive) { - int type = ITYPE(UDRS->fd_device); + int type = ITYPE(drive_state[drive].fd_device); if (type) _floppy = floppy_type + type; @@ -2110,28 +2144,28 @@ static void setup_format_params(int track) FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK); raw_cmd->rate = _floppy->rate & 0x43; raw_cmd->cmd_count = NR_F; - COMMAND = FM_MODE(_floppy, FD_FORMAT); - DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, format_req.head); - F_SIZECODE = FD_SIZECODE(_floppy); - F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE; - F_GAP = _floppy->fmt_gap; - F_FILL = FD_FILL_BYTE; + raw_cmd->cmd[COMMAND] = 
FM_MODE(_floppy, FD_FORMAT); + raw_cmd->cmd[DR_SELECT] = UNIT(current_drive) + PH_HEAD(_floppy, format_req.head); + raw_cmd->cmd[F_SIZECODE] = FD_SIZECODE(_floppy); + raw_cmd->cmd[F_SECT_PER_TRACK] = _floppy->sect << 2 >> raw_cmd->cmd[F_SIZECODE]; + raw_cmd->cmd[F_GAP] = _floppy->fmt_gap; + raw_cmd->cmd[F_FILL] = FD_FILL_BYTE; raw_cmd->kernel_data = floppy_track_buffer; - raw_cmd->length = 4 * F_SECT_PER_TRACK; + raw_cmd->length = 4 * raw_cmd->cmd[F_SECT_PER_TRACK]; - if (!F_SECT_PER_TRACK) + if (!raw_cmd->cmd[F_SECT_PER_TRACK]) return; /* allow for about 30ms for data transport per track */ - head_shift = (F_SECT_PER_TRACK + 5) / 6; + head_shift = (raw_cmd->cmd[F_SECT_PER_TRACK] + 5) / 6; /* a ``cylinder'' is two tracks plus a little stepping time */ track_shift = 2 * head_shift + 3; /* position of logical sector 1 on this track */ n = (track_shift * format_req.track + head_shift * format_req.head) - % F_SECT_PER_TRACK; + % raw_cmd->cmd[F_SECT_PER_TRACK]; /* determine interleave */ il = 1; @@ -2139,27 +2173,27 @@ static void setup_format_params(int track) il++; /* initialize field */ - for (count = 0; count < F_SECT_PER_TRACK; ++count) { + for (count = 0; count < raw_cmd->cmd[F_SECT_PER_TRACK]; ++count) { here[count].track = format_req.track; here[count].head = format_req.head; here[count].sect = 0; - here[count].size = F_SIZECODE; + here[count].size = raw_cmd->cmd[F_SIZECODE]; } /* place logical sectors */ - for (count = 1; count <= F_SECT_PER_TRACK; ++count) { + for (count = 1; count <= raw_cmd->cmd[F_SECT_PER_TRACK]; ++count) { here[n].sect = count; - n = (n + il) % F_SECT_PER_TRACK; + n = (n + il) % raw_cmd->cmd[F_SECT_PER_TRACK]; if (here[n].sect) { /* sector busy, find next free sector */ ++n; - if (n >= F_SECT_PER_TRACK) { - n -= F_SECT_PER_TRACK; + if (n >= raw_cmd->cmd[F_SECT_PER_TRACK]) { + n -= raw_cmd->cmd[F_SECT_PER_TRACK]; while (here[n].sect) ++n; } } } if (_floppy->stretch & FD_SECTBASEMASK) { - for (count = 0; count < F_SECT_PER_TRACK; count++) + for (count = 0; count < raw_cmd->cmd[F_SECT_PER_TRACK]; count++) here[count].sect += FD_SECTBASE(_floppy) - 1; } } @@ -2188,7 +2222,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req) set_floppy(drive); if (!_floppy || - _floppy->track > DP->tracks || + _floppy->track > drive_params[current_drive].tracks || tmp_format_req->track >= _floppy->track || tmp_format_req->head >= _floppy->head || (_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) || @@ -2250,21 +2284,21 @@ static void request_done(int uptodate) /* maintain values for invalidation on geometry * change */ block = current_count_sectors + blk_rq_pos(req); - INFBOUND(DRS->maxblock, block); + INFBOUND(drive_state[current_drive].maxblock, block); if (block > _floppy->sect) - DRS->maxtrack = 1; + drive_state[current_drive].maxtrack = 1; floppy_end_request(req, 0); } else { if (rq_data_dir(req) == WRITE) { /* record write error information */ - DRWE->write_errors++; - if (DRWE->write_errors == 1) { - DRWE->first_error_sector = blk_rq_pos(req); - DRWE->first_error_generation = DRS->generation; + write_errors[current_drive].write_errors++; + if (write_errors[current_drive].write_errors == 1) { + write_errors[current_drive].first_error_sector = blk_rq_pos(req); + write_errors[current_drive].first_error_generation = drive_state[current_drive].generation; } - DRWE->last_error_sector = blk_rq_pos(req); - DRWE->last_error_generation = DRS->generation; + write_errors[current_drive].last_error_sector = blk_rq_pos(req); + 
write_errors[current_drive].last_error_generation = drive_state[current_drive].generation; } floppy_end_request(req, BLK_STS_IOERR); } @@ -2278,43 +2312,46 @@ static void rw_interrupt(void) int heads; int nr_sectors; - if (R_HEAD >= 2) { + if (reply_buffer[R_HEAD] >= 2) { /* some Toshiba floppy controllers occasionnally seem to * return bogus interrupts after read/write operations, which * can be recognized by a bad head number (>= 2) */ return; } - if (!DRS->first_read_date) - DRS->first_read_date = jiffies; + if (!drive_state[current_drive].first_read_date) + drive_state[current_drive].first_read_date = jiffies; nr_sectors = 0; - ssize = DIV_ROUND_UP(1 << SIZECODE, 4); + ssize = DIV_ROUND_UP(1 << raw_cmd->cmd[SIZECODE], 4); - if (ST1 & ST1_EOC) + if (reply_buffer[ST1] & ST1_EOC) eoc = 1; else eoc = 0; - if (COMMAND & 0x80) + if (raw_cmd->cmd[COMMAND] & 0x80) heads = 2; else heads = 1; - nr_sectors = (((R_TRACK - TRACK) * heads + - R_HEAD - HEAD) * SECT_PER_TRACK + - R_SECTOR - SECTOR + eoc) << SIZECODE >> 2; + nr_sectors = (((reply_buffer[R_TRACK] - raw_cmd->cmd[TRACK]) * heads + + reply_buffer[R_HEAD] - raw_cmd->cmd[HEAD]) * raw_cmd->cmd[SECT_PER_TRACK] + + reply_buffer[R_SECTOR] - raw_cmd->cmd[SECTOR] + eoc) << raw_cmd->cmd[SIZECODE] >> 2; if (nr_sectors / ssize > DIV_ROUND_UP(in_sector_offset + current_count_sectors, ssize)) { DPRINT("long rw: %x instead of %lx\n", nr_sectors, current_count_sectors); - pr_info("rs=%d s=%d\n", R_SECTOR, SECTOR); - pr_info("rh=%d h=%d\n", R_HEAD, HEAD); - pr_info("rt=%d t=%d\n", R_TRACK, TRACK); + pr_info("rs=%d s=%d\n", reply_buffer[R_SECTOR], + raw_cmd->cmd[SECTOR]); + pr_info("rh=%d h=%d\n", reply_buffer[R_HEAD], + raw_cmd->cmd[HEAD]); + pr_info("rt=%d t=%d\n", reply_buffer[R_TRACK], + raw_cmd->cmd[TRACK]); pr_info("heads=%d eoc=%d\n", heads, eoc); pr_info("spt=%d st=%d ss=%d\n", - SECT_PER_TRACK, fsector_t, ssize); + raw_cmd->cmd[SECT_PER_TRACK], fsector_t, ssize); pr_info("in_sector_offset=%d\n", in_sector_offset); } @@ -2344,7 +2381,7 @@ static void rw_interrupt(void) } if (probing) { - if (DP->flags & FTD_MSG) + if (drive_params[current_drive].flags & FTD_MSG) DPRINT("Auto-detected floppy type %s in fd%d\n", _floppy->name, current_drive); current_type[current_drive] = _floppy; @@ -2352,11 +2389,11 @@ static void rw_interrupt(void) probing = 0; } - if (CT(COMMAND) != FD_READ || + if (CT(raw_cmd->cmd[COMMAND]) != FD_READ || raw_cmd->kernel_data == bio_data(current_req->bio)) { /* transfer directly from buffer */ cont->done(1); - } else if (CT(COMMAND) == FD_READ) { + } else if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) { buffer_track = raw_cmd->track; buffer_drive = current_drive; INFBOUND(buffer_max, nr_sectors + fsector_t); @@ -2415,13 +2452,13 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2) min(max_sector, max_sector_2), blk_rq_sectors(current_req)); - if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE && + if (current_count_sectors <= 0 && CT(raw_cmd->cmd[COMMAND]) == FD_WRITE && buffer_max > fsector_t + blk_rq_sectors(current_req)) current_count_sectors = min_t(int, buffer_max - fsector_t, blk_rq_sectors(current_req)); remaining = current_count_sectors << 9; - if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) { + if (remaining > blk_rq_bytes(current_req) && CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) { DPRINT("in copy buffer\n"); pr_info("current_count_sectors=%ld\n", current_count_sectors); pr_info("remaining=%d\n", remaining >> 9); @@ -2456,16 +2493,16 @@ static void copy_buffer(int ssize, int 
max_sector, int max_sector_2) fsector_t, buffer_min); pr_info("current_count_sectors=%ld\n", current_count_sectors); - if (CT(COMMAND) == FD_READ) + if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) pr_info("read\n"); - if (CT(COMMAND) == FD_WRITE) + if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) pr_info("write\n"); break; } if (((unsigned long)buffer) % 512) DPRINT("%p buffer not aligned\n", buffer); - if (CT(COMMAND) == FD_READ) + if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) memcpy(buffer, dma_buffer, size); else memcpy(dma_buffer, buffer, size); @@ -2483,7 +2520,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2) /* work around a bug in pseudo DMA * (on some FDCs) pseudo DMA does not stop when the CPU stops * sending data. Hence we need a different way to signal the - * transfer length: We use SECT_PER_TRACK. Unfortunately, this + * transfer length: We use raw_cmd->cmd[SECT_PER_TRACK]. Unfortunately, this * does not work with MT, hence we can only transfer one head at * a time */ @@ -2492,18 +2529,18 @@ static void virtualdmabug_workaround(void) int hard_sectors; int end_sector; - if (CT(COMMAND) == FD_WRITE) { - COMMAND &= ~0x80; /* switch off multiple track mode */ + if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) { + raw_cmd->cmd[COMMAND] &= ~0x80; /* switch off multiple track mode */ - hard_sectors = raw_cmd->length >> (7 + SIZECODE); - end_sector = SECTOR + hard_sectors - 1; - if (end_sector > SECT_PER_TRACK) { + hard_sectors = raw_cmd->length >> (7 + raw_cmd->cmd[SIZECODE]); + end_sector = raw_cmd->cmd[SECTOR] + hard_sectors - 1; + if (end_sector > raw_cmd->cmd[SECT_PER_TRACK]) { pr_info("too many sectors %d > %d\n", - end_sector, SECT_PER_TRACK); + end_sector, raw_cmd->cmd[SECT_PER_TRACK]); return; } - SECT_PER_TRACK = end_sector; - /* make sure SECT_PER_TRACK + raw_cmd->cmd[SECT_PER_TRACK] = end_sector; + /* make sure raw_cmd->cmd[SECT_PER_TRACK] * points to end of transfer */ } } @@ -2536,10 +2573,10 @@ static int make_raw_rw_request(void) raw_cmd->cmd_count = NR_RW; if (rq_data_dir(current_req) == READ) { raw_cmd->flags |= FD_RAW_READ; - COMMAND = FM_MODE(_floppy, FD_READ); + raw_cmd->cmd[COMMAND] = FM_MODE(_floppy, FD_READ); } else if (rq_data_dir(current_req) == WRITE) { raw_cmd->flags |= FD_RAW_WRITE; - COMMAND = FM_MODE(_floppy, FD_WRITE); + raw_cmd->cmd[COMMAND] = FM_MODE(_floppy, FD_WRITE); } else { DPRINT("%s: unknown command\n", __func__); return 0; @@ -2547,24 +2584,24 @@ static int make_raw_rw_request(void) max_sector = _floppy->sect * _floppy->head; - TRACK = (int)blk_rq_pos(current_req) / max_sector; + raw_cmd->cmd[TRACK] = (int)blk_rq_pos(current_req) / max_sector; fsector_t = (int)blk_rq_pos(current_req) % max_sector; - if (_floppy->track && TRACK >= _floppy->track) { + if (_floppy->track && raw_cmd->cmd[TRACK] >= _floppy->track) { if (blk_rq_cur_sectors(current_req) & 1) { current_count_sectors = 1; return 1; } else return 0; } - HEAD = fsector_t / _floppy->sect; + raw_cmd->cmd[HEAD] = fsector_t / _floppy->sect; if (((_floppy->stretch & (FD_SWAPSIDES | FD_SECTBASEMASK)) || - test_bit(FD_NEED_TWADDLE_BIT, &DRS->flags)) && + test_bit(FD_NEED_TWADDLE_BIT, &drive_state[current_drive].flags)) && fsector_t < _floppy->sect) max_sector = _floppy->sect; /* 2M disks have phantom sectors on the first track */ - if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)) { + if ((_floppy->rate & FD_2M) && (!raw_cmd->cmd[TRACK]) && (!raw_cmd->cmd[HEAD])) { max_sector = 2 * _floppy->sect / 3; if (fsector_t >= max_sector) { current_count_sectors = @@ -2572,23 +2609,24 @@ static int 
make_raw_rw_request(void) blk_rq_sectors(current_req)); return 1; } - SIZECODE = 2; + raw_cmd->cmd[SIZECODE] = 2; } else - SIZECODE = FD_SIZECODE(_floppy); + raw_cmd->cmd[SIZECODE] = FD_SIZECODE(_floppy); raw_cmd->rate = _floppy->rate & 0x43; - if ((_floppy->rate & FD_2M) && (TRACK || HEAD) && raw_cmd->rate == 2) + if ((_floppy->rate & FD_2M) && + (raw_cmd->cmd[TRACK] || raw_cmd->cmd[HEAD]) && raw_cmd->rate == 2) raw_cmd->rate = 1; - if (SIZECODE) - SIZECODE2 = 0xff; + if (raw_cmd->cmd[SIZECODE]) + raw_cmd->cmd[SIZECODE2] = 0xff; else - SIZECODE2 = 0x80; - raw_cmd->track = TRACK << STRETCH(_floppy); - DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, HEAD); - GAP = _floppy->gap; - ssize = DIV_ROUND_UP(1 << SIZECODE, 4); - SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE; - SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) + + raw_cmd->cmd[SIZECODE2] = 0x80; + raw_cmd->track = raw_cmd->cmd[TRACK] << STRETCH(_floppy); + raw_cmd->cmd[DR_SELECT] = UNIT(current_drive) + PH_HEAD(_floppy, raw_cmd->cmd[HEAD]); + raw_cmd->cmd[GAP] = _floppy->gap; + ssize = DIV_ROUND_UP(1 << raw_cmd->cmd[SIZECODE], 4); + raw_cmd->cmd[SECT_PER_TRACK] = _floppy->sect << 2 >> raw_cmd->cmd[SIZECODE]; + raw_cmd->cmd[SECTOR] = ((fsector_t % _floppy->sect) << 2 >> raw_cmd->cmd[SIZECODE]) + FD_SECTBASE(_floppy); /* tracksize describes the size which can be filled up with sectors @@ -2596,24 +2634,24 @@ static int make_raw_rw_request(void) */ tracksize = _floppy->sect - _floppy->sect % ssize; if (tracksize < _floppy->sect) { - SECT_PER_TRACK++; + raw_cmd->cmd[SECT_PER_TRACK]++; if (tracksize <= fsector_t % _floppy->sect) - SECTOR--; + raw_cmd->cmd[SECTOR]--; /* if we are beyond tracksize, fill up using smaller sectors */ while (tracksize <= fsector_t % _floppy->sect) { while (tracksize + ssize > _floppy->sect) { - SIZECODE--; + raw_cmd->cmd[SIZECODE]--; ssize >>= 1; } - SECTOR++; - SECT_PER_TRACK++; + raw_cmd->cmd[SECTOR]++; + raw_cmd->cmd[SECT_PER_TRACK]++; tracksize += ssize; } - max_sector = HEAD * _floppy->sect + tracksize; - } else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing) { + max_sector = raw_cmd->cmd[HEAD] * _floppy->sect + tracksize; + } else if (!raw_cmd->cmd[TRACK] && !raw_cmd->cmd[HEAD] && !(_floppy->rate & FD_2M) && probing) { max_sector = _floppy->sect; - } else if (!HEAD && CT(COMMAND) == FD_WRITE) { + } else if (!raw_cmd->cmd[HEAD] && CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) { /* for virtual DMA bug workaround */ max_sector = _floppy->sect; } @@ -2625,12 +2663,12 @@ static int make_raw_rw_request(void) (current_drive == buffer_drive) && (fsector_t >= buffer_min) && (fsector_t < buffer_max)) { /* data already in track buffer */ - if (CT(COMMAND) == FD_READ) { + if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) { copy_buffer(1, max_sector, buffer_max); return 1; } } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) { - if (CT(COMMAND) == FD_WRITE) { + if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) { unsigned int sectors; sectors = fsector_t + blk_rq_sectors(current_req); @@ -2641,7 +2679,7 @@ static int make_raw_rw_request(void) } raw_cmd->flags &= ~FD_RAW_WRITE; raw_cmd->flags |= FD_RAW_READ; - COMMAND = FM_MODE(_floppy, FD_READ); + raw_cmd->cmd[COMMAND] = FM_MODE(_floppy, FD_READ); } else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) { unsigned long dma_limit; int direct, indirect; @@ -2674,9 +2712,9 @@ static int make_raw_rw_request(void) */ if (!direct || (indirect * 2 > direct * 3 && - *errors < DP->max_errors.read_track && + *errors < 
drive_params[current_drive].max_errors.read_track && ((!probing || - (DP->read_track & (1 << DRS->probed_format)))))) { + (drive_params[current_drive].read_track & (1 << drive_state[current_drive].probed_format)))))) { max_size = blk_rq_sectors(current_req); } else { raw_cmd->kernel_data = bio_data(current_req->bio); @@ -2692,7 +2730,7 @@ static int make_raw_rw_request(void) } } - if (CT(COMMAND) == FD_READ) + if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) max_size = max_sector; /* unbounded */ /* claim buffer track if needed */ @@ -2700,7 +2738,7 @@ static int make_raw_rw_request(void) buffer_drive != current_drive || /* bad drive */ fsector_t > buffer_max || fsector_t < buffer_min || - ((CT(COMMAND) == FD_READ || + ((CT(raw_cmd->cmd[COMMAND]) == FD_READ || (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) && max_sector > 2 * max_buffer_sectors + buffer_min && max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)) { @@ -2712,7 +2750,7 @@ static int make_raw_rw_request(void) raw_cmd->kernel_data = floppy_track_buffer + ((aligned_sector_t - buffer_min) << 9); - if (CT(COMMAND) == FD_WRITE) { + if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) { /* copy write buffer to track buffer. * if we get here, we know that the write * is either aligned or the data already in the buffer @@ -2734,10 +2772,10 @@ static int make_raw_rw_request(void) raw_cmd->length <<= 9; if ((raw_cmd->length < current_count_sectors << 9) || (raw_cmd->kernel_data != bio_data(current_req->bio) && - CT(COMMAND) == FD_WRITE && + CT(raw_cmd->cmd[COMMAND]) == FD_WRITE && (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max || aligned_sector_t < buffer_min)) || - raw_cmd->length % (128 << SIZECODE) || + raw_cmd->length % (128 << raw_cmd->cmd[SIZECODE]) || raw_cmd->length <= 0 || current_count_sectors <= 0) { DPRINT("fractionary current count b=%lx s=%lx\n", raw_cmd->length, current_count_sectors); @@ -2748,9 +2786,10 @@ static int make_raw_rw_request(void) current_count_sectors); pr_info("st=%d ast=%d mse=%d msi=%d\n", fsector_t, aligned_sector_t, max_sector, max_size); - pr_info("ssize=%x SIZECODE=%d\n", ssize, SIZECODE); + pr_info("ssize=%x SIZECODE=%d\n", ssize, raw_cmd->cmd[SIZECODE]); pr_info("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n", - COMMAND, SECTOR, HEAD, TRACK); + raw_cmd->cmd[COMMAND], raw_cmd->cmd[SECTOR], + raw_cmd->cmd[HEAD], raw_cmd->cmd[TRACK]); pr_info("buffer drive=%d\n", buffer_drive); pr_info("buffer track=%d\n", buffer_track); pr_info("buffer_min=%d\n", buffer_min); @@ -2769,9 +2808,9 @@ static int make_raw_rw_request(void) fsector_t, buffer_min, raw_cmd->length >> 9); pr_info("current_count_sectors=%ld\n", current_count_sectors); - if (CT(COMMAND) == FD_READ) + if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) pr_info("read\n"); - if (CT(COMMAND) == FD_WRITE) + if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) pr_info("write\n"); return 0; } @@ -2838,14 +2877,14 @@ do_request: disk_change(current_drive); if (test_bit(current_drive, &fake_change) || - test_bit(FD_DISK_CHANGED_BIT, &DRS->flags)) { + test_bit(FD_DISK_CHANGED_BIT, &drive_state[current_drive].flags)) { DPRINT("disk absent or changed during operation\n"); request_done(0); goto do_request; } if (!_floppy) { /* Autodetection */ if (!probing) { - DRS->probed_format = 0; + drive_state[current_drive].probed_format = 0; if (next_valid_format()) { DPRINT("no autodetectable formats\n"); _floppy = NULL; @@ -2854,7 +2893,7 @@ do_request: } } probing = 1; - _floppy = floppy_type + DP->autodetect[DRS->probed_format]; + _floppy = floppy_type + 
drive_params[current_drive].autodetect[drive_state[current_drive].probed_format]; } else probing = 0; errors = &(current_req->error_count); @@ -2864,7 +2903,7 @@ do_request: goto do_request; } - if (test_bit(FD_NEED_TWADDLE_BIT, &DRS->flags)) + if (test_bit(FD_NEED_TWADDLE_BIT, &drive_state[current_drive].flags)) twaddle(); schedule_bh(floppy_start); debugt(__func__, "queue fd request"); @@ -2933,8 +2972,9 @@ static int poll_drive(bool interruptible, int flag) raw_cmd->track = 0; raw_cmd->cmd_count = 0; cont = &poll_cont; - debug_dcl(DP->flags, "setting NEWCHANGE in poll_drive\n"); - set_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags); + debug_dcl(drive_params[current_drive].flags, + "setting NEWCHANGE in poll_drive\n"); + set_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags); return wait_til_done(floppy_ready, interruptible); } @@ -2964,8 +3004,8 @@ static int user_reset_fdc(int drive, int arg, bool interruptible) return -EINTR; if (arg == FD_RESET_ALWAYS) - FDCS->reset = 1; - if (FDCS->reset) { + fdc_state[current_fdc].reset = 1; + if (fdc_state[current_fdc].reset) { cont = &reset_cont; ret = wait_til_done(reset_fdc, interruptible); if (ret == -EINTR) @@ -2998,8 +3038,8 @@ static const char *drive_name(int type, int drive) if (type) floppy = floppy_type + type; else { - if (UDP->native_format) - floppy = floppy_type + UDP->native_format; + if (drive_params[drive].native_format) + floppy = floppy_type + drive_params[drive].native_format; else return "(null)"; } @@ -3176,23 +3216,23 @@ static int raw_cmd_ioctl(int cmd, void __user *param) int ret2; int ret; - if (FDCS->rawcmd <= 1) - FDCS->rawcmd = 1; + if (fdc_state[current_fdc].rawcmd <= 1) + fdc_state[current_fdc].rawcmd = 1; for (drive = 0; drive < N_DRIVE; drive++) { - if (FDC(drive) != fdc) + if (FDC(drive) != current_fdc) continue; if (drive == current_drive) { - if (UDRS->fd_ref > 1) { - FDCS->rawcmd = 2; + if (drive_state[drive].fd_ref > 1) { + fdc_state[current_fdc].rawcmd = 2; break; } - } else if (UDRS->fd_ref) { - FDCS->rawcmd = 2; + } else if (drive_state[drive].fd_ref) { + fdc_state[current_fdc].rawcmd = 2; break; } } - if (FDCS->reset) + if (fdc_state[current_fdc].reset) return -EIO; ret = raw_cmd_copyin(cmd, param, &my_raw_cmd); @@ -3204,12 +3244,13 @@ static int raw_cmd_ioctl(int cmd, void __user *param) raw_cmd = my_raw_cmd; cont = &raw_cmd_cont; ret = wait_til_done(floppy_start, true); - debug_dcl(DP->flags, "calling disk change from raw_cmd ioctl\n"); + debug_dcl(drive_params[current_drive].flags, + "calling disk change from raw_cmd ioctl\n"); - if (ret != -EINTR && FDCS->reset) + if (ret != -EINTR && fdc_state[current_fdc].reset) ret = -EIO; - DRS->track = NO_TRACK; + drive_state[current_drive].track = NO_TRACK; ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd); if (!ret) @@ -3237,9 +3278,9 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, (int)g->head <= 0 || /* check for overflow in max_sector */ (int)(g->sect * g->head) <= 0 || - /* check for zero in F_SECT_PER_TRACK */ + /* check for zero in raw_cmd->cmd[F_SECT_PER_TRACK] */ (unsigned char)((g->sect << 2) >> FD_SIZECODE(g)) == 0 || - g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) || + g->track <= 0 || g->track > drive_params[drive].tracks >> STRETCH(g) || /* check if reserved bits are set */ (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0) return -EINVAL; @@ -3282,16 +3323,16 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, current_type[drive] = &user_params[drive]; floppy_sizes[drive] = 
user_params[drive].size; if (cmd == FDDEFPRM) - DRS->keep_data = -1; + drive_state[current_drive].keep_data = -1; else - DRS->keep_data = 1; + drive_state[current_drive].keep_data = 1; /* invalidation. Invalidate only when needed, i.e. * when there are already sectors in the buffer cache * whose number will change. This is useful, because * mtools often changes the geometry of the disk after * looking at the boot block */ - if (DRS->maxblock > user_params[drive].sect || - DRS->maxtrack || + if (drive_state[current_drive].maxblock > user_params[drive].sect || + drive_state[current_drive].maxtrack || ((user_params[drive].sect ^ oldStretch) & (FD_SWAPSIDES | FD_SECTBASEMASK))) invalidate_drive(bdev); @@ -3404,7 +3445,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int unsigned long param) { int drive = (long)bdev->bd_disk->private_data; - int type = ITYPE(UDRS->fd_device); + int type = ITYPE(drive_state[drive].fd_device); int i; int ret; int size; @@ -3452,7 +3493,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int switch (cmd) { case FDEJECT: - if (UDRS->fd_ref != 1) + if (drive_state[drive].fd_ref != 1) /* somebody else has this drive open */ return -EBUSY; if (lock_fdc(drive)) @@ -3462,8 +3503,8 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int * non-Sparc architectures */ ret = fd_eject(UNIT(drive)); - set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); - set_bit(FD_VERIFY_BIT, &UDRS->flags); + set_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags); + set_bit(FD_VERIFY_BIT, &drive_state[drive].flags); process_fd_request(); return ret; case FDCLRPRM: @@ -3471,7 +3512,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int return -EINTR; current_type[drive] = NULL; floppy_sizes[drive] = MAX_DISK_SIZE << 1; - UDRS->keep_data = 0; + drive_state[drive].keep_data = 0; return invalidate_drive(bdev); case FDSETPRM: case FDDEFPRM: @@ -3486,17 +3527,17 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int outparam = &inparam.g; break; case FDMSGON: - UDP->flags |= FTD_MSG; + drive_params[drive].flags |= FTD_MSG; return 0; case FDMSGOFF: - UDP->flags &= ~FTD_MSG; + drive_params[drive].flags &= ~FTD_MSG; return 0; case FDFMTBEG: if (lock_fdc(drive)) return -EINTR; if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) return -EINTR; - ret = UDRS->flags; + ret = drive_state[drive].flags; process_fd_request(); if (ret & FD_VERIFY) return -ENODEV; @@ -3504,7 +3545,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int return -EROFS; return 0; case FDFMTTRK: - if (UDRS->fd_ref != 1) + if (drive_state[drive].fd_ref != 1) return -EBUSY; return do_format(drive, &inparam.f); case FDFMTEND: @@ -3513,13 +3554,13 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int return -EINTR; return invalidate_drive(bdev); case FDSETEMSGTRESH: - UDP->max_errors.reporting = (unsigned short)(param & 0x0f); + drive_params[drive].max_errors.reporting = (unsigned short)(param & 0x0f); return 0; case FDGETMAXERRS: - outparam = &UDP->max_errors; + outparam = &drive_params[drive].max_errors; break; case FDSETMAXERRS: - UDP->max_errors = inparam.max_errors; + drive_params[drive].max_errors = inparam.max_errors; break; case FDGETDRVTYP: outparam = drive_name(type, drive); @@ -3529,10 +3570,10 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int if 
(!valid_floppy_drive_params(inparam.dp.autodetect, inparam.dp.native_format)) return -EINVAL; - *UDP = inparam.dp; + drive_params[drive] = inparam.dp; break; case FDGETDRVPRM: - outparam = UDP; + outparam = &drive_params[drive]; break; case FDPOLLDRVSTAT: if (lock_fdc(drive)) @@ -3542,18 +3583,18 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int process_fd_request(); /* fall through */ case FDGETDRVSTAT: - outparam = UDRS; + outparam = &drive_state[drive]; break; case FDRESET: return user_reset_fdc(drive, (int)param, true); case FDGETFDCSTAT: - outparam = UFDCS; + outparam = &fdc_state[FDC(drive)]; break; case FDWERRORCLR: - memset(UDRWE, 0, sizeof(*UDRWE)); + memset(&write_errors[drive], 0, sizeof(write_errors[drive])); return 0; case FDWERRORGET: - outparam = UDRWE; + outparam = &write_errors[drive]; break; case FDRAWCMD: if (type) @@ -3689,7 +3730,7 @@ static int compat_set_geometry(struct block_device *bdev, fmode_t mode, unsigned mutex_lock(&floppy_mutex); drive = (long)bdev->bd_disk->private_data; - type = ITYPE(UDRS->fd_device); + type = ITYPE(drive_state[drive].fd_device); err = set_geometry(cmd == FDSETPRM32 ? FDSETPRM : FDDEFPRM, &v, drive, type, bdev); mutex_unlock(&floppy_mutex); @@ -3705,7 +3746,8 @@ static int compat_get_prm(int drive, memset(&v, 0, sizeof(v)); mutex_lock(&floppy_mutex); - err = get_floppy_geometry(drive, ITYPE(UDRS->fd_device), &p); + err = get_floppy_geometry(drive, ITYPE(drive_state[drive].fd_device), + &p); if (err) { mutex_unlock(&floppy_mutex); return err; @@ -3729,25 +3771,26 @@ static int compat_setdrvprm(int drive, if (!valid_floppy_drive_params(v.autodetect, v.native_format)) return -EINVAL; mutex_lock(&floppy_mutex); - UDP->cmos = v.cmos; - UDP->max_dtr = v.max_dtr; - UDP->hlt = v.hlt; - UDP->hut = v.hut; - UDP->srt = v.srt; - UDP->spinup = v.spinup; - UDP->spindown = v.spindown; - UDP->spindown_offset = v.spindown_offset; - UDP->select_delay = v.select_delay; - UDP->rps = v.rps; - UDP->tracks = v.tracks; - UDP->timeout = v.timeout; - UDP->interleave_sect = v.interleave_sect; - UDP->max_errors = v.max_errors; - UDP->flags = v.flags; - UDP->read_track = v.read_track; - memcpy(UDP->autodetect, v.autodetect, sizeof(v.autodetect)); - UDP->checkfreq = v.checkfreq; - UDP->native_format = v.native_format; + drive_params[drive].cmos = v.cmos; + drive_params[drive].max_dtr = v.max_dtr; + drive_params[drive].hlt = v.hlt; + drive_params[drive].hut = v.hut; + drive_params[drive].srt = v.srt; + drive_params[drive].spinup = v.spinup; + drive_params[drive].spindown = v.spindown; + drive_params[drive].spindown_offset = v.spindown_offset; + drive_params[drive].select_delay = v.select_delay; + drive_params[drive].rps = v.rps; + drive_params[drive].tracks = v.tracks; + drive_params[drive].timeout = v.timeout; + drive_params[drive].interleave_sect = v.interleave_sect; + drive_params[drive].max_errors = v.max_errors; + drive_params[drive].flags = v.flags; + drive_params[drive].read_track = v.read_track; + memcpy(drive_params[drive].autodetect, v.autodetect, + sizeof(v.autodetect)); + drive_params[drive].checkfreq = v.checkfreq; + drive_params[drive].native_format = v.native_format; mutex_unlock(&floppy_mutex); return 0; } @@ -3759,25 +3802,26 @@ static int compat_getdrvprm(int drive, memset(&v, 0, sizeof(struct compat_floppy_drive_params)); mutex_lock(&floppy_mutex); - v.cmos = UDP->cmos; - v.max_dtr = UDP->max_dtr; - v.hlt = UDP->hlt; - v.hut = UDP->hut; - v.srt = UDP->srt; - v.spinup = UDP->spinup; - v.spindown = UDP->spindown; - 
v.spindown_offset = UDP->spindown_offset; - v.select_delay = UDP->select_delay; - v.rps = UDP->rps; - v.tracks = UDP->tracks; - v.timeout = UDP->timeout; - v.interleave_sect = UDP->interleave_sect; - v.max_errors = UDP->max_errors; - v.flags = UDP->flags; - v.read_track = UDP->read_track; - memcpy(v.autodetect, UDP->autodetect, sizeof(v.autodetect)); - v.checkfreq = UDP->checkfreq; - v.native_format = UDP->native_format; + v.cmos = drive_params[drive].cmos; + v.max_dtr = drive_params[drive].max_dtr; + v.hlt = drive_params[drive].hlt; + v.hut = drive_params[drive].hut; + v.srt = drive_params[drive].srt; + v.spinup = drive_params[drive].spinup; + v.spindown = drive_params[drive].spindown; + v.spindown_offset = drive_params[drive].spindown_offset; + v.select_delay = drive_params[drive].select_delay; + v.rps = drive_params[drive].rps; + v.tracks = drive_params[drive].tracks; + v.timeout = drive_params[drive].timeout; + v.interleave_sect = drive_params[drive].interleave_sect; + v.max_errors = drive_params[drive].max_errors; + v.flags = drive_params[drive].flags; + v.read_track = drive_params[drive].read_track; + memcpy(v.autodetect, drive_params[drive].autodetect, + sizeof(v.autodetect)); + v.checkfreq = drive_params[drive].checkfreq; + v.native_format = drive_params[drive].native_format; mutex_unlock(&floppy_mutex); if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params))) @@ -3800,20 +3844,20 @@ static int compat_getdrvstat(int drive, bool poll, goto Eintr; process_fd_request(); } - v.spinup_date = UDRS->spinup_date; - v.select_date = UDRS->select_date; - v.first_read_date = UDRS->first_read_date; - v.probed_format = UDRS->probed_format; - v.track = UDRS->track; - v.maxblock = UDRS->maxblock; - v.maxtrack = UDRS->maxtrack; - v.generation = UDRS->generation; - v.keep_data = UDRS->keep_data; - v.fd_ref = UDRS->fd_ref; - v.fd_device = UDRS->fd_device; - v.last_checked = UDRS->last_checked; - v.dmabuf = (uintptr_t)UDRS->dmabuf; - v.bufblocks = UDRS->bufblocks; + v.spinup_date = drive_state[drive].spinup_date; + v.select_date = drive_state[drive].select_date; + v.first_read_date = drive_state[drive].first_read_date; + v.probed_format = drive_state[drive].probed_format; + v.track = drive_state[drive].track; + v.maxblock = drive_state[drive].maxblock; + v.maxtrack = drive_state[drive].maxtrack; + v.generation = drive_state[drive].generation; + v.keep_data = drive_state[drive].keep_data; + v.fd_ref = drive_state[drive].fd_ref; + v.fd_device = drive_state[drive].fd_device; + v.last_checked = drive_state[drive].last_checked; + v.dmabuf = (uintptr_t) drive_state[drive].dmabuf; + v.bufblocks = drive_state[drive].bufblocks; mutex_unlock(&floppy_mutex); if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct))) @@ -3831,7 +3875,7 @@ static int compat_getfdcstat(int drive, struct floppy_fdc_state v; mutex_lock(&floppy_mutex); - v = *UFDCS; + v = fdc_state[FDC(drive)]; mutex_unlock(&floppy_mutex); memset(&v32, 0, sizeof(struct compat_floppy_fdc_state)); @@ -3861,7 +3905,7 @@ static int compat_werrorget(int drive, memset(&v32, 0, sizeof(struct compat_floppy_write_errors)); mutex_lock(&floppy_mutex); - v = *UDRWE; + v = write_errors[drive]; mutex_unlock(&floppy_mutex); v32.write_errors = v.write_errors; v32.first_error_sector = v.first_error_sector; @@ -3930,16 +3974,16 @@ static void __init config_types(void) /* read drive info out of physical CMOS */ drive = 0; - if (!UDP->cmos) - UDP->cmos = FLOPPY0_TYPE; + if (!drive_params[drive].cmos) + drive_params[drive].cmos = FLOPPY0_TYPE; 
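A note on the floppy.c hunks above and below: the renames from DP, DRS, UDP, UDRS, UDRWE, FDCS and UFDCS to explicit drive_params[]/drive_state[]/write_errors[]/fdc_state[] accesses (together with the rename of the global fdc to current_fdc) strip a layer of single-letter pointer macros rather than change behaviour. The sketch below reconstructs what those macros are assumed to have expanded to; the definitions themselves sit outside the hunks shown here, so treat the exact spellings as an editorial assumption.

/* Editor's sketch, not part of the patch: assumed expansions of the macros
 * being removed throughout floppy.c.
 */
#define DP      (&drive_params[current_drive])  /* parameters of the selected drive        */
#define DRS     (&drive_state[current_drive])   /* state of the selected drive             */
#define FDCS    (&fdc_state[fdc])               /* state of the selected controller        */
#define UDP     (&drive_params[drive])          /* parameters of the drive passed to ioctl */
#define UDRS    (&drive_state[drive])           /* state of the drive passed to ioctl      */
#define UDRWE   (&write_errors[drive])          /* write-error record of that drive        */
#define UFDCS   (&fdc_state[FDC(drive)])        /* controller that drive sits on           */
/*
 * With the macros gone, UDRS->fd_ref is spelled drive_state[drive].fd_ref,
 * FDCS->reset becomes fdc_state[current_fdc].reset, and the raw-command byte
 * macros (COMMAND, TRACK, HEAD, SECTOR, SIZECODE, ...) turn into plain array
 * indices, so CT(COMMAND) is written CT(raw_cmd->cmd[COMMAND]).
 */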
drive = 1; - if (!UDP->cmos) - UDP->cmos = FLOPPY1_TYPE; + if (!drive_params[drive].cmos) + drive_params[drive].cmos = FLOPPY1_TYPE; /* FIXME: additional physical CMOS drive detection should go here */ for (drive = 0; drive < N_DRIVE; drive++) { - unsigned int type = UDP->cmos; + unsigned int type = drive_params[drive].cmos; struct floppy_drive_params *params; const char *name = NULL; char temparea[32]; @@ -3969,7 +4013,7 @@ static void __init config_types(void) pr_cont("%s fd%d is %s", prepend, drive, name); } - *UDP = *params; + drive_params[drive] = *params; } if (has_drive) @@ -3982,11 +4026,11 @@ static void floppy_release(struct gendisk *disk, fmode_t mode) mutex_lock(&floppy_mutex); mutex_lock(&open_lock); - if (!UDRS->fd_ref--) { + if (!drive_state[drive].fd_ref--) { DPRINT("floppy_release with fd_ref == 0"); - UDRS->fd_ref = 0; + drive_state[drive].fd_ref = 0; } - if (!UDRS->fd_ref) + if (!drive_state[drive].fd_ref) opened_bdev[drive] = NULL; mutex_unlock(&open_lock); mutex_unlock(&floppy_mutex); @@ -4007,16 +4051,16 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) mutex_lock(&floppy_mutex); mutex_lock(&open_lock); - old_dev = UDRS->fd_device; + old_dev = drive_state[drive].fd_device; if (opened_bdev[drive] && opened_bdev[drive] != bdev) goto out2; - if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)) { - set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); - set_bit(FD_VERIFY_BIT, &UDRS->flags); + if (!drive_state[drive].fd_ref && (drive_params[drive].flags & FD_BROKEN_DCL)) { + set_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags); + set_bit(FD_VERIFY_BIT, &drive_state[drive].flags); } - UDRS->fd_ref++; + drive_state[drive].fd_ref++; opened_bdev[drive] = bdev; @@ -4025,7 +4069,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) if (!floppy_track_buffer) { /* if opening an ED drive, reserve a big buffer, * else reserve a small one */ - if ((UDP->cmos == 6) || (UDP->cmos == 5)) + if ((drive_params[drive].cmos == 6) || (drive_params[drive].cmos == 5)) try = 64; /* Only 48 actually useful */ else try = 32; /* Only 24 actually useful */ @@ -4053,38 +4097,39 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) } new_dev = MINOR(bdev->bd_dev); - UDRS->fd_device = new_dev; + drive_state[drive].fd_device = new_dev; set_capacity(disks[drive], floppy_sizes[new_dev]); if (old_dev != -1 && old_dev != new_dev) { if (buffer_drive == drive) buffer_track = -1; } - if (UFDCS->rawcmd == 1) - UFDCS->rawcmd = 2; + if (fdc_state[FDC(drive)].rawcmd == 1) + fdc_state[FDC(drive)].rawcmd = 2; if (!(mode & FMODE_NDELAY)) { if (mode & (FMODE_READ|FMODE_WRITE)) { - UDRS->last_checked = 0; - clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); + drive_state[drive].last_checked = 0; + clear_bit(FD_OPEN_SHOULD_FAIL_BIT, + &drive_state[drive].flags); check_disk_change(bdev); - if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) + if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags)) goto out; - if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) + if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags)) goto out; } res = -EROFS; if ((mode & FMODE_WRITE) && - !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) + !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags)) goto out; } mutex_unlock(&open_lock); mutex_unlock(&floppy_mutex); return 0; out: - UDRS->fd_ref--; + drive_state[drive].fd_ref--; - if (!UDRS->fd_ref) + if (!drive_state[drive].fd_ref) opened_bdev[drive] = NULL; out2: mutex_unlock(&open_lock); @@ -4100,19 +4145,19 @@ static unsigned int 
floppy_check_events(struct gendisk *disk, { int drive = (long)disk->private_data; - if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || - test_bit(FD_VERIFY_BIT, &UDRS->flags)) + if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags) || + test_bit(FD_VERIFY_BIT, &drive_state[drive].flags)) return DISK_EVENT_MEDIA_CHANGE; - if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { + if (time_after(jiffies, drive_state[drive].last_checked + drive_params[drive].checkfreq)) { if (lock_fdc(drive)) return 0; poll_drive(false, 0); process_fd_request(); } - if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || - test_bit(FD_VERIFY_BIT, &UDRS->flags) || + if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags) || + test_bit(FD_VERIFY_BIT, &drive_state[drive].flags) || test_bit(drive, &fake_change) || drive_no_geom(drive)) return DISK_EVENT_MEDIA_CHANGE; @@ -4138,7 +4183,7 @@ static void floppy_rb0_cb(struct bio *bio) if (bio->bi_status) { pr_info("floppy: error %d while reading block 0\n", bio->bi_status); - set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); + set_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags); } complete(&cbdata->complete); } @@ -4195,8 +4240,8 @@ static int floppy_revalidate(struct gendisk *disk) int cf; int res = 0; - if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || - test_bit(FD_VERIFY_BIT, &UDRS->flags) || + if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags) || + test_bit(FD_VERIFY_BIT, &drive_state[drive].flags) || test_bit(drive, &fake_change) || drive_no_geom(drive)) { if (WARN(atomic_read(&usage_count) == 0, @@ -4206,20 +4251,20 @@ static int floppy_revalidate(struct gendisk *disk) res = lock_fdc(drive); if (res) return res; - cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || - test_bit(FD_VERIFY_BIT, &UDRS->flags)); + cf = (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags) || + test_bit(FD_VERIFY_BIT, &drive_state[drive].flags)); if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) { process_fd_request(); /*already done by another thread */ return 0; } - UDRS->maxblock = 0; - UDRS->maxtrack = 0; + drive_state[drive].maxblock = 0; + drive_state[drive].maxtrack = 0; if (buffer_drive == drive) buffer_track = -1; clear_bit(drive, &fake_change); - clear_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); + clear_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags); if (cf) - UDRS->generation++; + drive_state[drive].generation++; if (drive_no_geom(drive)) { /* auto-sensing */ res = __floppy_read_block_0(opened_bdev[drive], drive); @@ -4229,7 +4274,7 @@ static int floppy_revalidate(struct gendisk *disk) process_fd_request(); } } - set_capacity(disk, floppy_sizes[UDRS->fd_device]); + set_capacity(disk, floppy_sizes[drive_state[drive].fd_device]); return res; } @@ -4258,23 +4303,23 @@ static char __init get_fdc_version(void) int r; output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */ - if (FDCS->reset) + if (fdc_state[current_fdc].reset) return FDC_NONE; r = result(); if (r <= 0x00) return FDC_NONE; /* No FDC present ??? 
*/ if ((r == 1) && (reply_buffer[0] == 0x80)) { - pr_info("FDC %d is an 8272A\n", fdc); + pr_info("FDC %d is an 8272A\n", current_fdc); return FDC_8272A; /* 8272a/765 don't know DUMPREGS */ } if (r != 10) { pr_info("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n", - fdc, r); + current_fdc, r); return FDC_UNKNOWN; } if (!fdc_configure()) { - pr_info("FDC %d is an 82072\n", fdc); + pr_info("FDC %d is an 82072\n", current_fdc); return FDC_82072; /* 82072 doesn't know CONFIGURE */ } @@ -4282,50 +4327,50 @@ static char __init get_fdc_version(void) if (need_more_output() == MORE_OUTPUT) { output_byte(0); } else { - pr_info("FDC %d is an 82072A\n", fdc); + pr_info("FDC %d is an 82072A\n", current_fdc); return FDC_82072A; /* 82072A as found on Sparcs. */ } output_byte(FD_UNLOCK); r = result(); if ((r == 1) && (reply_buffer[0] == 0x80)) { - pr_info("FDC %d is a pre-1991 82077\n", fdc); + pr_info("FDC %d is a pre-1991 82077\n", current_fdc); return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know * LOCK/UNLOCK */ } if ((r != 1) || (reply_buffer[0] != 0x00)) { pr_info("FDC %d init: UNLOCK: unexpected return of %d bytes.\n", - fdc, r); + current_fdc, r); return FDC_UNKNOWN; } output_byte(FD_PARTID); r = result(); if (r != 1) { pr_info("FDC %d init: PARTID: unexpected return of %d bytes.\n", - fdc, r); + current_fdc, r); return FDC_UNKNOWN; } if (reply_buffer[0] == 0x80) { - pr_info("FDC %d is a post-1991 82077\n", fdc); + pr_info("FDC %d is a post-1991 82077\n", current_fdc); return FDC_82077; /* Revised 82077AA passes all the tests */ } switch (reply_buffer[0] >> 5) { case 0x0: /* Either a 82078-1 or a 82078SL running at 5Volt */ - pr_info("FDC %d is an 82078.\n", fdc); + pr_info("FDC %d is an 82078.\n", current_fdc); return FDC_82078; case 0x1: - pr_info("FDC %d is a 44pin 82078\n", fdc); + pr_info("FDC %d is a 44pin 82078\n", current_fdc); return FDC_82078; case 0x2: - pr_info("FDC %d is a S82078B\n", fdc); + pr_info("FDC %d is a S82078B\n", current_fdc); return FDC_S82078B; case 0x3: - pr_info("FDC %d is a National Semiconductor PC87306\n", fdc); + pr_info("FDC %d is a National Semiconductor PC87306\n", current_fdc); return FDC_87306; default: pr_info("FDC %d init: 82078 variant with unknown PARTID=%d.\n", - fdc, reply_buffer[0] >> 5); + current_fdc, reply_buffer[0] >> 5); return FDC_82078_UNKN; } } /* get_fdc_version */ @@ -4381,7 +4426,7 @@ static void __init set_cmos(int *ints, int dummy, int dummy2) if (current_drive >= 4 && !FDC2) FDC2 = 0x370; #endif - DP->cmos = ints[2]; + drive_params[current_drive].cmos = ints[2]; DPRINT("setting CMOS code to %d\n", ints[2]); } @@ -4470,7 +4515,7 @@ static ssize_t floppy_cmos_show(struct device *dev, int drive; drive = p->id; - return sprintf(buf, "%X\n", UDP->cmos); + return sprintf(buf, "%X\n", drive_params[drive].cmos); } static DEVICE_ATTR(cmos, 0444, floppy_cmos_show, NULL); @@ -4491,7 +4536,7 @@ static int floppy_resume(struct device *dev) int fdc; for (fdc = 0; fdc < N_FDC; fdc++) - if (FDCS->address != -1) + if (fdc_state[fdc].address != -1) user_reset_fdc(-1, FD_RESET_ALWAYS, false); return 0; @@ -4601,16 +4646,16 @@ static int __init do_floppy_init(void) config_types(); for (i = 0; i < N_FDC; i++) { - fdc = i; - memset(FDCS, 0, sizeof(*FDCS)); - FDCS->dtr = -1; - FDCS->dor = 0x4; + current_fdc = i; + memset(&fdc_state[current_fdc], 0, sizeof(*fdc_state)); + fdc_state[current_fdc].dtr = -1; + fdc_state[current_fdc].dor = 0x4; #if defined(__sparc__) || defined(__mc68000__) /*sparcs/sun3x don't have a DOR reset which we can fall back on to 
*/ #ifdef __mc68000__ if (MACH_IS_SUN3X) #endif - FDCS->version = FDC_82072A; + fdc_state[current_fdc].version = FDC_82072A; #endif } @@ -4625,7 +4670,7 @@ static int __init do_floppy_init(void) fdc_state[1].address = FDC2; #endif - fdc = 0; /* reset fdc in case of unexpected interrupt */ + current_fdc = 0; /* reset fdc in case of unexpected interrupt */ err = floppy_grab_irq_and_dma(); if (err) { cancel_delayed_work(&fd_timeout); @@ -4635,12 +4680,12 @@ static int __init do_floppy_init(void) /* initialise drive state */ for (drive = 0; drive < N_DRIVE; drive++) { - memset(UDRS, 0, sizeof(*UDRS)); - memset(UDRWE, 0, sizeof(*UDRWE)); - set_bit(FD_DISK_NEWCHANGE_BIT, &UDRS->flags); - set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); - set_bit(FD_VERIFY_BIT, &UDRS->flags); - UDRS->fd_device = -1; + memset(&drive_state[drive], 0, sizeof(drive_state[drive])); + memset(&write_errors[drive], 0, sizeof(write_errors[drive])); + set_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[drive].flags); + set_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags); + set_bit(FD_VERIFY_BIT, &drive_state[drive].flags); + drive_state[drive].fd_device = -1; floppy_track_buffer = NULL; max_buffer_sectors = 0; } @@ -4652,29 +4697,30 @@ static int __init do_floppy_init(void) msleep(10); for (i = 0; i < N_FDC; i++) { - fdc = i; - FDCS->driver_version = FD_DRIVER_VERSION; + current_fdc = i; + fdc_state[current_fdc].driver_version = FD_DRIVER_VERSION; for (unit = 0; unit < 4; unit++) - FDCS->track[unit] = 0; - if (FDCS->address == -1) + fdc_state[current_fdc].track[unit] = 0; + if (fdc_state[current_fdc].address == -1) continue; - FDCS->rawcmd = 2; + fdc_state[current_fdc].rawcmd = 2; if (user_reset_fdc(-1, FD_RESET_ALWAYS, false)) { /* free ioports reserved by floppy_grab_irq_and_dma() */ - floppy_release_regions(fdc); - FDCS->address = -1; - FDCS->version = FDC_NONE; + floppy_release_regions(current_fdc); + fdc_state[current_fdc].address = -1; + fdc_state[current_fdc].version = FDC_NONE; continue; } /* Try to determine the floppy controller type */ - FDCS->version = get_fdc_version(); - if (FDCS->version == FDC_NONE) { + fdc_state[current_fdc].version = get_fdc_version(); + if (fdc_state[current_fdc].version == FDC_NONE) { /* free ioports reserved by floppy_grab_irq_and_dma() */ - floppy_release_regions(fdc); - FDCS->address = -1; + floppy_release_regions(current_fdc); + fdc_state[current_fdc].address = -1; continue; } - if (can_use_virtual_dma == 2 && FDCS->version < FDC_82072A) + if (can_use_virtual_dma == 2 && + fdc_state[current_fdc].version < FDC_82072A) can_use_virtual_dma = 0; have_no_fdc = 0; @@ -4684,7 +4730,7 @@ static int __init do_floppy_init(void) */ user_reset_fdc(-1, FD_RESET_ALWAYS, false); } - fdc = 0; + current_fdc = 0; cancel_delayed_work(&fd_timeout); current_drive = 0; initialized = true; @@ -4780,7 +4826,7 @@ static void floppy_release_allocated_regions(int fdc, const struct io_region *p) { while (p != io_regions) { p--; - release_region(FDCS->address + p->offset, p->size); + release_region(fdc_state[fdc].address + p->offset, p->size); } } @@ -4791,10 +4837,10 @@ static int floppy_request_regions(int fdc) const struct io_region *p; for (p = io_regions; p < ARRAY_END(io_regions); p++) { - if (!request_region(FDCS->address + p->offset, + if (!request_region(fdc_state[fdc].address + p->offset, p->size, "floppy")) { DPRINT("Floppy io-port 0x%04lx in use\n", - FDCS->address + p->offset); + fdc_state[fdc].address + p->offset); floppy_release_allocated_regions(fdc, p); return -EBUSY; } @@ -4836,36 +4882,36 @@ static 
int floppy_grab_irq_and_dma(void) } } - for (fdc = 0; fdc < N_FDC; fdc++) { - if (FDCS->address != -1) { - if (floppy_request_regions(fdc)) + for (current_fdc = 0; current_fdc < N_FDC; current_fdc++) { + if (fdc_state[current_fdc].address != -1) { + if (floppy_request_regions(current_fdc)) goto cleanup; } } - for (fdc = 0; fdc < N_FDC; fdc++) { - if (FDCS->address != -1) { + for (current_fdc = 0; current_fdc < N_FDC; current_fdc++) { + if (fdc_state[current_fdc].address != -1) { reset_fdc_info(1); - fd_outb(FDCS->dor, FD_DOR); + fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR); } } - fdc = 0; + current_fdc = 0; set_dor(0, ~0, 8); /* avoid immediate interrupt */ - for (fdc = 0; fdc < N_FDC; fdc++) - if (FDCS->address != -1) - fd_outb(FDCS->dor, FD_DOR); + for (current_fdc = 0; current_fdc < N_FDC; current_fdc++) + if (fdc_state[current_fdc].address != -1) + fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR); /* * The driver will try and free resources and relies on us * to know if they were allocated or not. */ - fdc = 0; + current_fdc = 0; irqdma_allocated = 1; return 0; cleanup: fd_free_irq(); fd_free_dma(); - while (--fdc >= 0) - floppy_release_regions(fdc); + while (--current_fdc >= 0) + floppy_release_regions(current_fdc); atomic_dec(&usage_count); return -1; } @@ -4913,11 +4959,11 @@ static void floppy_release_irq_and_dma(void) pr_info("auxiliary floppy timer still active\n"); if (work_pending(&floppy_work)) pr_info("work still pending\n"); - old_fdc = fdc; - for (fdc = 0; fdc < N_FDC; fdc++) - if (FDCS->address != -1) - floppy_release_regions(fdc); - fdc = old_fdc; + old_fdc = current_fdc; + for (current_fdc = 0; current_fdc < N_FDC; current_fdc++) + if (fdc_state[current_fdc].address != -1) + floppy_release_regions(current_fdc); + current_fdc = old_fdc; } #ifdef MODULE diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 739b372a5112..a42c49e04954 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -214,7 +214,8 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup * will get updated by ioctl(LOOP_GET_STATUS) */ - blk_mq_freeze_queue(lo->lo_queue); + if (lo->lo_state == Lo_bound) + blk_mq_freeze_queue(lo->lo_queue); lo->use_dio = use_dio; if (use_dio) { blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue); @@ -223,7 +224,8 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; } - blk_mq_unfreeze_queue(lo->lo_queue); + if (lo->lo_state == Lo_bound) + blk_mq_unfreeze_queue(lo->lo_queue); } static int @@ -1539,16 +1541,16 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg)) return -EINVAL; - if (lo->lo_queue->limits.logical_block_size != arg) { - sync_blockdev(lo->lo_device); - kill_bdev(lo->lo_device); - } + if (lo->lo_queue->limits.logical_block_size == arg) + return 0; + + sync_blockdev(lo->lo_device); + kill_bdev(lo->lo_device); blk_mq_freeze_queue(lo->lo_queue); /* kill_bdev should have truncated all the pages */ - if (lo->lo_queue->limits.logical_block_size != arg && - lo->lo_device->bd_inode->i_mapping->nrpages) { + if (lo->lo_device->bd_inode->i_mapping->nrpages) { err = -EAGAIN; pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", __func__, lo->lo_number, lo->lo_file_name, diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 78181908f0df..43cff01a5a67 100644 --- 
a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -395,16 +395,19 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, } config = nbd->config; - if (config->num_connections > 1) { + if (config->num_connections > 1 || + (config->num_connections == 1 && nbd->tag_set.timeout)) { dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out, retrying (%d/%d alive)\n", atomic_read(&config->live_connections), config->num_connections); /* * Hooray we have more connections, requeue this IO, the submit - * path will put it on a real connection. + * path will put it on a real connection. Or if only one + * connection is configured, the submit path will wait util + * a new connection is reconfigured or util dead timeout. */ - if (config->socks && config->num_connections > 1) { + if (config->socks) { if (cmd->index < config->num_connections) { struct nbd_sock *nsock = config->socks[cmd->index]; @@ -431,12 +434,22 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, * Userspace sets timeout=0 to disable socket disconnection, * so just warn and reset the timer. */ + struct nbd_sock *nsock = config->socks[cmd->index]; cmd->retries++; dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n", req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)), (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries); + mutex_lock(&nsock->tx_lock); + if (cmd->cookie != nsock->cookie) { + nbd_requeue_cmd(cmd); + mutex_unlock(&nsock->tx_lock); + mutex_unlock(&cmd->lock); + nbd_config_put(nbd); + return BLK_EH_DONE; + } + mutex_unlock(&nsock->tx_lock); mutex_unlock(&cmd->lock); nbd_config_put(nbd); return BLK_EH_RESET_TIMER; @@ -741,14 +754,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", result); /* - * If we've disconnected or we only have 1 - * connection then we need to make sure we + * If we've disconnected, we need to make sure we * complete this request, otherwise error out * and let the timeout stuff handle resubmitting * this request onto another connection. 
*/ - if (nbd_disconnected(config) || - config->num_connections <= 1) { + if (nbd_disconnected(config)) { cmd->status = BLK_STS_IOERR; goto out; } @@ -825,7 +836,7 @@ static int find_fallback(struct nbd_device *nbd, int index) if (config->num_connections <= 1) { dev_err_ratelimited(disk_to_dev(nbd->disk), - "Attempted send on invalid socket\n"); + "Dead connection, failed to find a fallback\n"); return new_index; } diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h index bc837862b767..62b660821dbc 100644 --- a/drivers/block/null_blk.h +++ b/drivers/block/null_blk.h @@ -14,9 +14,6 @@ #include <linux/fault-inject.h> struct nullb_cmd { - struct list_head list; - struct llist_node ll_list; - struct __call_single_data csd; struct request *rq; struct bio *bio; unsigned int tag; diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 16510795e377..4e1c0712278e 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -23,6 +23,7 @@ #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION static DECLARE_FAULT_ATTR(null_timeout_attr); static DECLARE_FAULT_ATTR(null_requeue_attr); +static DECLARE_FAULT_ATTR(null_init_hctx_attr); #endif static inline u64 mb_per_tick(int mbps) @@ -96,11 +97,21 @@ module_param_named(home_node, g_home_node, int, 0444); MODULE_PARM_DESC(home_node, "Home node for the device"); #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION +/* + * For more details about fault injection, please refer to + * Documentation/fault-injection/fault-injection.rst. + */ static char g_timeout_str[80]; module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444); +MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>"); static char g_requeue_str[80]; module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444); +MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>"); + +static char g_init_hctx_str[80]; +module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444); +MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>"); #endif static int g_queue_mode = NULL_Q_MQ; @@ -276,7 +287,7 @@ nullb_device_##NAME##_store(struct config_item *item, const char *page, \ { \ int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\ struct nullb_device *dev = to_nullb_device(item); \ - TYPE uninitialized_var(new_value); \ + TYPE new_value = 0; \ int ret; \ \ ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\ @@ -302,6 +313,12 @@ static int nullb_apply_submit_queues(struct nullb_device *dev, if (!nullb) return 0; + /* + * Make sure that null_init_hctx() does not access nullb->queues[] past + * the end of that array. + */ + if (submit_queues > nr_cpu_ids) + return -EINVAL; set = nullb->tag_set; blk_mq_update_nr_hw_queues(set, submit_queues); return set->nr_hw_queues == submit_queues ? 
0 : -ENOMEM; @@ -605,6 +622,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) if (tag != -1U) { cmd = &nq->cmds[tag]; cmd->tag = tag; + cmd->error = BLK_STS_OK; cmd->nq = nq; if (nq->dev->irqmode == NULL_IRQ_TIMER) { hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, @@ -1385,6 +1403,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, cmd->timer.function = null_cmd_timer_expired; } cmd->rq = bd->rq; + cmd->error = BLK_STS_OK; cmd->nq = nq; blk_mq_start_request(bd->rq); @@ -1408,12 +1427,6 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq)); } -static const struct blk_mq_ops null_mq_ops = { - .queue_rq = null_queue_rq, - .complete = null_complete_rq, - .timeout = null_timeout_rq, -}; - static void cleanup_queue(struct nullb_queue *nq) { kfree(nq->tag_map); @@ -1430,9 +1443,56 @@ static void cleanup_queues(struct nullb *nullb) kfree(nullb->queues); } +static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) +{ + struct nullb_queue *nq = hctx->driver_data; + struct nullb *nullb = nq->dev->nullb; + + nullb->nr_queues--; +} + +static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) +{ + init_waitqueue_head(&nq->wait); + nq->queue_depth = nullb->queue_depth; + nq->dev = nullb->dev; +} + +static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data, + unsigned int hctx_idx) +{ + struct nullb *nullb = hctx->queue->queuedata; + struct nullb_queue *nq; + +#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION + if (g_init_hctx_str[0] && should_fail(&null_init_hctx_attr, 1)) + return -EFAULT; +#endif + + nq = &nullb->queues[hctx_idx]; + hctx->driver_data = nq; + null_init_queue(nullb, nq); + nullb->nr_queues++; + + return 0; +} + +static const struct blk_mq_ops null_mq_ops = { + .queue_rq = null_queue_rq, + .complete = null_complete_rq, + .timeout = null_timeout_rq, + .init_hctx = null_init_hctx, + .exit_hctx = null_exit_hctx, +}; + static void null_del_dev(struct nullb *nullb) { - struct nullb_device *dev = nullb->dev; + struct nullb_device *dev; + + if (!nullb) + return; + + dev = nullb->dev; ida_simple_remove(&nullb_indexes, nullb->index); @@ -1473,33 +1533,6 @@ static const struct block_device_operations null_ops = { .report_zones = null_report_zones, }; -static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) -{ - BUG_ON(!nullb); - BUG_ON(!nq); - - init_waitqueue_head(&nq->wait); - nq->queue_depth = nullb->queue_depth; - nq->dev = nullb->dev; -} - -static void null_init_queues(struct nullb *nullb) -{ - struct request_queue *q = nullb->q; - struct blk_mq_hw_ctx *hctx; - struct nullb_queue *nq; - int i; - - queue_for_each_hw_ctx(q, hctx, i) { - if (!hctx->nr_ctx || !hctx->tags) - continue; - nq = &nullb->queues[i]; - hctx->driver_data = nq; - null_init_queue(nullb, nq); - nullb->nr_queues++; - } -} - static int setup_commands(struct nullb_queue *nq) { struct nullb_cmd *cmd; @@ -1518,8 +1551,6 @@ static int setup_commands(struct nullb_queue *nq) for (i = 0; i < nq->queue_depth; i++) { cmd = &nq->cmds[i]; - INIT_LIST_HEAD(&cmd->list); - cmd->ll_list.next = NULL; cmd->tag = -1U; } @@ -1528,8 +1559,7 @@ static int setup_commands(struct nullb_queue *nq) static int setup_queues(struct nullb *nullb) { - nullb->queues = kcalloc(nullb->dev->submit_queues, - sizeof(struct nullb_queue), + nullb->queues = kcalloc(nr_cpu_ids, sizeof(struct nullb_queue), GFP_KERNEL); if (!nullb->queues) return -ENOMEM; @@ -1671,6 +1701,8 @@ static bool 
null_setup_fault(void) return false; if (!__null_setup_fault(&null_requeue_attr, g_requeue_str)) return false; + if (!__null_setup_fault(&null_init_hctx_attr, g_init_hctx_str)) + return false; #endif return true; } @@ -1714,19 +1746,17 @@ static int null_add_dev(struct nullb_device *dev) goto out_cleanup_queues; nullb->tag_set->timeout = 5 * HZ; - nullb->q = blk_mq_init_queue(nullb->tag_set); + nullb->q = blk_mq_init_queue_data(nullb->tag_set, nullb); if (IS_ERR(nullb->q)) { rv = -ENOMEM; goto out_cleanup_tags; } - null_init_queues(nullb); } else if (dev->queue_mode == NULL_Q_BIO) { - nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node); + nullb->q = blk_alloc_queue(null_queue_bio, dev->home_node); if (!nullb->q) { rv = -ENOMEM; goto out_cleanup_queues; } - blk_queue_make_request(nullb->q, null_queue_bio); rv = init_driver_queues(nullb); if (rv) goto out_cleanup_blk_queue; @@ -1790,6 +1820,7 @@ out_cleanup_queues: cleanup_queues(nullb); out_free_nullb: kfree(nullb); + dev->nullb = NULL; out: return rv; } diff --git a/drivers/block/null_blk_trace.c b/drivers/block/null_blk_trace.c new file mode 100644 index 000000000000..f246e7bff698 --- /dev/null +++ b/drivers/block/null_blk_trace.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * null_blk trace related helpers. + * + * Copyright (C) 2020 Western Digital Corporation or its affiliates. + */ +#include "null_blk_trace.h" + +/* + * Helper to use for all null_blk traces to extract disk name. + */ +const char *nullb_trace_disk_name(struct trace_seq *p, char *name) +{ + const char *ret = trace_seq_buffer_ptr(p); + + if (name && *name) + trace_seq_printf(p, "disk=%s, ", name); + trace_seq_putc(p, 0); + + return ret; +} diff --git a/drivers/block/null_blk_trace.h b/drivers/block/null_blk_trace.h new file mode 100644 index 000000000000..4f83032eb544 --- /dev/null +++ b/drivers/block/null_blk_trace.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * null_blk device driver tracepoints. + * + * Copyright (C) 2020 Western Digital Corporation or its affiliates. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM nullb + +#if !defined(_TRACE_NULLB_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_NULLB_H + +#include <linux/tracepoint.h> +#include <linux/trace_seq.h> + +#include "null_blk.h" + +const char *nullb_trace_disk_name(struct trace_seq *p, char *name); + +#define __print_disk_name(name) nullb_trace_disk_name(p, name) + +#ifndef TRACE_HEADER_MULTI_READ +static inline void __assign_disk_name(char *name, struct gendisk *disk) +{ + if (disk) + memcpy(name, disk->disk_name, DISK_NAME_LEN); + else + memset(name, 0, DISK_NAME_LEN); +} +#endif + +TRACE_EVENT(nullb_zone_op, + TP_PROTO(struct nullb_cmd *cmd, unsigned int zone_no, + unsigned int zone_cond), + TP_ARGS(cmd, zone_no, zone_cond), + TP_STRUCT__entry( + __array(char, disk, DISK_NAME_LEN) + __field(enum req_opf, op) + __field(unsigned int, zone_no) + __field(unsigned int, zone_cond) + ), + TP_fast_assign( + __entry->op = req_op(cmd->rq); + __entry->zone_no = zone_no; + __entry->zone_cond = zone_cond; + __assign_disk_name(__entry->disk, cmd->rq->rq_disk); + ), + TP_printk("%s req=%-15s zone_no=%u zone_cond=%-10s", + __print_disk_name(__entry->disk), + blk_op_str(__entry->op), + __entry->zone_no, + blk_zone_cond_str(__entry->zone_cond)) +); + +TRACE_EVENT(nullb_report_zones, + TP_PROTO(struct nullb *nullb, unsigned int nr_zones), + TP_ARGS(nullb, nr_zones), + TP_STRUCT__entry( + __array(char, disk, DISK_NAME_LEN) + __field(unsigned int, nr_zones) + ), + TP_fast_assign( + __entry->nr_zones = nr_zones; + __assign_disk_name(__entry->disk, nullb->disk); + ), + TP_printk("%s nr_zones=%u", + __print_disk_name(__entry->disk), __entry->nr_zones) +); + +#endif /* _TRACE_NULLB_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE null_blk_trace + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c index ed34785dd64b..673618d8222a 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c @@ -2,6 +2,9 @@ #include <linux/vmalloc.h> #include "null_blk.h" +#define CREATE_TRACE_POINTS +#include "null_blk_trace.h" + /* zone_size in MBs to sectors. 
*/ #define ZONE_SIZE_SHIFT 11 @@ -80,6 +83,8 @@ int null_report_zones(struct gendisk *disk, sector_t sector, return 0; nr_zones = min(nr_zones, dev->nr_zones - first_zone); + trace_nullb_report_zones(nullb, nr_zones); + for (i = 0; i < nr_zones; i++) { /* * Stacked DM target drivers will remap the zone information by @@ -148,6 +153,8 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector, /* Invalid zone condition */ return BLK_STS_IOERR; } + + trace_nullb_zone_op(cmd, zno, zone->cond); return BLK_STS_OK; } @@ -155,7 +162,8 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op, sector_t sector) { struct nullb_device *dev = cmd->nq->dev; - struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)]; + unsigned int zone_no = null_zone_no(dev, sector); + struct blk_zone *zone = &dev->zones[zone_no]; size_t i; switch (op) { @@ -203,6 +211,8 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op, default: return BLK_STS_NOTSUPP; } + + trace_nullb_zone_op(cmd, zone_no, zone->cond); return BLK_STS_OK; } diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 117cfc8cd05a..cda5cf917e9a 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -276,7 +276,7 @@ static const struct block_device_operations pcd_bdops = { .release = pcd_block_release, .ioctl = pcd_block_ioctl, #ifdef CONFIG_COMPAT - .ioctl = blkdev_compat_ptr_ioctl, + .compat_ioctl = blkdev_compat_ptr_ioctl, #endif .check_events = pcd_block_check_events, }; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 5f970a7d32c0..0b944ac96d6b 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2493,7 +2493,6 @@ static void pkt_init_queue(struct pktcdvd_device *pd) { struct request_queue *q = pd->disk->queue; - blk_queue_make_request(q, pkt_make_request); blk_queue_logical_block_size(q, CD_FRAMESIZE); blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS); q->queuedata = pd; @@ -2679,6 +2678,11 @@ static unsigned int pkt_check_events(struct gendisk *disk, return attached_disk->fops->check_events(attached_disk, clearing); } +static char *pkt_devnode(struct gendisk *disk, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name); +} + static const struct block_device_operations pktcdvd_ops = { .owner = THIS_MODULE, .open = pkt_open, @@ -2686,13 +2690,9 @@ static const struct block_device_operations pktcdvd_ops = { .ioctl = pkt_ioctl, .compat_ioctl = blkdev_compat_ptr_ioctl, .check_events = pkt_check_events, + .devnode = pkt_devnode, }; -static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode) -{ - return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name); -} - /* * Set up mapping from pktcdvd device to CD-ROM device. 
*/ @@ -2748,9 +2748,8 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) disk->fops = &pktcdvd_ops; disk->flags = GENHD_FL_REMOVABLE; strcpy(disk->disk_name, pd->name); - disk->devnode = pktcdvd_devnode; disk->private_data = pd; - disk->queue = blk_alloc_queue(GFP_KERNEL); + disk->queue = blk_alloc_queue(pkt_make_request, NUMA_NO_NODE); if (!disk->queue) goto out_mem2; diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 4628e1a27a2b..821d4d8b1d76 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -737,7 +737,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev) ps3vram_proc_init(dev); - queue = blk_alloc_queue(GFP_KERNEL); + queue = blk_alloc_queue(ps3vram_make_request, NUMA_NO_NODE); if (!queue) { dev_err(&dev->core, "blk_alloc_queue failed\n"); error = -ENOMEM; @@ -746,7 +746,6 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev) priv->queue = queue; queue->queuedata = dev; - blk_queue_make_request(queue, ps3vram_make_request); blk_queue_max_segments(queue, BLK_MAX_SEGMENTS); blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE); blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS); diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index c47d28b2ce44..8ffa8260dcaf 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c @@ -248,7 +248,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card) return -ENOMEM; } - card->queue = blk_alloc_queue(GFP_KERNEL); + card->queue = blk_alloc_queue(rsxx_make_request, NUMA_NO_NODE); if (!card->queue) { dev_err(CARD_TO_DEV(card), "Failed queue alloc\n"); unregister_blkdev(card->major, DRIVER_NAME); @@ -269,7 +269,6 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card) blk_queue_logical_block_size(card->queue, blk_size); } - blk_queue_make_request(card->queue, rsxx_make_request); blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors); blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE); diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index 111eb659e66d..1914f5488b22 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c @@ -80,7 +80,7 @@ struct dma_tracker { struct dma_tracker_list { spinlock_t lock; int head; - struct dma_tracker list[0]; + struct dma_tracker list[]; }; diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 4eaf97d7a170..d84e8a878df2 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -885,11 +885,9 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) card->biotail = &card->bio; spin_lock_init(&card->lock); - card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE); + card->queue = blk_alloc_queue(mm_make_request, NUMA_NO_NODE); if (!card->queue) goto failed_alloc; - - blk_queue_make_request(card->queue, mm_make_request); card->queue->queuedata = card; tasklet_init(&card->tasklet, process_page, (unsigned long)card); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 54158766334b..f9b1e70f1b31 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -245,13 +245,20 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); if (err) { virtqueue_kick(vblk->vqs[qid].vq); - blk_mq_stop_hw_queue(hctx); + /* Don't stop the queue if -ENOMEM: we may have failed to + * bounce the buffer due to global resource outage. 
+ */ + if (err == -ENOSPC) + blk_mq_stop_hw_queue(hctx); spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); - /* Out of mem doesn't actually happen, since we fall back - * to direct descriptors */ - if (err == -ENOMEM || err == -ENOSPC) + switch (err) { + case -ENOSPC: return BLK_STS_DEV_RESOURCE; - return BLK_STS_IOERR; + case -ENOMEM: + return BLK_STS_RESOURCE; + default: + return BLK_STS_IOERR; + } } if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) @@ -381,18 +388,15 @@ static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize) cap_str_10, cap_str_2); - set_capacity(vblk->disk, capacity); + set_capacity_revalidate_and_notify(vblk->disk, capacity, true); } static void virtblk_config_changed_work(struct work_struct *work) { struct virtio_blk *vblk = container_of(work, struct virtio_blk, config_work); - char *envp[] = { "RESIZE=1", NULL }; virtblk_update_capacity(vblk, true); - revalidate_disk(vblk->disk); - kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp); } static void virtblk_config_changed(struct virtio_device *vdev) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index e2ad6bba2281..915cf5b6388c 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -213,6 +213,7 @@ struct blkfront_info struct blk_mq_tag_set tag_set; struct blkfront_ring_info *rinfo; unsigned int nr_rings; + unsigned int rinfo_size; /* Save uncomplete reqs and bios for migration. */ struct list_head requests; struct bio_list bio_list; @@ -259,6 +260,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); static void blkfront_gather_backend_features(struct blkfront_info *info); static int negotiate_mq(struct blkfront_info *info); +#define for_each_rinfo(info, ptr, idx) \ + for ((ptr) = (info)->rinfo, (idx) = 0; \ + (idx) < (info)->nr_rings; \ + (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size) + +static inline struct blkfront_ring_info * +get_rinfo(const struct blkfront_info *info, unsigned int i) +{ + BUG_ON(i >= info->nr_rings); + return (void *)info->rinfo + i * info->rinfo_size; +} + static int get_id_from_freelist(struct blkfront_ring_info *rinfo) { unsigned long free = rinfo->shadow_free; @@ -883,8 +896,7 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx, struct blkfront_info *info = hctx->queue->queuedata; struct blkfront_ring_info *rinfo = NULL; - BUG_ON(info->nr_rings <= qid); - rinfo = &info->rinfo[qid]; + rinfo = get_rinfo(info, qid); blk_mq_start_request(qd->rq); spin_lock_irqsave(&rinfo->ring_lock, flags); if (RING_FULL(&rinfo->ring)) @@ -1181,6 +1193,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, static void xlvbd_release_gendisk(struct blkfront_info *info) { unsigned int minor, nr_minors, i; + struct blkfront_ring_info *rinfo; if (info->rq == NULL) return; @@ -1188,9 +1201,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info) /* No more blkif_request(). */ blk_mq_stop_hw_queues(info->rq); - for (i = 0; i < info->nr_rings; i++) { - struct blkfront_ring_info *rinfo = &info->rinfo[i]; - + for_each_rinfo(info, rinfo, i) { /* No more gnttab callback work. */ gnttab_cancel_free_callback(&rinfo->callback); @@ -1339,6 +1350,7 @@ free_shadow: static void blkif_free(struct blkfront_info *info, int suspend) { unsigned int i; + struct blkfront_ring_info *rinfo; /* Prevent new requests being issued until we fix things up. */ info->connected = suspend ? 
@@ -1347,8 +1359,8 @@ static void blkif_free(struct blkfront_info *info, int suspend) if (info->rq) blk_mq_stop_hw_queues(info->rq); - for (i = 0; i < info->nr_rings; i++) - blkif_free_ring(&info->rinfo[i]); + for_each_rinfo(info, rinfo, i) + blkif_free_ring(rinfo); kvfree(info->rinfo); info->rinfo = NULL; @@ -1775,6 +1787,7 @@ static int talk_to_blkback(struct xenbus_device *dev, int err; unsigned int i, max_page_order; unsigned int ring_page_order; + struct blkfront_ring_info *rinfo; if (!info) return -ENODEV; @@ -1788,9 +1801,7 @@ static int talk_to_blkback(struct xenbus_device *dev, if (err) goto destroy_blkring; - for (i = 0; i < info->nr_rings; i++) { - struct blkfront_ring_info *rinfo = &info->rinfo[i]; - + for_each_rinfo(info, rinfo, i) { /* Create shared ring, alloc event channel. */ err = setup_blkring(dev, rinfo); if (err) @@ -1815,7 +1826,7 @@ again: /* We already got the number of queues/rings in _probe */ if (info->nr_rings == 1) { - err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename); + err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename); if (err) goto destroy_blkring; } else { @@ -1837,10 +1848,10 @@ again: goto abort_transaction; } - for (i = 0; i < info->nr_rings; i++) { + for_each_rinfo(info, rinfo, i) { memset(path, 0, pathsize); snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i); - err = write_per_ring_nodes(xbt, &info->rinfo[i], path); + err = write_per_ring_nodes(xbt, rinfo, path); if (err) { kfree(path); goto destroy_blkring; @@ -1868,9 +1879,8 @@ again: goto destroy_blkring; } - for (i = 0; i < info->nr_rings; i++) { + for_each_rinfo(info, rinfo, i) { unsigned int j; - struct blkfront_ring_info *rinfo = &info->rinfo[i]; for (j = 0; j < BLK_RING_SIZE(info); j++) rinfo->shadow[j].req.u.rw.id = j + 1; @@ -1900,6 +1910,7 @@ static int negotiate_mq(struct blkfront_info *info) { unsigned int backend_max_queues; unsigned int i; + struct blkfront_ring_info *rinfo; BUG_ON(info->nr_rings); @@ -1911,20 +1922,16 @@ static int negotiate_mq(struct blkfront_info *info) if (!info->nr_rings) info->nr_rings = 1; - info->rinfo = kvcalloc(info->nr_rings, - struct_size(info->rinfo, shadow, - BLK_RING_SIZE(info)), - GFP_KERNEL); + info->rinfo_size = struct_size(info->rinfo, shadow, + BLK_RING_SIZE(info)); + info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL); if (!info->rinfo) { xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure"); info->nr_rings = 0; return -ENOMEM; } - for (i = 0; i < info->nr_rings; i++) { - struct blkfront_ring_info *rinfo; - - rinfo = &info->rinfo[i]; + for_each_rinfo(info, rinfo, i) { INIT_LIST_HEAD(&rinfo->indirect_pages); INIT_LIST_HEAD(&rinfo->grants); rinfo->dev_info = info; @@ -2017,6 +2024,7 @@ static int blkif_recover(struct blkfront_info *info) int rc; struct bio *bio; unsigned int segs; + struct blkfront_ring_info *rinfo; blkfront_gather_backend_features(info); /* Reset limits changed by blk_mq_update_nr_hw_queues(). */ @@ -2024,9 +2032,7 @@ static int blkif_recover(struct blkfront_info *info) segs = info->max_indirect_segments ? 
: BLKIF_MAX_SEGMENTS_PER_REQUEST; blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG); - for (r_index = 0; r_index < info->nr_rings; r_index++) { - struct blkfront_ring_info *rinfo = &info->rinfo[r_index]; - + for_each_rinfo(info, rinfo, r_index) { rc = blkfront_setup_indirect(rinfo); if (rc) return rc; @@ -2036,10 +2042,7 @@ static int blkif_recover(struct blkfront_info *info) /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; - for (r_index = 0; r_index < info->nr_rings; r_index++) { - struct blkfront_ring_info *rinfo; - - rinfo = &info->rinfo[r_index]; + for_each_rinfo(info, rinfo, r_index) { /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(rinfo); } @@ -2072,13 +2075,13 @@ static int blkfront_resume(struct xenbus_device *dev) struct blkfront_info *info = dev_get_drvdata(&dev->dev); int err = 0; unsigned int i, j; + struct blkfront_ring_info *rinfo; dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); bio_list_init(&info->bio_list); INIT_LIST_HEAD(&info->requests); - for (i = 0; i < info->nr_rings; i++) { - struct blkfront_ring_info *rinfo = &info->rinfo[i]; + for_each_rinfo(info, rinfo, i) { struct bio_list merge_bio; struct blk_shadow *shadow = rinfo->shadow; @@ -2335,8 +2338,8 @@ static void blkfront_connect(struct blkfront_info *info) unsigned long sector_size; unsigned int physical_sector_size; unsigned int binfo; - char *envp[] = { "RESIZE=1", NULL }; int err, i; + struct blkfront_ring_info *rinfo; switch (info->connected) { case BLKIF_STATE_CONNECTED: @@ -2350,10 +2353,7 @@ static void blkfront_connect(struct blkfront_info *info) return; printk(KERN_INFO "Setting capacity to %Lu\n", sectors); - set_capacity(info->gd, sectors); - revalidate_disk(info->gd); - kobject_uevent_env(&disk_to_dev(info->gd)->kobj, - KOBJ_CHANGE, envp); + set_capacity_revalidate_and_notify(info->gd, sectors, true); return; case BLKIF_STATE_SUSPENDED: @@ -2394,8 +2394,8 @@ static void blkfront_connect(struct blkfront_info *info) "physical-sector-size", sector_size); blkfront_gather_backend_features(info); - for (i = 0; i < info->nr_rings; i++) { - err = blkfront_setup_indirect(&info->rinfo[i]); + for_each_rinfo(info, rinfo, i) { + err = blkfront_setup_indirect(rinfo); if (err) { xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", info->xbdev->otherend); @@ -2416,8 +2416,8 @@ static void blkfront_connect(struct blkfront_info *info) /* Kick pending requests. 
*/ info->connected = BLKIF_STATE_CONNECTED; - for (i = 0; i < info->nr_rings; i++) - kick_pending_request_queues(&info->rinfo[i]); + for_each_rinfo(info, rinfo, i) + kick_pending_request_queues(rinfo); device_add_disk(&info->xbdev->dev, info->gd, NULL); @@ -2652,9 +2652,9 @@ static void purge_persistent_grants(struct blkfront_info *info) { unsigned int i; unsigned long flags; + struct blkfront_ring_info *rinfo; - for (i = 0; i < info->nr_rings; i++) { - struct blkfront_ring_info *rinfo = &info->rinfo[i]; + for_each_rinfo(info, rinfo, i) { struct grant *gnt_list_entry, *tmp; spin_lock_irqsave(&rinfo->ring_lock, flags); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 1bdb5793842b..ebb234f36909 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -33,6 +33,7 @@ #include <linux/sysfs.h> #include <linux/debugfs.h> #include <linux/cpuhotplug.h> +#include <linux/part_stat.h> #include "zram_drv.h" @@ -1894,7 +1895,7 @@ static int zram_add(void) #ifdef CONFIG_ZRAM_WRITEBACK spin_lock_init(&zram->wb_limit_lock); #endif - queue = blk_alloc_queue(GFP_KERNEL); + queue = blk_alloc_queue(zram_make_request, NUMA_NO_NODE); if (!queue) { pr_err("Error allocating disk queue for device %d\n", device_id); @@ -1902,8 +1903,6 @@ static int zram_add(void) goto out_free_idr; } - blk_queue_make_request(queue, zram_make_request); - /* gendisk structure */ zram->disk = alloc_disk(1); if (!zram->disk) { diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index be79d6c6a4e4..1bb00a959c67 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -345,7 +345,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr, if (ret) goto unlock; - *buf = readl(rsb->regs + RSB_DATA); + *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0); unlock: mutex_unlock(&rsb->lock); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f702c85c81b6..440019655fbb 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1266,6 +1266,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0), SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, SYSC_MODULE_QUIRK_SGX), + SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff, @@ -1294,7 +1296,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0), SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0), SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0), - SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 0), SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0), SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0), SYSC_QUIRK("mcbsp", 0, -1, 0x8c, -1, 0, 0, 0), @@ -1400,7 +1401,7 @@ static void sysc_init_revision_quirks(struct sysc *ddata) } /* 1-wire needs module's internal clocks enabled for reset */ -static void sysc_clk_enable_quirk_hdq1w(struct sysc *ddata) +static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata) { int offset = 0x0c; /* HDQ_CTRL_STATUS */ u16 val; @@ -1488,7 +1489,7 @@ static void sysc_init_module_quirks(struct sysc *ddata) return; if (ddata->cfg.quirks & 
SYSC_MODULE_QUIRK_HDQ1W) { - ddata->clk_enable_quirk = sysc_clk_enable_quirk_hdq1w; + ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w; return; } diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 886b2638c730..c51292c2a131 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -519,7 +519,7 @@ static const struct block_device_operations gdrom_bdops = { .check_events = gdrom_bdops_check_events, .ioctl = gdrom_bdops_ioctl, #ifdef CONFIG_COMPAT - .ioctl = blkdev_compat_ptr_ioctl, + .compat_ioctl = blkdev_compat_ptr_ioctl, #endif }; diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 594aee281977..b40edae32817 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -775,7 +775,7 @@ int __init agp_amd64_init(void) } /* First check that we have at least one AMD64 NB */ - if (!pci_dev_present(amd_nb_misc_ids)) { + if (!amd_nb_num()) { pci_unregister_driver(&agp_amd64_pci_driver); return -ENODEV; } diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index ffe9b0c6c647..39943bc3651a 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -209,20 +209,19 @@ static int __init mod_init(void) out: return err; } +module_init(mod_init); static void __exit mod_exit(void) { hwrng_unregister(&via_rng); } - -module_init(mod_init); module_exit(mod_exit); static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = { - X86_FEATURE_MATCH(X86_FEATURE_XSTORE), + X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL), {} }; +MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id); MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock"); MODULE_LICENSE("GPL"); -MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id); diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c index c78127ccbc0d..638c693e17ad 100644 --- a/drivers/char/ipmi/ipmi_si_platform.c +++ b/drivers/char/ipmi/ipmi_si_platform.c @@ -194,7 +194,7 @@ static int platform_ipmi_probe(struct platform_device *pdev) else io.slave_addr = slave_addr; - io.irq = platform_get_irq(pdev, 0); + io.irq = platform_get_irq_optional(pdev, 0); if (io.irq > 0) io.irq_setup = ipmi_std_irq_setup; else @@ -378,7 +378,7 @@ static int acpi_ipmi_probe(struct platform_device *pdev) io.irq = tmp; io.irq_setup = acpi_gpe_irq_setup; } else { - int irq = platform_get_irq(pdev, 0); + int irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { io.irq = irq; diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 5a0d99d4fec0..9567e5197f74 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -21,9 +21,11 @@ tpm-$(CONFIG_EFI) += eventlog/efi.o tpm-$(CONFIG_OF) += eventlog/of.o obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o obj-$(CONFIG_TCG_TIS) += tpm_tis.o -obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi_mod.o -tpm_tis_spi_mod-y := tpm_tis_spi.o -tpm_tis_spi_mod-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o + +obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o +tpm_tis_spi-y := tpm_tis_spi_main.o +tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o + obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c index 7a0fca659b6a..7460f230bae4 100644 --- a/drivers/char/tpm/eventlog/common.c +++ b/drivers/char/tpm/eventlog/common.c @@ -99,11 +99,8 @@ static int tpm_read_log(struct tpm_chip *chip) * * If an event log is found 
then the securityfs files are setup to * export it to userspace, otherwise nothing is done. - * - * Returns -ENODEV if the firmware has no event log or securityfs is not - * supported. */ -int tpm_bios_log_setup(struct tpm_chip *chip) +void tpm_bios_log_setup(struct tpm_chip *chip) { const char *name = dev_name(&chip->dev); unsigned int cnt; @@ -112,7 +109,7 @@ int tpm_bios_log_setup(struct tpm_chip *chip) rc = tpm_read_log(chip); if (rc < 0) - return rc; + return; log_version = rc; cnt = 0; @@ -158,13 +155,12 @@ int tpm_bios_log_setup(struct tpm_chip *chip) cnt++; } - return 0; + return; err: - rc = PTR_ERR(chip->bios_dir[cnt]); chip->bios_dir[cnt] = NULL; tpm_bios_log_teardown(chip); - return rc; + return; } void tpm_bios_log_teardown(struct tpm_chip *chip) diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c index af347c190819..a9ce66d09a75 100644 --- a/drivers/char/tpm/eventlog/of.c +++ b/drivers/char/tpm/eventlog/of.c @@ -51,7 +51,8 @@ int tpm_read_log_of(struct tpm_chip *chip) * endian format. For this reason, vtpm doesn't need conversion * but physical tpm needs the conversion. */ - if (of_property_match_string(np, "compatible", "IBM,vtpm") < 0) { + if (of_property_match_string(np, "compatible", "IBM,vtpm") < 0 && + of_property_match_string(np, "compatible", "IBM,vtpm20") < 0) { size = be32_to_cpup((__force __be32 *)sizep); base = be64_to_cpup((__force __be64 *)basep); } else { diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c index 739b1d9d16b6..2c96977ad080 100644 --- a/drivers/char/tpm/eventlog/tpm1.c +++ b/drivers/char/tpm/eventlog/tpm1.c @@ -115,6 +115,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v, u32 converted_event_size; u32 converted_event_type; + (*pos)++; converted_event_size = do_endian_conversion(event->event_size); v += sizeof(struct tcpa_event) + converted_event_size; @@ -132,7 +133,6 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v, ((v + sizeof(struct tcpa_event) + converted_event_size) > limit)) return NULL; - (*pos)++; return v; } diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c index b9aeda1cbcd7..e741b1157525 100644 --- a/drivers/char/tpm/eventlog/tpm2.c +++ b/drivers/char/tpm/eventlog/tpm2.c @@ -94,6 +94,7 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v, size_t event_size; void *marker; + (*pos)++; event_header = log->bios_event_log; if (v == SEQ_START_TOKEN) { @@ -118,7 +119,6 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v, if (((v + event_size) >= limit) || (event_size == 0)) return NULL; - (*pos)++; return v; } diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 3d6d394a8661..58073836b555 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -596,9 +596,7 @@ int tpm_chip_register(struct tpm_chip *chip) tpm_sysfs_add_device(chip); - rc = tpm_bios_log_setup(chip); - if (rc != 0 && rc != -ENODEV) - return rc; + tpm_bios_log_setup(chip); tpm_add_ppi(chip); diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 5620747da0cf..0fbcede241ea 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -226,6 +226,7 @@ int tpm2_auto_startup(struct tpm_chip *chip); void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); int tpm2_probe(struct tpm_chip *chip); +int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip); int tpm2_find_cc(struct 
tpm_chip *chip, u32 cc); int tpm2_init_space(struct tpm_space *space); void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space); @@ -235,7 +236,7 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf, size_t *bufsiz); -int tpm_bios_log_setup(struct tpm_chip *chip); +void tpm_bios_log_setup(struct tpm_chip *chip); void tpm_bios_log_teardown(struct tpm_chip *chip); int tpm_dev_common_init(void); void tpm_dev_common_exit(void); diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index 13696deceae8..76f67b155bd5 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -525,6 +525,8 @@ static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index) return 0; } + bank->crypto_id = HASH_ALGO__LAST; + return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size); } @@ -613,7 +615,7 @@ out: return rc; } -static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip) +int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip) { struct tpm_buf buf; u32 nr_commands; diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 78cc52690177..1a49db9e108e 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -29,6 +29,7 @@ static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm"; static const struct vio_device_id tpm_ibmvtpm_device_table[] = { { "IBM,vtpm", "IBM,vtpm"}, + { "IBM,vtpm", "IBM,vtpm20"}, { "", "" } }; MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table); @@ -571,6 +572,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance) */ while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) { ibmvtpm_crq_process(crq, ibmvtpm); + wake_up_interruptible(&ibmvtpm->crq_queue.wq); crq->valid = 0; smp_wmb(); } @@ -618,6 +620,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, } crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr); + init_waitqueue_head(&crq_q->wq); ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr, CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL); @@ -670,6 +673,20 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, if (rc) goto init_irq_cleanup; + if (!strcmp(id->compat, "IBM,vtpm20")) { + chip->flags |= TPM_CHIP_FLAG_TPM2; + rc = tpm2_get_cc_attrs_tbl(chip); + if (rc) + goto init_irq_cleanup; + } + + if (!wait_event_timeout(ibmvtpm->crq_queue.wq, + ibmvtpm->rtce_buf != NULL, + HZ)) { + dev_err(dev, "CRQ response timed out\n"); + goto init_irq_cleanup; + } + return tpm_chip_register(chip); init_irq_cleanup: do { diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h index 7983f1a33267..b92aa7d3e93e 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.h +++ b/drivers/char/tpm/tpm_ibmvtpm.h @@ -26,6 +26,7 @@ struct ibmvtpm_crq_queue { struct ibmvtpm_crq *crq_addr; u32 index; u32 num_entry; + wait_queue_head_t wq; }; struct ibmvtpm_dev { diff --git a/drivers/char/tpm/tpm_tis_spi_cr50.c b/drivers/char/tpm/tpm_tis_spi_cr50.c index 37d72e818335..ea759af25634 100644 --- a/drivers/char/tpm/tpm_tis_spi_cr50.c +++ b/drivers/char/tpm/tpm_tis_spi_cr50.c @@ -132,7 +132,12 @@ static void cr50_wake_if_needed(struct cr50_spi_phy *cr50_phy) if (cr50_needs_waking(cr50_phy)) { /* Assert CS, wait 1 msec, deassert CS */ - struct spi_transfer spi_cs_wake = { .delay_usecs = 1000 }; + struct spi_transfer spi_cs_wake = { + .delay = { + .value = 1000, + .unit = SPI_DELAY_UNIT_USECS + } + }; spi_sync_transfer(phy->spi_device, &spi_cs_wake, 1); /* Wait for it to fully wake */ 
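The TPM SPI hunks above and below both move from the removed per-transfer delay_usecs field to the spi_delay structure, which pairs the delay value with an explicit unit. A minimal sketch of the new idiom, mirroring the cr50 wake pulse (the helper name is illustrative, not part of the patch):

#include <linux/spi/spi.h>

/* Pulse chip-select for ~1 ms using an empty transfer that only carries a delay. */
static int example_cr50_wake(struct spi_device *spi)
{
	struct spi_transfer cs_wake = {
		.delay = {
			.value = 1000,
			.unit  = SPI_DELAY_UNIT_USECS,
		},
	};

	return spi_sync_transfer(spi, &cs_wake, 1);
}

Carrying the unit explicitly lets callers express delays in nanoseconds or SCK clock cycles as well, rather than overloading a microseconds-only field.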
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi_main.c index d1754fd6c573..d96755935529 100644 --- a/drivers/char/tpm/tpm_tis_spi.c +++ b/drivers/char/tpm/tpm_tis_spi_main.c @@ -110,7 +110,8 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, spi_xfer.cs_change = 0; spi_xfer.len = transfer_len; - spi_xfer.delay_usecs = 5; + spi_xfer.delay.value = 5; + spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS; if (in) { spi_xfer.tx_buf = NULL; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index f0f2b599fd7e..95adf6c6db3d 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -4713,7 +4713,7 @@ EXPORT_SYMBOL(of_clk_get_by_name); * * Returns: The number of clocks that are possible parents of this node */ -unsigned int of_clk_get_parent_count(struct device_node *np) +unsigned int of_clk_get_parent_count(const struct device_node *np) { int count; @@ -4725,7 +4725,7 @@ unsigned int of_clk_get_parent_count(struct device_node *np) } EXPORT_SYMBOL_GPL(of_clk_get_parent_count); -const char *of_clk_get_parent_name(struct device_node *np, int index) +const char *of_clk_get_parent_name(const struct device_node *np, int index) { struct of_phandle_args clkspec; struct property *prop; diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index f6c120cca0d4..cf192907b7dc 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -560,7 +560,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00); hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80); hws[IMX8MP_CLK_HDMI_APB] = imx8m_clk_hw_composite("hdmi_apb", imx8mp_media_apb_sels, ccm_base + 0x8b00); - hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_apb_sels, ccm_base + 0x8b80); + hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_axi_sels, ccm_base + 0x8b80); hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00); hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80); hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00); @@ -686,7 +686,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_CAN1_ROOT] = imx_clk_hw_gate2("can1_root_clk", "can1", ccm_base + 0x4350, 0); hws[IMX8MP_CLK_CAN2_ROOT] = imx_clk_hw_gate2("can2_root_clk", "can2", ccm_base + 0x4360, 0); hws[IMX8MP_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_root_clk", "ipg_root", ccm_base + 0x43a0, 0); - hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "enet_axi", ccm_base + 0x43b0, 0); + hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "sim_enet_root_clk", ccm_base + 0x43b0, 0); hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0); hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", ccm_base + 0x4450, 0); hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core_div", ccm_base + 0x4460, 0); diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c index fbef740704d0..b8b2072742a5 100644 --- a/drivers/clk/imx/clk-scu.c +++ b/drivers/clk/imx/clk-scu.c @@ -43,12 +43,12 @@ struct imx_sc_msg_req_set_clock_rate { __le32 rate; __le16 resource; u8 clk; -} __packed; +} __packed __aligned(4); struct 
req_get_clock_rate { __le16 resource; u8 clk; -} __packed; +} __packed __aligned(4); struct resp_get_clock_rate { __le32 rate; @@ -84,7 +84,7 @@ struct imx_sc_msg_get_clock_parent { struct req_get_clock_parent { __le16 resource; u8 clk; - } __packed req; + } __packed __aligned(4) req; struct resp_get_clock_parent { u8 parent; } resp; @@ -121,7 +121,7 @@ struct imx_sc_msg_req_clock_enable { u8 clk; u8 enable; u8 autog; -} __packed; +} __packed __aligned(4); static inline struct clk_scu *to_clk_scu(struct clk_hw *hw) { diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c index dd7af41e47eb..0a5d395bce93 100644 --- a/drivers/clk/qcom/dispcc-sc7180.c +++ b/drivers/clk/qcom/dispcc-sc7180.c @@ -592,24 +592,6 @@ static struct clk_branch disp_cc_mdss_rot_clk = { }, }; -static struct clk_branch disp_cc_mdss_rscc_ahb_clk = { - .halt_reg = 0x400c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x400c, - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "disp_cc_mdss_rscc_ahb_clk", - .parent_data = &(const struct clk_parent_data){ - .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch disp_cc_mdss_rscc_vsync_clk = { .halt_reg = 0x4008, .halt_check = BRANCH_HALT, @@ -687,7 +669,6 @@ static struct clk_regmap *disp_cc_sc7180_clocks[] = { [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr, [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr, [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr, - [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr, [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr, [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr, [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr, diff --git a/drivers/clk/qcom/videocc-sc7180.c b/drivers/clk/qcom/videocc-sc7180.c index c363c3cc544e..276e5ecd4840 100644 --- a/drivers/clk/qcom/videocc-sc7180.c +++ b/drivers/clk/qcom/videocc-sc7180.c @@ -97,7 +97,7 @@ static struct clk_branch video_cc_vcodec0_axi_clk = { static struct clk_branch video_cc_vcodec0_core_clk = { .halt_reg = 0x890, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0x890, .enable_mask = BIT(0), diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c index af3e7805769e..e5538d577ce5 100644 --- a/drivers/clk/ti/clk-43xx.c +++ b/drivers/clk/ti/clk-43xx.c @@ -78,7 +78,7 @@ static const struct omap_clkctrl_reg_data am4_gfx_l3_clkctrl_regs[] __initconst }; static const struct omap_clkctrl_reg_data am4_l4_rtc_clkctrl_regs[] __initconst = { - { AM4_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" }, + { AM4_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ick" }, { 0 }, }; diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c index a86859ccc61b..09aa44cb8a91 100644 --- a/drivers/clocksource/hyperv_timer.c +++ b/drivers/clocksource/hyperv_timer.c @@ -343,7 +343,8 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg) static u64 read_hv_sched_clock_tsc(void) { - return read_hv_clock_tsc() - hv_sched_clock_offset; + return (read_hv_clock_tsc() - hv_sched_clock_offset) * + (NSEC_PER_SEC / HV_CLOCK_HZ); } static void suspend_hv_clock_tsc(struct clocksource *arg) @@ -405,7 +406,8 @@ static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg) static u64 read_hv_sched_clock_msr(void) { - return read_hv_clock_msr() - hv_sched_clock_offset; + 
return (read_hv_clock_msr() - hv_sched_clock_offset) * + (NSEC_PER_SEC / HV_CLOCK_HZ); } static struct clocksource hyperv_cs_msr = { diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c index 17e67a84777d..9dab190c49b0 100644 --- a/drivers/counter/104-quad-8.c +++ b/drivers/counter/104-quad-8.c @@ -31,6 +31,7 @@ MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses"); /** * struct quad8_iio - IIO device private data structure * @counter: instance of the counter_device + * @fck_prescaler: array of filter clock prescaler configurations * @preset: array of preset values * @count_mode: array of count mode configurations * @quadrature_mode: array of quadrature mode configurations @@ -39,10 +40,12 @@ MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses"); * @preset_enable: array of set_to_preset_on_index attribute configurations * @synchronous_mode: array of index function synchronous mode configurations * @index_polarity: array of index function polarity configurations + * @cable_fault_enable: differential encoder cable status enable configurations * @base: base port address of the IIO device */ struct quad8_iio { struct counter_device counter; + unsigned int fck_prescaler[QUAD8_NUM_COUNTERS]; unsigned int preset[QUAD8_NUM_COUNTERS]; unsigned int count_mode[QUAD8_NUM_COUNTERS]; unsigned int quadrature_mode[QUAD8_NUM_COUNTERS]; @@ -51,11 +54,13 @@ struct quad8_iio { unsigned int preset_enable[QUAD8_NUM_COUNTERS]; unsigned int synchronous_mode[QUAD8_NUM_COUNTERS]; unsigned int index_polarity[QUAD8_NUM_COUNTERS]; + unsigned int cable_fault_enable; unsigned int base; }; #define QUAD8_REG_CHAN_OP 0x11 #define QUAD8_REG_INDEX_INPUT_LEVELS 0x16 +#define QUAD8_DIFF_ENCODER_CABLE_STATUS 0x17 /* Borrow Toggle flip-flop */ #define QUAD8_FLAG_BT BIT(0) /* Carry Toggle flip-flop */ @@ -84,6 +89,8 @@ struct quad8_iio { #define QUAD8_RLD_PRESET_CNTR 0x08 /* Transfer Counter to Output Latch */ #define QUAD8_RLD_CNTR_OUT 0x10 +/* Transfer Preset Register LSB to FCK Prescaler */ +#define QUAD8_RLD_PRESET_PSC 0x18 #define QUAD8_CHAN_OP_ENABLE_COUNTERS 0x00 #define QUAD8_CHAN_OP_RESET_COUNTERS 0x01 #define QUAD8_CMR_QUADRATURE_X1 0x08 @@ -1140,6 +1147,119 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter, return len; } +static ssize_t quad8_signal_cable_fault_read(struct counter_device *counter, + struct counter_signal *signal, + void *private, char *buf) +{ + const struct quad8_iio *const priv = counter->priv; + const size_t channel_id = signal->id / 2; + const bool disabled = !(priv->cable_fault_enable & BIT(channel_id)); + unsigned int status; + unsigned int fault; + + if (disabled) + return -EINVAL; + + /* Logic 0 = cable fault */ + status = inb(priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS); + + /* Mask respective channel and invert logic */ + fault = !(status & BIT(channel_id)); + + return sprintf(buf, "%u\n", fault); +} + +static ssize_t quad8_signal_cable_fault_enable_read( + struct counter_device *counter, struct counter_signal *signal, + void *private, char *buf) +{ + const struct quad8_iio *const priv = counter->priv; + const size_t channel_id = signal->id / 2; + const unsigned int enb = !!(priv->cable_fault_enable & BIT(channel_id)); + + return sprintf(buf, "%u\n", enb); +} + +static ssize_t quad8_signal_cable_fault_enable_write( + struct counter_device *counter, struct counter_signal *signal, + void *private, const char *buf, size_t len) +{ + struct quad8_iio *const priv = counter->priv; + const size_t channel_id = signal->id / 2; + bool enable; + 
int ret; + unsigned int cable_fault_enable; + + ret = kstrtobool(buf, &enable); + if (ret) + return ret; + + if (enable) + priv->cable_fault_enable |= BIT(channel_id); + else + priv->cable_fault_enable &= ~BIT(channel_id); + + /* Enable is active low in Differential Encoder Cable Status register */ + cable_fault_enable = ~priv->cable_fault_enable; + + outb(cable_fault_enable, priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS); + + return len; +} + +static ssize_t quad8_signal_fck_prescaler_read(struct counter_device *counter, + struct counter_signal *signal, void *private, char *buf) +{ + const struct quad8_iio *const priv = counter->priv; + const size_t channel_id = signal->id / 2; + + return sprintf(buf, "%u\n", priv->fck_prescaler[channel_id]); +} + +static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter, + struct counter_signal *signal, void *private, const char *buf, + size_t len) +{ + struct quad8_iio *const priv = counter->priv; + const size_t channel_id = signal->id / 2; + const int base_offset = priv->base + 2 * channel_id; + u8 prescaler; + int ret; + + ret = kstrtou8(buf, 0, &prescaler); + if (ret) + return ret; + + priv->fck_prescaler[channel_id] = prescaler; + + /* Reset Byte Pointer */ + outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); + + /* Set filter clock factor */ + outb(prescaler, base_offset); + outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC, + base_offset + 1); + + return len; +} + +static const struct counter_signal_ext quad8_signal_ext[] = { + { + .name = "cable_fault", + .read = quad8_signal_cable_fault_read + }, + { + .name = "cable_fault_enable", + .read = quad8_signal_cable_fault_enable_read, + .write = quad8_signal_cable_fault_enable_write + }, + { + .name = "filter_clock_prescaler", + .read = quad8_signal_fck_prescaler_read, + .write = quad8_signal_fck_prescaler_write + } +}; + static const struct counter_signal_ext quad8_index_ext[] = { COUNTER_SIGNAL_ENUM("index_polarity", &quad8_index_pol_enum), COUNTER_SIGNAL_ENUM_AVAILABLE("index_polarity", &quad8_index_pol_enum), @@ -1147,9 +1267,11 @@ static const struct counter_signal_ext quad8_index_ext[] = { COUNTER_SIGNAL_ENUM_AVAILABLE("synchronous_mode", &quad8_syn_mode_enum) }; -#define QUAD8_QUAD_SIGNAL(_id, _name) { \ - .id = (_id), \ - .name = (_name) \ +#define QUAD8_QUAD_SIGNAL(_id, _name) { \ + .id = (_id), \ + .name = (_name), \ + .ext = quad8_signal_ext, \ + .num_ext = ARRAY_SIZE(quad8_signal_ext) \ } #define QUAD8_INDEX_SIGNAL(_id, _name) { \ @@ -1314,6 +1436,12 @@ static int quad8_probe(struct device *dev, unsigned int id) base_offset = base[id] + 2 * i; /* Reset Byte Pointer */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); + /* Reset filter clock factor */ + outb(0, base_offset); + outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC, + base_offset + 1); + /* Reset Byte Pointer */ + outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); /* Reset Preset Register */ for (j = 0; j < 3; j++) outb(0x00, base_offset); @@ -1328,6 +1456,8 @@ static int quad8_probe(struct device *dev, unsigned int id) /* Disable index function; negative index polarity */ outb(QUAD8_CTR_IDR, base_offset + 1); } + /* Disable Differential Encoder Cable Status for all channels */ + outb(0xFF, base[id] + QUAD8_DIFF_ENCODER_CABLE_STATUS); /* Enable all counters */ outb(QUAD8_CHAN_OP_ENABLE_COUNTERS, base[id] + QUAD8_REG_CHAN_OP); diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c index 3eafccec3beb..ef2a974a2f10 100644 --- 
a/drivers/counter/stm32-timer-cnt.c +++ b/drivers/counter/stm32-timer-cnt.c @@ -8,10 +8,10 @@ * */ #include <linux/counter.h> -#include <linux/iio/iio.h> -#include <linux/iio/types.h> #include <linux/mfd/stm32-timers.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #define TIM_CCMR_CCXS (BIT(8) | BIT(0)) @@ -20,11 +20,20 @@ #define TIM_CCER_MASK (TIM_CCER_CC1P | TIM_CCER_CC1NP | \ TIM_CCER_CC2P | TIM_CCER_CC2NP) +struct stm32_timer_regs { + u32 cr1; + u32 cnt; + u32 smcr; + u32 arr; +}; + struct stm32_timer_cnt { struct counter_device counter; struct regmap *regmap; struct clk *clk; u32 ceiling; + bool enabled; + struct stm32_timer_regs bak; }; /** @@ -224,6 +233,9 @@ static ssize_t stm32_count_enable_write(struct counter_device *counter, clk_disable(priv->clk); } + /* Keep enabled state to properly handle low power states */ + priv->enabled = enable; + return len; } @@ -358,10 +370,59 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev) priv->counter.num_signals = ARRAY_SIZE(stm32_signals); priv->counter.priv = priv; + platform_set_drvdata(pdev, priv); + /* Register Counter device */ return devm_counter_register(dev, &priv->counter); } +static int __maybe_unused stm32_timer_cnt_suspend(struct device *dev) +{ + struct stm32_timer_cnt *priv = dev_get_drvdata(dev); + + /* Only take care of enabled counter: don't disturb other MFD child */ + if (priv->enabled) { + /* Backup registers that may get lost in low power mode */ + regmap_read(priv->regmap, TIM_SMCR, &priv->bak.smcr); + regmap_read(priv->regmap, TIM_ARR, &priv->bak.arr); + regmap_read(priv->regmap, TIM_CNT, &priv->bak.cnt); + regmap_read(priv->regmap, TIM_CR1, &priv->bak.cr1); + + /* Disable the counter */ + regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); + clk_disable(priv->clk); + } + + return pinctrl_pm_select_sleep_state(dev); +} + +static int __maybe_unused stm32_timer_cnt_resume(struct device *dev) +{ + struct stm32_timer_cnt *priv = dev_get_drvdata(dev); + int ret; + + ret = pinctrl_pm_select_default_state(dev); + if (ret) + return ret; + + if (priv->enabled) { + clk_enable(priv->clk); + + /* Restore registers that may have been lost */ + regmap_write(priv->regmap, TIM_SMCR, priv->bak.smcr); + regmap_write(priv->regmap, TIM_ARR, priv->bak.arr); + regmap_write(priv->regmap, TIM_CNT, priv->bak.cnt); + + /* Also re-enables the counter */ + regmap_write(priv->regmap, TIM_CR1, priv->bak.cr1); + } + + return 0; +} + +static SIMPLE_DEV_PM_OPS(stm32_timer_cnt_pm_ops, stm32_timer_cnt_suspend, + stm32_timer_cnt_resume); + static const struct of_device_id stm32_timer_cnt_of_match[] = { { .compatible = "st,stm32-timer-counter", }, {}, @@ -373,6 +434,7 @@ static struct platform_driver stm32_timer_cnt_driver = { .driver = { .name = "stm32-timer-counter", .of_match_table = stm32_timer_cnt_of_match, + .pm = &stm32_timer_cnt_pm_ops, }, }; module_platform_driver(stm32_timer_cnt_driver); diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 3858d86cf409..15c1a1231516 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -128,7 +128,7 @@ config ARM_OMAP2PLUS_CPUFREQ config ARM_QCOM_CPUFREQ_NVMEM tristate "Qualcomm nvmem based CPUFreq" - depends on ARM64 + depends on ARCH_QCOM depends on QCOM_QFPROM depends on QCOM_SMEM select PM_OPP diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index a6528388952e..62502d0e4c33 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ 
b/drivers/cpufreq/Kconfig.x86 @@ -25,7 +25,7 @@ config X86_PCC_CPUFREQ This driver adds support for the PCC interface. For details, take a look at: - <file:Documentation/cpu-freq/pcc-cpufreq.txt>. + <file:Documentation/admin-guide/pm/cpufreq_drivers.rst>. To compile this driver as a module, choose M here: the module will be called pcc-cpufreq. diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index d6f7df33ab8c..289e8ce3fd13 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -30,6 +30,7 @@ #include <asm/msr.h> #include <asm/processor.h> #include <asm/cpufeature.h> +#include <asm/cpu_device_id.h> MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); @@ -991,8 +992,8 @@ late_initcall(acpi_cpufreq_init); module_exit(acpi_cpufreq_exit); static const struct x86_cpu_id acpi_cpufreq_ids[] = { - X86_FEATURE_MATCH(X86_FEATURE_ACPI), - X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE), + X86_MATCH_FEATURE(X86_FEATURE_ACPI, NULL), + X86_MATCH_FEATURE(X86_FEATURE_HW_PSTATE, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids); diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c index e2df9d112106..f7c4206d4c90 100644 --- a/drivers/cpufreq/amd_freq_sensitivity.c +++ b/drivers/cpufreq/amd_freq_sensitivity.c @@ -18,6 +18,7 @@ #include <asm/msr.h> #include <asm/cpufeature.h> +#include <asm/cpu_device_id.h> #include "cpufreq_ondemand.h" @@ -144,7 +145,7 @@ static void __exit amd_freq_sensitivity_exit(void) module_exit(amd_freq_sensitivity_exit); static const struct x86_cpu_id amd_freq_sensitivity_ids[] = { - X86_FEATURE_MATCH(X86_FEATURE_PROC_FEEDBACK), + X86_MATCH_FEATURE(X86_FEATURE_PROC_FEEDBACK, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, amd_freq_sensitivity_ids); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index f2ae9cd455c1..cb9db16bea61 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -141,6 +141,11 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "ti,dra7", }, { .compatible = "ti,omap3", }, + { .compatible = "qcom,ipq8064", }, + { .compatible = "qcom,apq8064", }, + { .compatible = "qcom,msm8974", }, + { .compatible = "qcom,msm8960", }, + { } }; diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index d2b5f062a07b..26fe8dfb9ce6 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -363,6 +363,10 @@ static int dt_cpufreq_probe(struct platform_device *pdev) dt_cpufreq_driver.resume = data->resume; if (data->suspend) dt_cpufreq_driver.suspend = data->suspend; + if (data->get_intermediate) { + dt_cpufreq_driver.target_intermediate = data->target_intermediate; + dt_cpufreq_driver.get_intermediate = data->get_intermediate; + } } ret = cpufreq_register_driver(&dt_cpufreq_driver); diff --git a/drivers/cpufreq/cpufreq-dt.h b/drivers/cpufreq/cpufreq-dt.h index a5a45b547d0b..28c8af7ec5ef 100644 --- a/drivers/cpufreq/cpufreq-dt.h +++ b/drivers/cpufreq/cpufreq-dt.h @@ -14,6 +14,10 @@ struct cpufreq_policy; struct cpufreq_dt_platform_data { bool have_governor_per_policy; + unsigned int (*get_intermediate)(struct cpufreq_policy *policy, + unsigned int index); + int (*target_intermediate)(struct cpufreq_policy *policy, + unsigned int index); int (*suspend)(struct cpufreq_policy *policy); int (*resume)(struct cpufreq_policy *policy); }; diff --git a/drivers/cpufreq/cpufreq.c 
b/drivers/cpufreq/cpufreq.c index cbe6c94bf158..808874bccf4a 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1076,9 +1076,17 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy) pol = policy->last_policy; } else if (def_gov) { pol = cpufreq_parse_policy(def_gov->name); - } else { - return -ENODATA; + /* + * In case the default governor is neiter "performance" + * nor "powersave", fall back to the initial policy + * value set by the driver. + */ + if (pol == CPUFREQ_POLICY_UNKNOWN) + pol = policy->policy; } + if (pol != CPUFREQ_POLICY_PERFORMANCE && + pol != CPUFREQ_POLICY_POWERSAVE) + return -ENODATA; } return cpufreq_set_policy(policy, gov, pol); diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index f9bcf0f3ea30..94d959a8e954 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -90,35 +90,35 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) if (policy->fast_switch_enabled) return 0; - len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n"); - len += snprintf(buf + len, PAGE_SIZE - len, " : "); + len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n"); + len += scnprintf(buf + len, PAGE_SIZE - len, " : "); for (i = 0; i < stats->state_num; i++) { if (len >= PAGE_SIZE) break; - len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", + len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", stats->freq_table[i]); } if (len >= PAGE_SIZE) return PAGE_SIZE; - len += snprintf(buf + len, PAGE_SIZE - len, "\n"); + len += scnprintf(buf + len, PAGE_SIZE - len, "\n"); for (i = 0; i < stats->state_num; i++) { if (len >= PAGE_SIZE) break; - len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ", + len += scnprintf(buf + len, PAGE_SIZE - len, "%9u: ", stats->freq_table[i]); for (j = 0; j < stats->state_num; j++) { if (len >= PAGE_SIZE) break; - len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", + len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", stats->trans_table[i*stats->max_state+j]); } if (len >= PAGE_SIZE) break; - len += snprintf(buf + len, PAGE_SIZE - len, "\n"); + len += scnprintf(buf + len, PAGE_SIZE - len, "\n"); } if (len >= PAGE_SIZE) { diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index 45c18c6b8081..776a58bab0ff 100644 --- a/drivers/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c @@ -385,7 +385,7 @@ static struct cpufreq_driver eps_driver = { /* This driver will work only on Centaur C7 processors with * Enhanced SpeedStep/PowerSaver registers */ static const struct x86_cpu_id eps_cpu_id[] = { - { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_EST }, + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_EST, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, eps_cpu_id); diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index 2242541f7ae3..4ce5eb35dc46 100644 --- a/drivers/cpufreq/elanfreq.c +++ b/drivers/cpufreq/elanfreq.c @@ -198,7 +198,7 @@ static struct cpufreq_driver elanfreq_driver = { }; static const struct x86_cpu_id elan_id[] = { - { X86_VENDOR_AMD, 4, 10, }, + X86_MATCH_VENDOR_FAM_MODEL(AMD, 4, 10, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, elan_id); diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c index 6cb8193421ea..de206d2745fe 100644 --- a/drivers/cpufreq/imx-cpufreq-dt.c +++ b/drivers/cpufreq/imx-cpufreq-dt.c @@ -19,6 +19,8 @@ #define IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK (0xf << 8) #define OCOTP_CFG3_MKT_SEGMENT_SHIFT 6 #define OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 
6) +#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_SHIFT 5 +#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 5) /* cpufreq-dt device registered by imx-cpufreq-dt */ static struct platform_device *cpufreq_dt_pdev; @@ -31,6 +33,9 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev) int speed_grade, mkt_segment; int ret; + if (!of_find_property(cpu_dev->of_node, "cpu-supply", NULL)) + return -ENODEV; + ret = nvmem_cell_read_u32(cpu_dev, "speed_grade", &cell_value); if (ret) return ret; @@ -42,7 +47,13 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev) else speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK) >> OCOTP_CFG3_SPEED_GRADE_SHIFT; - mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT; + + if (of_machine_is_compatible("fsl,imx8mp")) + mkt_segment = (cell_value & IMX8MP_OCOTP_CFG3_MKT_SEGMENT_MASK) + >> IMX8MP_OCOTP_CFG3_MKT_SEGMENT_SHIFT; + else + mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) + >> OCOTP_CFG3_MKT_SEGMENT_SHIFT; /* * Early samples without fuses written report "0 0" which may NOT diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index 648a09a1778a..fdb2ffffbd15 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -216,31 +216,41 @@ static struct cpufreq_driver imx6q_cpufreq_driver = { #define OCOTP_CFG3_SPEED_996MHZ 0x2 #define OCOTP_CFG3_SPEED_852MHZ 0x1 -static void imx6q_opp_check_speed_grading(struct device *dev) +static int imx6q_opp_check_speed_grading(struct device *dev) { struct device_node *np; void __iomem *base; u32 val; + int ret; - np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp"); - if (!np) - return; + if (of_find_property(dev->of_node, "nvmem-cells", NULL)) { + ret = nvmem_cell_read_u32(dev, "speed_grade", &val); + if (ret) + return ret; + } else { + np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp"); + if (!np) + return -ENOENT; - base = of_iomap(np, 0); - if (!base) { - dev_err(dev, "failed to map ocotp\n"); - goto put_node; + base = of_iomap(np, 0); + of_node_put(np); + if (!base) { + dev_err(dev, "failed to map ocotp\n"); + return -EFAULT; + } + + /* + * SPEED_GRADING[1:0] defines the max speed of ARM: + * 2b'11: 1200000000Hz; + * 2b'10: 996000000Hz; + * 2b'01: 852000000Hz; -- i.MX6Q Only, exclusive with 996MHz. + * 2b'00: 792000000Hz; + * We need to set the max speed of ARM according to fuse map. + */ + val = readl_relaxed(base + OCOTP_CFG3); + iounmap(base); } - /* - * SPEED_GRADING[1:0] defines the max speed of ARM: - * 2b'11: 1200000000Hz; - * 2b'10: 996000000Hz; - * 2b'01: 852000000Hz; -- i.MX6Q Only, exclusive with 996MHz. - * 2b'00: 792000000Hz; - * We need to set the max speed of ARM according to fuse map. 
- */ - val = readl_relaxed(base + OCOTP_CFG3); val >>= OCOTP_CFG3_SPEED_SHIFT; val &= 0x3; @@ -257,9 +267,8 @@ static void imx6q_opp_check_speed_grading(struct device *dev) if (dev_pm_opp_disable(dev, 1200000000)) dev_warn(dev, "failed to disable 1.2GHz OPP\n"); } - iounmap(base); -put_node: - of_node_put(np); + + return 0; } #define OCOTP_CFG3_6UL_SPEED_696MHZ 0x2 @@ -281,6 +290,9 @@ static int imx6ul_opp_check_speed_grading(struct device *dev) np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp"); if (!np) + np = of_find_compatible_node(NULL, NULL, + "fsl,imx6ull-ocotp"); + if (!np) return -ENOENT; base = of_iomap(np, 0); @@ -378,23 +390,22 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) goto put_reg; } + /* Because we have added the OPPs here, we must free them */ + free_opp = true; + if (of_machine_is_compatible("fsl,imx6ul") || of_machine_is_compatible("fsl,imx6ull")) { ret = imx6ul_opp_check_speed_grading(cpu_dev); - if (ret) { - if (ret == -EPROBE_DEFER) - goto put_node; - + } else { + ret = imx6q_opp_check_speed_grading(cpu_dev); + } + if (ret) { + if (ret != -EPROBE_DEFER) dev_err(cpu_dev, "failed to read ocotp: %d\n", ret); - goto put_node; - } - } else { - imx6q_opp_check_speed_grading(cpu_dev); + goto out_free_opp; } - /* Because we have added the OPPs here, we must free them */ - free_opp = true; num = dev_pm_opp_get_opp_count(cpu_dev); if (num < 0) { ret = num; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index c81e1ff29069..4d1e25d1ced1 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -922,6 +922,7 @@ static void intel_pstate_update_limits(unsigned int cpu) */ if (global.turbo_disabled_mf != global.turbo_disabled) { global.turbo_disabled_mf = global.turbo_disabled; + arch_set_max_freq_ratio(global.turbo_disabled); for_each_possible_cpu(cpu) intel_pstate_update_max_freq(cpu); } else { @@ -1908,51 +1909,51 @@ static const struct pstate_funcs knl_funcs = { .get_val = core_get_val, }; -#define ICPU(model, policy) \ - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ - (unsigned long)&policy } +#define X86_MATCH(model, policy) \ + X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ + X86_FEATURE_APERFMPERF, &policy) static const struct x86_cpu_id intel_pstate_cpu_ids[] = { - ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs), - ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs), - ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_funcs), - ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs), - ICPU(INTEL_FAM6_HASWELL, core_funcs), - ICPU(INTEL_FAM6_BROADWELL, core_funcs), - ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs), - ICPU(INTEL_FAM6_HASWELL_X, core_funcs), - ICPU(INTEL_FAM6_HASWELL_L, core_funcs), - ICPU(INTEL_FAM6_HASWELL_G, core_funcs), - ICPU(INTEL_FAM6_BROADWELL_G, core_funcs), - ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs), - ICPU(INTEL_FAM6_SKYLAKE_L, core_funcs), - ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), - ICPU(INTEL_FAM6_SKYLAKE, core_funcs), - ICPU(INTEL_FAM6_BROADWELL_D, core_funcs), - ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs), - ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs), - ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs), - ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, core_funcs), - ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), + X86_MATCH(SANDYBRIDGE, core_funcs), + X86_MATCH(SANDYBRIDGE_X, core_funcs), + X86_MATCH(ATOM_SILVERMONT, silvermont_funcs), + X86_MATCH(IVYBRIDGE, core_funcs), + X86_MATCH(HASWELL, core_funcs), + X86_MATCH(BROADWELL, core_funcs), + X86_MATCH(IVYBRIDGE_X, core_funcs), + X86_MATCH(HASWELL_X, 
core_funcs), + X86_MATCH(HASWELL_L, core_funcs), + X86_MATCH(HASWELL_G, core_funcs), + X86_MATCH(BROADWELL_G, core_funcs), + X86_MATCH(ATOM_AIRMONT, airmont_funcs), + X86_MATCH(SKYLAKE_L, core_funcs), + X86_MATCH(BROADWELL_X, core_funcs), + X86_MATCH(SKYLAKE, core_funcs), + X86_MATCH(BROADWELL_D, core_funcs), + X86_MATCH(XEON_PHI_KNL, knl_funcs), + X86_MATCH(XEON_PHI_KNM, knl_funcs), + X86_MATCH(ATOM_GOLDMONT, core_funcs), + X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), + X86_MATCH(SKYLAKE_X, core_funcs), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { - ICPU(INTEL_FAM6_BROADWELL_D, core_funcs), - ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), - ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), + X86_MATCH(BROADWELL_D, core_funcs), + X86_MATCH(BROADWELL_X, core_funcs), + X86_MATCH(SKYLAKE_X, core_funcs), {} }; static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { - ICPU(INTEL_FAM6_KABYLAKE, core_funcs), + X86_MATCH(KABYLAKE, core_funcs), {} }; static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = { - ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), - ICPU(INTEL_FAM6_SKYLAKE, core_funcs), + X86_MATCH(SKYLAKE_X, core_funcs), + X86_MATCH(SKYLAKE, core_funcs), {} }; @@ -2155,15 +2156,19 @@ static void intel_pstate_adjust_policy_max(struct cpudata *cpu, } } -static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) +static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, + struct cpufreq_policy_data *policy) { - struct cpudata *cpu = all_cpu_data[policy->cpu]; - update_turbo_state(); cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, intel_pstate_get_max_freq(cpu)); intel_pstate_adjust_policy_max(cpu, policy); +} + +static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) +{ + intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy); return 0; } @@ -2243,10 +2248,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) if (ret) return ret; - if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE)) - policy->policy = CPUFREQ_POLICY_PERFORMANCE; - else - policy->policy = CPUFREQ_POLICY_POWERSAVE; + /* + * Set the policy to powersave to provide a valid fallback value in case + * the default cpufreq governor is neither powersave nor performance. 
+ */ + policy->policy = CPUFREQ_POLICY_POWERSAVE; return 0; } @@ -2268,12 +2274,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy) { struct cpudata *cpu = all_cpu_data[policy->cpu]; - update_turbo_state(); - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - intel_pstate_get_max_freq(cpu)); - - intel_pstate_adjust_policy_max(cpu, policy); - + intel_pstate_verify_cpu_policy(cpu, policy); intel_pstate_update_perf_limits(cpu, policy->min, policy->max); return 0; @@ -2725,13 +2726,14 @@ static inline void intel_pstate_request_control_from_smm(void) {} #define INTEL_PSTATE_HWP_BROADWELL 0x01 -#define ICPU_HWP(model, hwp_mode) \ - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode } +#define X86_MATCH_HWP(model, hwp_mode) \ + X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ + X86_FEATURE_HWP, hwp_mode) static const struct x86_cpu_id hwp_support_ids[] __initconst = { - ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), - ICPU_HWP(INTEL_FAM6_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), - ICPU_HWP(X86_MODEL_ANY, 0), + X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), + X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), + X86_MATCH_HWP(ANY, 0), {} }; diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 92d92e67ae0a..123fb006810d 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -910,7 +910,7 @@ static struct cpufreq_driver longhaul_driver = { }; static const struct x86_cpu_id longhaul_id[] = { - { X86_VENDOR_CENTAUR, 6 }, + X86_MATCH_VENDOR_FAM(CENTAUR, 6, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, longhaul_id); diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c index 0b08be8bff76..1caaec7c280b 100644 --- a/drivers/cpufreq/longrun.c +++ b/drivers/cpufreq/longrun.c @@ -281,8 +281,7 @@ static struct cpufreq_driver longrun_driver = { }; static const struct x86_cpu_id longrun_ids[] = { - { X86_VENDOR_TRANSMETA, X86_FAMILY_ANY, X86_MODEL_ANY, - X86_FEATURE_LONGRUN }, + X86_MATCH_VENDOR_FEATURE(TRANSMETA, X86_FEATURE_LONGRUN, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, longrun_ids); diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index efc0b46efada..bb61677c11c7 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -231,7 +231,7 @@ static struct cpufreq_driver p4clockmod_driver = { }; static const struct x86_cpu_id cpufreq_p4_id[] = { - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ACC }, + X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_ACC, NULL), {} }; diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index 0196f8129597..41eefef95d87 100644 --- a/drivers/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c @@ -258,8 +258,8 @@ static struct cpufreq_driver powernow_k6_driver = { }; static const struct x86_cpu_id powernow_k6_ids[] = { - { X86_VENDOR_AMD, 5, 12 }, - { X86_VENDOR_AMD, 5, 13 }, + X86_MATCH_VENDOR_FAM_MODEL(AMD, 5, 12, NULL), + X86_MATCH_VENDOR_FAM_MODEL(AMD, 5, 13, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, powernow_k6_ids); diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 5e5171d3eece..5d515fc34836 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -109,7 +109,7 @@ static int check_fsb(unsigned int fsbspeed) } static const struct x86_cpu_id powernow_k7_cpuids[] = { - { X86_VENDOR_AMD, 6, }, + X86_MATCH_VENDOR_FAM(AMD, 6, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, powernow_k7_cpuids); diff --git 
a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 2db2f1739e09..3984959eed1d 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -452,7 +452,7 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, static const struct x86_cpu_id powernow_k8_ids[] = { /* IO based frequency switching */ - { X86_VENDOR_AMD, 0xf }, + X86_MATCH_VENDOR_FAM(AMD, 0xf, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids); diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index f0d2d5035413..a1b8238872a2 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -49,12 +49,14 @@ struct qcom_cpufreq_drv; struct qcom_cpufreq_match_data { int (*get_version)(struct device *cpu_dev, struct nvmem_cell *speedbin_nvmem, + char **pvs_name, struct qcom_cpufreq_drv *drv); const char **genpd_names; }; struct qcom_cpufreq_drv { - struct opp_table **opp_tables; + struct opp_table **names_opp_tables; + struct opp_table **hw_opp_tables; struct opp_table **genpd_opp_tables; u32 versions; const struct qcom_cpufreq_match_data *data; @@ -62,6 +64,84 @@ struct qcom_cpufreq_drv { static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev; +static void get_krait_bin_format_a(struct device *cpu_dev, + int *speed, int *pvs, int *pvs_ver, + struct nvmem_cell *pvs_nvmem, u8 *buf) +{ + u32 pte_efuse; + + pte_efuse = *((u32 *)buf); + + *speed = pte_efuse & 0xf; + if (*speed == 0xf) + *speed = (pte_efuse >> 4) & 0xf; + + if (*speed == 0xf) { + *speed = 0; + dev_warn(cpu_dev, "Speed bin: Defaulting to %d\n", *speed); + } else { + dev_dbg(cpu_dev, "Speed bin: %d\n", *speed); + } + + *pvs = (pte_efuse >> 10) & 0x7; + if (*pvs == 0x7) + *pvs = (pte_efuse >> 13) & 0x7; + + if (*pvs == 0x7) { + *pvs = 0; + dev_warn(cpu_dev, "PVS bin: Defaulting to %d\n", *pvs); + } else { + dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs); + } +} + +static void get_krait_bin_format_b(struct device *cpu_dev, + int *speed, int *pvs, int *pvs_ver, + struct nvmem_cell *pvs_nvmem, u8 *buf) +{ + u32 pte_efuse, redundant_sel; + + pte_efuse = *((u32 *)buf); + redundant_sel = (pte_efuse >> 24) & 0x7; + + *pvs_ver = (pte_efuse >> 4) & 0x3; + + switch (redundant_sel) { + case 1: + *pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7); + *speed = (pte_efuse >> 27) & 0xf; + break; + case 2: + *pvs = (pte_efuse >> 27) & 0xf; + *speed = pte_efuse & 0x7; + break; + default: + /* 4 bits of PVS are in efuse register bits 31, 8-6. */ + *pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7); + *speed = pte_efuse & 0x7; + } + + /* Check SPEED_BIN_BLOW_STATUS */ + if (pte_efuse & BIT(3)) { + dev_dbg(cpu_dev, "Speed bin: %d\n", *speed); + } else { + dev_warn(cpu_dev, "Speed bin not set. Defaulting to 0!\n"); + *speed = 0; + } + + /* Check PVS_BLOW_STATUS */ + pte_efuse = *(((u32 *)buf) + 4); + pte_efuse &= BIT(21); + if (pte_efuse) { + dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs); + } else { + dev_warn(cpu_dev, "PVS bin not set. 
Defaulting to 0!\n"); + *pvs = 0; + } + + dev_dbg(cpu_dev, "PVS version: %d\n", *pvs_ver); +} + static enum _msm8996_version qcom_cpufreq_get_msm_id(void) { size_t len; @@ -93,11 +173,13 @@ static enum _msm8996_version qcom_cpufreq_get_msm_id(void) static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev, struct nvmem_cell *speedbin_nvmem, + char **pvs_name, struct qcom_cpufreq_drv *drv) { size_t len; u8 *speedbin; enum _msm8996_version msm8996_version; + *pvs_name = NULL; msm8996_version = qcom_cpufreq_get_msm_id(); if (NUM_OF_MSM8996_VERSIONS == msm8996_version) { @@ -125,10 +207,51 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev, return 0; } +static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, + struct nvmem_cell *speedbin_nvmem, + char **pvs_name, + struct qcom_cpufreq_drv *drv) +{ + int speed = 0, pvs = 0, pvs_ver = 0; + u8 *speedbin; + size_t len; + + speedbin = nvmem_cell_read(speedbin_nvmem, &len); + + if (IS_ERR(speedbin)) + return PTR_ERR(speedbin); + + switch (len) { + case 4: + get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver, + speedbin_nvmem, speedbin); + break; + case 8: + get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver, + speedbin_nvmem, speedbin); + break; + default: + dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n"); + return -ENODEV; + } + + snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d", + speed, pvs, pvs_ver); + + drv->versions = (1 << speed); + + kfree(speedbin); + return 0; +} + static const struct qcom_cpufreq_match_data match_data_kryo = { .get_version = qcom_cpufreq_kryo_name_version, }; +static const struct qcom_cpufreq_match_data match_data_krait = { + .get_version = qcom_cpufreq_krait_name_version, +}; + static const char *qcs404_genpd_names[] = { "cpr", NULL }; static const struct qcom_cpufreq_match_data match_data_qcs404 = { @@ -141,6 +264,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) struct nvmem_cell *speedbin_nvmem; struct device_node *np; struct device *cpu_dev; + char *pvs_name = "speedXX-pvsXX-vXX"; unsigned cpu; const struct of_device_id *match; int ret; @@ -153,7 +277,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) if (!np) return -ENOENT; - ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu"); + ret = of_device_is_compatible(np, "operating-points-v2-qcom-cpu"); if (!ret) { of_node_put(np); return -ENOENT; @@ -181,7 +305,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) goto free_drv; } - ret = drv->data->get_version(cpu_dev, speedbin_nvmem, drv); + ret = drv->data->get_version(cpu_dev, + speedbin_nvmem, &pvs_name, drv); if (ret) { nvmem_cell_put(speedbin_nvmem); goto free_drv; @@ -190,12 +315,20 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) } of_node_put(np); - drv->opp_tables = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tables), + drv->names_opp_tables = kcalloc(num_possible_cpus(), + sizeof(*drv->names_opp_tables), GFP_KERNEL); - if (!drv->opp_tables) { + if (!drv->names_opp_tables) { ret = -ENOMEM; goto free_drv; } + drv->hw_opp_tables = kcalloc(num_possible_cpus(), + sizeof(*drv->hw_opp_tables), + GFP_KERNEL); + if (!drv->hw_opp_tables) { + ret = -ENOMEM; + goto free_opp_names; + } drv->genpd_opp_tables = kcalloc(num_possible_cpus(), sizeof(*drv->genpd_opp_tables), @@ -213,11 +346,23 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) } if (drv->data->get_version) { - drv->opp_tables[cpu] = - dev_pm_opp_set_supported_hw(cpu_dev, - &drv->versions, 1); - if 
(IS_ERR(drv->opp_tables[cpu])) { - ret = PTR_ERR(drv->opp_tables[cpu]); + + if (pvs_name) { + drv->names_opp_tables[cpu] = dev_pm_opp_set_prop_name( + cpu_dev, + pvs_name); + if (IS_ERR(drv->names_opp_tables[cpu])) { + ret = PTR_ERR(drv->names_opp_tables[cpu]); + dev_err(cpu_dev, "Failed to add OPP name %s\n", + pvs_name); + goto free_opp; + } + } + + drv->hw_opp_tables[cpu] = dev_pm_opp_set_supported_hw( + cpu_dev, &drv->versions, 1); + if (IS_ERR(drv->hw_opp_tables[cpu])) { + ret = PTR_ERR(drv->hw_opp_tables[cpu]); dev_err(cpu_dev, "Failed to set supported hardware\n"); goto free_genpd_opp; @@ -259,11 +404,18 @@ free_genpd_opp: kfree(drv->genpd_opp_tables); free_opp: for_each_possible_cpu(cpu) { - if (IS_ERR_OR_NULL(drv->opp_tables[cpu])) + if (IS_ERR_OR_NULL(drv->names_opp_tables[cpu])) + break; + dev_pm_opp_put_prop_name(drv->names_opp_tables[cpu]); + } + for_each_possible_cpu(cpu) { + if (IS_ERR_OR_NULL(drv->hw_opp_tables[cpu])) break; - dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]); + dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]); } - kfree(drv->opp_tables); + kfree(drv->hw_opp_tables); +free_opp_names: + kfree(drv->names_opp_tables); free_drv: kfree(drv); @@ -278,13 +430,16 @@ static int qcom_cpufreq_remove(struct platform_device *pdev) platform_device_unregister(cpufreq_dt_pdev); for_each_possible_cpu(cpu) { - if (drv->opp_tables[cpu]) - dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]); + if (drv->names_opp_tables[cpu]) + dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]); + if (drv->hw_opp_tables[cpu]) + dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]); if (drv->genpd_opp_tables[cpu]) dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]); } - kfree(drv->opp_tables); + kfree(drv->names_opp_tables); + kfree(drv->hw_opp_tables); kfree(drv->genpd_opp_tables); kfree(drv); @@ -303,6 +458,10 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst = { { .compatible = "qcom,apq8096", .data = &match_data_kryo }, { .compatible = "qcom,msm8996", .data = &match_data_kryo }, { .compatible = "qcom,qcs404", .data = &match_data_qcs404 }, + { .compatible = "qcom,ipq8064", .data = &match_data_krait }, + { .compatible = "qcom,apq8064", .data = &match_data_krait }, + { .compatible = "qcom,msm8974", .data = &match_data_krait }, + { .compatible = "qcom,msm8960", .data = &match_data_krait }, {}, }; diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c index c6f647babaad..73a208559fe2 100644 --- a/drivers/cpufreq/sc520_freq.c +++ b/drivers/cpufreq/sc520_freq.c @@ -95,7 +95,7 @@ static struct cpufreq_driver sc520_freq_driver = { }; static const struct x86_cpu_id sc520_ids[] = { - { X86_VENDOR_AMD, 4, 9 }, + X86_MATCH_VENDOR_FAM_MODEL(AMD, 4, 9, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, sc520_ids); diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index b49f494e0285..75b10ecdb60f 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c @@ -520,18 +520,12 @@ static struct cpufreq_driver centrino_driver = { * or ASCII model IDs. 
*/ static const struct x86_cpu_id centrino_ids[] = { - { X86_VENDOR_INTEL, 6, 9, X86_FEATURE_EST }, - { X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST }, - { X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST }, - { X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST }, - { X86_VENDOR_INTEL, 15, 3, X86_FEATURE_EST }, - { X86_VENDOR_INTEL, 15, 4, X86_FEATURE_EST }, + X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 9, X86_FEATURE_EST, NULL), + X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 13, X86_FEATURE_EST, NULL), + X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 3, X86_FEATURE_EST, NULL), + X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 4, X86_FEATURE_EST, NULL), {} }; -#if 0 -/* Autoload or not? Do not for now. */ -MODULE_DEVICE_TABLE(x86cpu, centrino_ids); -#endif /** * centrino_init - initializes the Enhanced SpeedStep CPUFreq driver diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index 547fd7af5bf5..f2076d72bf39 100644 --- a/drivers/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c @@ -319,15 +319,11 @@ static struct cpufreq_driver speedstep_driver = { }; static const struct x86_cpu_id ss_smi_ids[] = { - { X86_VENDOR_INTEL, 6, 0xb, }, - { X86_VENDOR_INTEL, 6, 0x8, }, - { X86_VENDOR_INTEL, 15, 2 }, + X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0x8, 0), + X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0xb, 0), + X86_MATCH_VENDOR_FAM_MODEL(INTEL, 15, 0x2, 0), {} }; -#if 0 -/* Autoload or not? Do not for now. */ -MODULE_DEVICE_TABLE(x86cpu, ss_smi_ids); -#endif /** * speedstep_init - initializes the SpeedStep CPUFreq driver diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index eeb31bc21cc9..0ce9d4b6dfcc 100644 --- a/drivers/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c @@ -299,15 +299,11 @@ static struct cpufreq_driver speedstep_driver = { }; static const struct x86_cpu_id ss_smi_ids[] = { - { X86_VENDOR_INTEL, 6, 0xb, }, - { X86_VENDOR_INTEL, 6, 0x8, }, - { X86_VENDOR_INTEL, 15, 2 }, + X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0x8, 0), + X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0xb, 0), + X86_MATCH_VENDOR_FAM_MODEL(INTEL, 15, 0x2, 0), {} }; -#if 0 -/* Not auto loaded currently */ -MODULE_DEVICE_TABLE(x86cpu, ss_smi_ids); -#endif /** * speedstep_init - initializes the SpeedStep CPUFreq driver diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 557cb513bf7f..ab0de27539ad 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -25,11 +25,14 @@ #define DRA7_EFUSE_HAS_OD_MPU_OPP 11 #define DRA7_EFUSE_HAS_HIGH_MPU_OPP 15 +#define DRA76_EFUSE_HAS_PLUS_MPU_OPP 18 #define DRA7_EFUSE_HAS_ALL_MPU_OPP 23 +#define DRA76_EFUSE_HAS_ALL_MPU_OPP 24 #define DRA7_EFUSE_NOM_MPU_OPP BIT(0) #define DRA7_EFUSE_OD_MPU_OPP BIT(1) #define DRA7_EFUSE_HIGH_MPU_OPP BIT(2) +#define DRA76_EFUSE_PLUS_MPU_OPP BIT(3) #define OMAP3_CONTROL_DEVICE_STATUS 0x4800244C #define OMAP3_CONTROL_IDCODE 0x4830A204 @@ -80,6 +83,10 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data, */ switch (efuse) { + case DRA76_EFUSE_HAS_PLUS_MPU_OPP: + case DRA76_EFUSE_HAS_ALL_MPU_OPP: + calculated_efuse |= DRA76_EFUSE_PLUS_MPU_OPP; + /* Fall through */ case DRA7_EFUSE_HAS_ALL_MPU_OPP: case DRA7_EFUSE_HAS_HIGH_MPU_OPP: calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP; diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c index b0ce9bc78113..db124bc1ca2c 100644 --- a/drivers/cpuidle/cpuidle-haltpoll.c +++ b/drivers/cpuidle/cpuidle-haltpoll.c @@ -18,6 +18,10 @@ #include <linux/kvm_para.h> #include 
<linux/cpuidle_haltpoll.h> +static bool force __read_mostly; +module_param(force, bool, 0444); +MODULE_PARM_DESC(force, "Load unconditionally"); + static struct cpuidle_device __percpu *haltpoll_cpuidle_devices; static enum cpuhp_state haltpoll_hp_state; @@ -90,6 +94,11 @@ static void haltpoll_uninit(void) haltpoll_cpuidle_devices = NULL; } +static bool haltpool_want(void) +{ + return kvm_para_has_hint(KVM_HINTS_REALTIME) || force; +} + static int __init haltpoll_init(void) { int ret; @@ -101,8 +110,7 @@ static int __init haltpoll_init(void) cpuidle_poll_state_init(drv); - if (!kvm_para_available() || - !kvm_para_has_hint(KVM_HINTS_REALTIME)) + if (!kvm_para_available() || !haltpool_want()) return -ENODEV; ret = cpuidle_register_driver(drv); diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index edd7a54ef0d3..bae9140a65a5 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -160,6 +160,29 @@ int __init psci_dt_parse_state_node(struct device_node *np, u32 *state) return 0; } +static int __init psci_dt_cpu_init_topology(struct cpuidle_driver *drv, + struct psci_cpuidle_data *data, + unsigned int state_count, int cpu) +{ + /* Currently limit the hierarchical topology to be used in OSI mode. */ + if (!psci_has_osi_support()) + return 0; + + data->dev = psci_dt_attach_cpu(cpu); + if (IS_ERR_OR_NULL(data->dev)) + return PTR_ERR_OR_ZERO(data->dev); + + /* + * Using the deepest state for the CPU to trigger a potential selection + * of a shared state for the domain, assumes the domain states are all + * deeper states. + */ + drv->states[state_count - 1].enter = psci_enter_domain_idle_state; + psci_cpuidle_use_cpuhp = true; + + return 0; +} + static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv, struct device_node *cpu_node, unsigned int state_count, int cpu) @@ -193,25 +216,10 @@ static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv, goto free_mem; } - /* Currently limit the hierarchical topology to be used in OSI mode. */ - if (psci_has_osi_support()) { - data->dev = psci_dt_attach_cpu(cpu); - if (IS_ERR(data->dev)) { - ret = PTR_ERR(data->dev); - goto free_mem; - } - - /* - * Using the deepest state for the CPU to trigger a potential - * selection of a shared state for the domain, assumes the - * domain states are all deeper states. - */ - if (data->dev) { - drv->states[state_count - 1].enter = - psci_enter_domain_idle_state; - psci_cpuidle_use_cpuhp = true; - } - } + /* Initialize optional data, used for the hierarchical topology. */ + ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu); + if (ret < 0) + goto free_mem; /* Idle states parsed correctly, store them in the per-cpu struct. */ data->psci_states = psci_states; diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index de81298051b3..c149d9e20dfd 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -736,53 +736,15 @@ int cpuidle_register(struct cpuidle_driver *drv, } EXPORT_SYMBOL_GPL(cpuidle_register); -#ifdef CONFIG_SMP - -/* - * This function gets called when a part of the kernel has a new latency - * requirement. This means we need to get all processors out of their C-state, - * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that - * wakes them all right up. 
- */ -static int cpuidle_latency_notify(struct notifier_block *b, - unsigned long l, void *v) -{ - wake_up_all_idle_cpus(); - return NOTIFY_OK; -} - -static struct notifier_block cpuidle_latency_notifier = { - .notifier_call = cpuidle_latency_notify, -}; - -static inline void latency_notifier_init(struct notifier_block *n) -{ - pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n); -} - -#else /* CONFIG_SMP */ - -#define latency_notifier_init(x) do { } while (0) - -#endif /* CONFIG_SMP */ - /** * cpuidle_init - core initializer */ static int __init cpuidle_init(void) { - int ret; - if (cpuidle_disabled()) return -ENODEV; - ret = cpuidle_add_interface(cpu_subsys.dev_root); - if (ret) - return ret; - - latency_notifier_init(&cpuidle_latency_notifier); - - return 0; + return cpuidle_add_interface(cpu_subsys.dev_root); } module_param(off, int, 0444); diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index e48271e117a3..29acaf48e575 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c @@ -109,9 +109,9 @@ int cpuidle_register_governor(struct cpuidle_governor *gov) */ s64 cpuidle_governor_latency_req(unsigned int cpu) { - int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); struct device *device = get_cpu_device(cpu); int device_req = dev_pm_qos_raw_resume_latency(device); + int global_req = cpu_latency_qos_limit(); if (device_req > global_req) device_req = global_req; diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 594d6b1695d5..62c6fe88b212 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -474,7 +474,7 @@ static struct skcipher_alg cbc_aes_alg = { }; static const struct x86_cpu_id padlock_cpu_id[] = { - X86_FEATURE_MATCH(X86_FEATURE_XCRYPT), + X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id); diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index c826abe79e79..a697a4a3f2d0 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -490,7 +490,7 @@ static struct shash_alg sha256_alg_nano = { }; static const struct x86_cpu_id padlock_sha_ids[] = { - X86_FEATURE_MATCH(X86_FEATURE_PHE), + X86_MATCH_FEATURE(X86_FEATURE_PHE, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids); diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index cceee8bc3c2f..6fecd11dafdd 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -550,14 +550,14 @@ out: EXPORT_SYMBOL(devfreq_monitor_resume); /** - * devfreq_interval_update() - Update device devfreq monitoring interval + * devfreq_update_interval() - Update device devfreq monitoring interval * @devfreq: the devfreq instance. * @delay: new polling interval to be set. * * Helper function to set new load monitoring polling interval. Function - * to be called from governor in response to DEVFREQ_GOV_INTERVAL event. + * to be called from governor in response to DEVFREQ_GOV_UPDATE_INTERVAL event. 
*/ -void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay) +void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay) { unsigned int cur_delay = devfreq->profile->polling_ms; unsigned int new_delay = *delay; @@ -597,7 +597,7 @@ void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay) out: mutex_unlock(&devfreq->lock); } -EXPORT_SYMBOL(devfreq_interval_update); +EXPORT_SYMBOL(devfreq_update_interval); /** * devfreq_notifier_call() - Notify that the device frequency requirements @@ -705,13 +705,13 @@ static void devfreq_dev_release(struct device *dev) if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) { err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req); - if (err) + if (err < 0) dev_warn(dev->parent, "Failed to remove max_freq request: %d\n", err); } if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) { err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req); - if (err) + if (err < 0) dev_warn(dev->parent, "Failed to remove min_freq request: %d\n", err); } @@ -738,7 +738,6 @@ struct devfreq *devfreq_add_device(struct device *dev, { struct devfreq *devfreq; struct devfreq_governor *governor; - static atomic_t devfreq_no = ATOMIC_INIT(-1); int err = 0; if (!dev || !profile || !governor_name) { @@ -800,8 +799,7 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev); atomic_set(&devfreq->suspend_count, 0); - dev_set_name(&devfreq->dev, "devfreq%d", - atomic_inc_return(&devfreq_no)); + dev_set_name(&devfreq->dev, "%s", dev_name(dev)); err = device_register(&devfreq->dev); if (err) { mutex_unlock(&devfreq->lock); @@ -1426,7 +1424,7 @@ static ssize_t polling_interval_store(struct device *dev, if (ret != 1) return -EINVAL; - df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value); + df->governor->event_handler(df, DEVFREQ_GOV_UPDATE_INTERVAL, &value); ret = count; return ret; diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h index dc7533ccc3db..ae4d0cc18359 100644 --- a/drivers/devfreq/governor.h +++ b/drivers/devfreq/governor.h @@ -18,7 +18,7 @@ /* Devfreq events */ #define DEVFREQ_GOV_START 0x1 #define DEVFREQ_GOV_STOP 0x2 -#define DEVFREQ_GOV_INTERVAL 0x3 +#define DEVFREQ_GOV_UPDATE_INTERVAL 0x3 #define DEVFREQ_GOV_SUSPEND 0x4 #define DEVFREQ_GOV_RESUME 0x5 @@ -30,7 +30,7 @@ * @node: list node - contains registered devfreq governors * @name: Governor's name * @immutable: Immutable flag for governor. If the value is 1, - * this govenror is never changeable to other governor. + * this governor is never changeable to other governor. * @interrupt_driven: Devfreq core won't schedule polling work for this * governor if value is set to 1. * @get_target_freq: Returns desired operating frequency for the device. 
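For context on the rename above: a governor reacts to the new DEVFREQ_GOV_UPDATE_INTERVAL event by calling devfreq_update_interval(), exactly as the simpleondemand governor does further down in this series. Below is a minimal sketch of such a governor, assuming it lives in drivers/devfreq/ so it can include the local governor.h; the "demo" name and its always-maximum frequency policy are illustrative only and not part of these patches.

#include <linux/devfreq.h>
#include "governor.h"

/* Placeholder policy: always request the highest available frequency. */
static int demo_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	*freq = DEVFREQ_MAX_FREQ;
	return 0;
}

static int demo_event_handler(struct devfreq *devfreq,
			      unsigned int event, void *data)
{
	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);
		break;
	case DEVFREQ_GOV_UPDATE_INTERVAL:	/* was DEVFREQ_GOV_INTERVAL */
		devfreq_update_interval(devfreq, (unsigned int *)data);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(devfreq);
		break;
	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		break;
	}
	return 0;
}

static struct devfreq_governor demo_governor = {
	.name = "demo",
	.get_target_freq = demo_get_target_freq,
	.event_handler = demo_event_handler,
};

A real governor would register this structure with devfreq_add_governor() from its init path, mirroring the in-tree governors touched below.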
@@ -57,17 +57,16 @@ struct devfreq_governor { unsigned int event, void *data); }; -extern void devfreq_monitor_start(struct devfreq *devfreq); -extern void devfreq_monitor_stop(struct devfreq *devfreq); -extern void devfreq_monitor_suspend(struct devfreq *devfreq); -extern void devfreq_monitor_resume(struct devfreq *devfreq); -extern void devfreq_interval_update(struct devfreq *devfreq, - unsigned int *delay); +void devfreq_monitor_start(struct devfreq *devfreq); +void devfreq_monitor_stop(struct devfreq *devfreq); +void devfreq_monitor_suspend(struct devfreq *devfreq); +void devfreq_monitor_resume(struct devfreq *devfreq); +void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay); -extern int devfreq_add_governor(struct devfreq_governor *governor); -extern int devfreq_remove_governor(struct devfreq_governor *governor); +int devfreq_add_governor(struct devfreq_governor *governor); +int devfreq_remove_governor(struct devfreq_governor *governor); -extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq); +int devfreq_update_status(struct devfreq *devfreq, unsigned long freq); static inline int devfreq_update_stats(struct devfreq *df) { diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c index 3d809f228619..1b314e1df028 100644 --- a/drivers/devfreq/governor_simpleondemand.c +++ b/drivers/devfreq/governor_simpleondemand.c @@ -96,8 +96,8 @@ static int devfreq_simple_ondemand_handler(struct devfreq *devfreq, devfreq_monitor_stop(devfreq); break; - case DEVFREQ_GOV_INTERVAL: - devfreq_interval_update(devfreq, (unsigned int *)data); + case DEVFREQ_GOV_UPDATE_INTERVAL: + devfreq_update_interval(devfreq, (unsigned int *)data); break; case DEVFREQ_GOV_SUSPEND: diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c index af94942fcf95..0fd6c4851071 100644 --- a/drivers/devfreq/governor_userspace.c +++ b/drivers/devfreq/governor_userspace.c @@ -131,7 +131,7 @@ static int devfreq_userspace_handler(struct devfreq *devfreq, } static struct devfreq_governor devfreq_userspace = { - .name = "userspace", + .name = DEVFREQ_GOV_USERSPACE, .get_target_freq = devfreq_userspace_func, .event_handler = devfreq_userspace_handler, }; diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c index 0b65f89d74d5..28b2c7ca416e 100644 --- a/drivers/devfreq/tegra30-devfreq.c +++ b/drivers/devfreq/tegra30-devfreq.c @@ -734,7 +734,7 @@ static int tegra_governor_event_handler(struct devfreq *devfreq, devfreq_monitor_stop(devfreq); break; - case DEVFREQ_GOV_INTERVAL: + case DEVFREQ_GOV_UPDATE_INTERVAL: /* * ACTMON hardware supports up to 256 milliseconds for the * sampling period. 
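Relatedly, the userspace governor above now registers under the shared DEVFREQ_GOV_USERSPACE macro from <linux/devfreq.h>, and consumers can request it by the same macro instead of an open-coded "userspace" string. A hypothetical caller, not taken from this series:

#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical helper: attach devfreq to a device and ask for the
 * userspace governor by macro rather than by string literal. */
static int demo_enable_devfreq(struct device *dev,
			       struct devfreq_dev_profile *profile)
{
	struct devfreq *df;

	df = devm_devfreq_add_device(dev, profile,
				     DEVFREQ_GOV_USERSPACE, NULL);

	return PTR_ERR_OR_ZERO(df);
}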
@@ -745,7 +745,7 @@ static int tegra_governor_event_handler(struct devfreq *devfreq, } tegra_actmon_pause(tegra); - devfreq_interval_update(devfreq, new_delay); + devfreq_update_interval(devfreq, new_delay); ret = tegra_actmon_resume(tegra); break; diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index d4097856c86b..c343c7c10b4c 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -108,6 +108,7 @@ static int dma_buf_release(struct inode *inode, struct file *file) dma_resv_fini(dmabuf->resv); module_put(dmabuf->owner); + kfree(dmabuf->name); kfree(dmabuf); return 0; } diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index e51d836afcc7..1092d4ce723e 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1947,8 +1947,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc) return; } - spin_lock(&cohc->lock); - /* * When we reach this point, at least one queue item * should have been moved over from cohc->queue to @@ -1969,8 +1967,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc) if (coh901318_queue_start(cohc) == NULL) cohc->busy = 0; - spin_unlock(&cohc->lock); - /* * This tasklet will remove items from cohc->active * and thus terminates them. diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index c3b1283b6d31..17909fd1820f 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1151,7 +1151,7 @@ int dma_async_device_register(struct dma_device *device) } if (!device->device_release) - dev_warn(device->dev, + dev_dbg(device->dev, "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n"); kref_init(&device->ref); diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 1d7347825b95..989b7a25ca61 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -81,9 +81,9 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) dev = &idxd->pdev->dev; idxd_cdev = &wq->idxd_cdev; - dev_dbg(dev, "%s called\n", __func__); + dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq)); - if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq)) + if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) return -EBUSY; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); @@ -204,6 +204,7 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq) minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL); if (minor < 0) { rc = minor; + kfree(dev); goto ida_err; } @@ -212,7 +213,6 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq) rc = device_register(dev); if (rc < 0) { dev_err(&idxd->pdev->dev, "device register failed\n"); - put_device(dev); goto dev_reg_err; } idxd_cdev->minor = minor; @@ -221,8 +221,8 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq) dev_reg_err: ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt)); + put_device(dev); ida_err: - kfree(dev); idxd_cdev->dev = NULL; return rc; } diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 6d907fe150aa..6ca6e520a2fa 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -124,6 +124,7 @@ static int idxd_config_bus_probe(struct device *dev) rc = idxd_device_config(idxd); if (rc < 0) { spin_unlock_irqrestore(&idxd->dev_lock, flags); + module_put(THIS_MODULE); dev_warn(dev, "Device config failed: %d\n", rc); return rc; } @@ -132,6 +133,7 @@ static int idxd_config_bus_probe(struct device *dev) rc = idxd_device_enable(idxd); if (rc < 0) { spin_unlock_irqrestore(&idxd->dev_lock, flags); + module_put(THIS_MODULE); dev_warn(dev, "Device 
enable failed: %d\n", rc); return rc; } @@ -142,6 +144,7 @@ static int idxd_config_bus_probe(struct device *dev) rc = idxd_register_dma_device(idxd); if (rc < 0) { spin_unlock_irqrestore(&idxd->dev_lock, flags); + module_put(THIS_MODULE); dev_dbg(dev, "Failed to register dmaengine device\n"); return rc; } @@ -516,7 +519,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev, if (val > idxd->max_tokens) return -EINVAL; - if (val > idxd->nr_tokens) + if (val > idxd->nr_tokens + group->tokens_reserved) return -EINVAL; group->tokens_reserved = val; @@ -901,6 +904,20 @@ static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%u\n", wq->size); } +static int total_claimed_wq_size(struct idxd_device *idxd) +{ + int i; + int wq_size = 0; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + wq_size += wq->size; + } + + return wq_size; +} + static ssize_t wq_size_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -920,7 +937,7 @@ static ssize_t wq_size_store(struct device *dev, if (wq->state != IDXD_WQ_DISABLED) return -EPERM; - if (size > idxd->max_wq_size) + if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size) return -EINVAL; wq->size = size; @@ -999,12 +1016,14 @@ static ssize_t wq_type_store(struct device *dev, return -EPERM; old_type = wq->type; - if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL])) + if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE])) + wq->type = IDXD_WQT_NONE; + else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL])) wq->type = IDXD_WQT_KERNEL; else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER])) wq->type = IDXD_WQT_USER; else - wq->type = IDXD_WQT_NONE; + return -EINVAL; /* If we are changing queue type, clear the name */ if (wq->type != old_type) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 066b21a32232..4d4477df4ede 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1331,13 +1331,14 @@ static void sdma_free_chan_resources(struct dma_chan *chan) sdma_channel_synchronize(chan); - if (sdmac->event_id0) + if (sdmac->event_id0 >= 0) sdma_event_disable(sdmac, sdmac->event_id0); if (sdmac->event_id1) sdma_event_disable(sdmac, sdmac->event_id1); sdmac->event_id0 = 0; sdmac->event_id1 = 0; + sdmac->context_loaded = false; sdma_set_channel_priority(sdmac, 0); @@ -1631,7 +1632,7 @@ static int sdma_config(struct dma_chan *chan, memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); /* Set ENBLn earlier to make sure dma request triggered after that */ - if (sdmac->event_id0) { + if (sdmac->event_id0 >= 0) { if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) return -EINVAL; sdma_event_enable(sdmac, sdmac->event_id0); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 3a45079d11ec..4a750e29bfb5 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -281,7 +281,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get( /* Do not allocate if desc are waiting for ack */ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { - if (async_tx_test_ack(&dma_desc->txd)) { + if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) { list_del(&dma_desc->node); spin_unlock_irqrestore(&tdc->lock, flags); dma_desc->txd.flags = 0; @@ -756,10 +756,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc) bool was_busy; spin_lock_irqsave(&tdc->lock, flags); - if (list_empty(&tdc->pending_sg_req)) { - 
spin_unlock_irqrestore(&tdc->lock, flags); - return 0; - } if (!tdc->busy) goto skip_dma_stop; diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c index c1511298ece2..4d7561a1b3e3 100644 --- a/drivers/dma/ti/k3-udma-glue.c +++ b/drivers/dma/ti/k3-udma-glue.c @@ -564,12 +564,12 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, if (IS_ERR(flow->udma_rflow)) { ret = PTR_ERR(flow->udma_rflow); dev_err(dev, "UDMAX rflow get err %d\n", ret); - goto err; + return ret; } if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) { - xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); - return -ENODEV; + ret = -ENODEV; + goto err_rflow_put; } /* request and cfg rings */ @@ -578,7 +578,7 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, if (!flow->ringrx) { ret = -ENODEV; dev_err(dev, "Failed to get RX ring\n"); - goto err; + goto err_rflow_put; } flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc, @@ -586,19 +586,19 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, if (!flow->ringrxfdq) { ret = -ENODEV; dev_err(dev, "Failed to get RXFDQ ring\n"); - goto err; + goto err_ringrx_free; } ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg); if (ret) { dev_err(dev, "Failed to cfg ringrx %d\n", ret); - goto err; + goto err_ringrxfdq_free; } ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg); if (ret) { dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret); - goto err; + goto err_ringrxfdq_free; } if (rx_chn->remote) { @@ -648,7 +648,7 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, if (ret) { dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id, ret); - goto err; + goto err_ringrxfdq_free; } rx_chn->flows_ready++; @@ -656,8 +656,17 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, flow->udma_rflow_id, rx_chn->flows_ready); return 0; -err: - k3_udma_glue_release_rx_flow(rx_chn, flow_idx); + +err_ringrxfdq_free: + k3_ringacc_ring_free(flow->ringrxfdq); + +err_ringrx_free: + k3_ringacc_ring_free(flow->ringrx); + +err_rflow_put: + xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); + flow->udma_rflow = NULL; + return ret; } diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index ea79c2df28e0..0536866a58ce 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -5,6 +5,7 @@ */ #include <linux/kernel.h> +#include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> @@ -96,6 +97,24 @@ struct udma_match_data { u32 level_start_idx[]; }; +struct udma_hwdesc { + size_t cppi5_desc_size; + void *cppi5_desc_vaddr; + dma_addr_t cppi5_desc_paddr; + + /* TR descriptor internal pointers */ + void *tr_req_base; + struct cppi5_tr_resp_t *tr_resp_base; +}; + +struct udma_rx_flush { + struct udma_hwdesc hwdescs[2]; + + size_t buffer_size; + void *buffer_vaddr; + dma_addr_t buffer_paddr; +}; + struct udma_dev { struct dma_device ddev; struct device *dev; @@ -112,6 +131,8 @@ struct udma_dev { struct list_head desc_to_purge; spinlock_t lock; + struct udma_rx_flush rx_flush; + int tchan_cnt; int echan_cnt; int rchan_cnt; @@ -130,16 +151,6 @@ struct udma_dev { u32 psil_base; }; -struct udma_hwdesc { - size_t cppi5_desc_size; - void *cppi5_desc_vaddr; - dma_addr_t cppi5_desc_paddr; - - /* TR descriptor internal pointers */ - void *tr_req_base; - struct cppi5_tr_resp_t *tr_resp_base; -}; - struct udma_desc { struct virt_dma_desc 
vd; @@ -169,7 +180,7 @@ enum udma_chan_state { struct udma_tx_drain { struct delayed_work work; - unsigned long jiffie; + ktime_t tstamp; u32 residue; }; @@ -502,7 +513,7 @@ static bool udma_is_chan_paused(struct udma_chan *uc) { u32 val, pause_mask; - switch (uc->desc->dir) { + switch (uc->config.dir) { case DMA_DEV_TO_MEM: val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG); @@ -551,12 +562,17 @@ static void udma_sync_for_device(struct udma_chan *uc, int idx) } } +static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc) +{ + return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr; +} + static int udma_push_to_ring(struct udma_chan *uc, int idx) { struct udma_desc *d = uc->desc; - struct k3_ring *ring = NULL; - int ret = -EINVAL; + dma_addr_t paddr; + int ret; switch (uc->config.dir) { case DMA_DEV_TO_MEM: @@ -567,21 +583,37 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx) ring = uc->tchan->t_ring; break; default: - break; + return -EINVAL; } - if (ring) { - dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx); + /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */ + if (idx == -1) { + paddr = udma_get_rx_flush_hwdesc_paddr(uc); + } else { + paddr = udma_curr_cppi5_desc_paddr(d, idx); wmb(); /* Ensure that writes are not moved over this point */ udma_sync_for_device(uc, idx); - ret = k3_ringacc_ring_push(ring, &desc_addr); - uc->in_ring_cnt++; } + ret = k3_ringacc_ring_push(ring, &paddr); + if (!ret) + uc->in_ring_cnt++; + return ret; } +static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr) +{ + if (uc->config.dir != DMA_DEV_TO_MEM) + return false; + + if (addr == udma_get_rx_flush_hwdesc_paddr(uc)) + return true; + + return false; +} + static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) { struct k3_ring *ring = NULL; @@ -610,6 +642,10 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) if (cppi5_desc_is_tdcm(*addr)) return ret; + /* Check for flush descriptor */ + if (udma_desc_is_rx_flush(uc, *addr)) + return -ENOENT; + d = udma_udma_desc_from_paddr(uc, *addr); if (d) @@ -890,6 +926,9 @@ static int udma_stop(struct udma_chan *uc) switch (uc->config.dir) { case DMA_DEV_TO_MEM: + if (!uc->cyclic && !uc->desc) + udma_push_to_ring(uc, -1); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN); @@ -946,9 +985,10 @@ static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d) peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG); bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG); + /* Transfer is incomplete, store current residue and time stamp */ if (peer_bcnt < bcnt) { uc->tx_drain.residue = bcnt - peer_bcnt; - uc->tx_drain.jiffie = jiffies; + uc->tx_drain.tstamp = ktime_get(); return false; } @@ -961,35 +1001,59 @@ static void udma_check_tx_completion(struct work_struct *work) tx_drain.work.work); bool desc_done = true; u32 residue_diff; - unsigned long jiffie_diff, delay; + ktime_t time_diff; + unsigned long delay; + + while (1) { + if (uc->desc) { + /* Get previous residue and time stamp */ + residue_diff = uc->tx_drain.residue; + time_diff = uc->tx_drain.tstamp; + /* + * Get current residue and time stamp or see if + * transfer is complete + */ + desc_done = udma_is_desc_really_done(uc, uc->desc); + } - if (uc->desc) { - residue_diff = uc->tx_drain.residue; - jiffie_diff = uc->tx_drain.jiffie; - desc_done = udma_is_desc_really_done(uc, 
uc->desc); - } - - if (!desc_done) { - jiffie_diff = uc->tx_drain.jiffie - jiffie_diff; - residue_diff -= uc->tx_drain.residue; - if (residue_diff) { - /* Try to guess when we should check next time */ - residue_diff /= jiffie_diff; - delay = uc->tx_drain.residue / residue_diff / 3; - if (jiffies_to_msecs(delay) < 5) - delay = 0; - } else { - /* No progress, check again in 1 second */ - delay = HZ; + if (!desc_done) { + /* + * Find the time delta and residue delta w.r.t + * previous poll + */ + time_diff = ktime_sub(uc->tx_drain.tstamp, + time_diff) + 1; + residue_diff -= uc->tx_drain.residue; + if (residue_diff) { + /* + * Try to guess when we should check + * next time by calculating rate at + * which data is being drained at the + * peer device + */ + delay = (time_diff / residue_diff) * + uc->tx_drain.residue; + } else { + /* No progress, check again in 1 second */ + schedule_delayed_work(&uc->tx_drain.work, HZ); + break; + } + + usleep_range(ktime_to_us(delay), + ktime_to_us(delay) + 10); + continue; } - schedule_delayed_work(&uc->tx_drain.work, delay); - } else if (uc->desc) { - struct udma_desc *d = uc->desc; + if (uc->desc) { + struct udma_desc *d = uc->desc; - uc->bcnt += d->residue; - udma_start(uc); - vchan_cookie_complete(&d->vd); + uc->bcnt += d->residue; + udma_start(uc); + vchan_cookie_complete(&d->vd); + break; + } + + break; } } @@ -1033,29 +1097,27 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data) goto out; } - if (uc->cyclic) { - /* push the descriptor back to the ring */ - if (d == uc->desc) { + if (d == uc->desc) { + /* active descriptor */ + if (uc->cyclic) { udma_cyclic_packet_elapsed(uc); vchan_cyclic_callback(&d->vd); - } - } else { - bool desc_done = false; - - if (d == uc->desc) { - desc_done = udma_is_desc_really_done(uc, d); - - if (desc_done) { + } else { + if (udma_is_desc_really_done(uc, d)) { uc->bcnt += d->residue; udma_start(uc); + vchan_cookie_complete(&d->vd); } else { schedule_delayed_work(&uc->tx_drain.work, 0); } } - - if (desc_done) - vchan_cookie_complete(&d->vd); + } else { + /* + * terminated descriptor, mark the descriptor as + * completed to update the channel's cookie marker + */ + dma_cookie_complete(&d->vd.tx); } } out: @@ -1965,36 +2027,81 @@ static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc, return d; } +/** + * udma_get_tr_counters - calculate TR counters for a given length + * @len: Length of the transfer + * @align_to: Preferred alignment + * @tr0_cnt0: First TR icnt0 + * @tr0_cnt1: First TR icnt1 + * @tr1_cnt0: Second (if used) TR icnt0 + * + * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated + * For len >= SZ_64K two TRs are used in a simple way: + * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1) + * Second TR: the remaining length (tr1_cnt0) + * + * Returns the number of TRs the length needs (1 or 2) + * -EINVAL if the length can not be supported + */ +static int udma_get_tr_counters(size_t len, unsigned long align_to, + u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0) +{ + if (len < SZ_64K) { + *tr0_cnt0 = len; + *tr0_cnt1 = 1; + + return 1; + } + + if (align_to > 3) + align_to = 3; + +realign: + *tr0_cnt0 = SZ_64K - BIT(align_to); + if (len / *tr0_cnt0 >= SZ_64K) { + if (align_to) { + align_to--; + goto realign; + } + return -EINVAL; + } + + *tr0_cnt1 = len / *tr0_cnt0; + *tr1_cnt0 = len % *tr0_cnt0; + + return 2; +} + static struct udma_desc * udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl, unsigned int sglen, enum dma_transfer_direction dir, unsigned long 
tx_flags, void *context) { - enum dma_slave_buswidth dev_width; struct scatterlist *sgent; struct udma_desc *d; - size_t tr_size; struct cppi5_tr_type1_t *tr_req = NULL; + u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; unsigned int i; - u32 burst; + size_t tr_size; + int num_tr = 0; + int tr_idx = 0; - if (dir == DMA_DEV_TO_MEM) { - dev_width = uc->cfg.src_addr_width; - burst = uc->cfg.src_maxburst; - } else if (dir == DMA_MEM_TO_DEV) { - dev_width = uc->cfg.dst_addr_width; - burst = uc->cfg.dst_maxburst; - } else { - dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); + if (!is_slave_direction(dir)) { + dev_err(uc->ud->dev, "Only slave cyclic is supported\n"); return NULL; } - if (!burst) - burst = 1; + /* estimate the number of TRs we will need */ + for_each_sg(sgl, sgent, sglen, i) { + if (sg_dma_len(sgent) < SZ_64K) + num_tr++; + else + num_tr += 2; + } /* Now allocate and setup the descriptor. */ tr_size = sizeof(struct cppi5_tr_type1_t); - d = udma_alloc_tr_desc(uc, tr_size, sglen, dir); + d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); if (!d) return NULL; @@ -2002,19 +2109,46 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl, tr_req = d->hwdesc[0].tr_req_base; for_each_sg(sgl, sgent, sglen, i) { - d->residue += sg_dma_len(sgent); + dma_addr_t sg_addr = sg_dma_address(sgent); + + num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr), + &tr0_cnt0, &tr0_cnt1, &tr1_cnt0); + if (num_tr < 0) { + dev_err(uc->ud->dev, "size %u is not supported\n", + sg_dma_len(sgent)); + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT); - tr_req[i].addr = sg_dma_address(sgent); - tr_req[i].icnt0 = burst * dev_width; - tr_req[i].dim1 = burst * dev_width; - tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0; + tr_req[tr_idx].addr = sg_addr; + tr_req[tr_idx].icnt0 = tr0_cnt0; + tr_req[tr_idx].icnt1 = tr0_cnt1; + tr_req[tr_idx].dim1 = tr0_cnt0; + tr_idx++; + + if (num_tr == 2) { + cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, + false, false, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[tr_idx].flags, + CPPI5_TR_CSF_SUPR_EVT); + + tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0; + tr_req[tr_idx].icnt0 = tr1_cnt0; + tr_req[tr_idx].icnt1 = 1; + tr_req[tr_idx].dim1 = tr1_cnt0; + tr_idx++; + } + + d->residue += sg_dma_len(sgent); } - cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP); + cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP); return d; } @@ -2319,47 +2453,66 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction dir, unsigned long flags) { - enum dma_slave_buswidth dev_width; struct udma_desc *d; - size_t tr_size; + size_t tr_size, period_addr; struct cppi5_tr_type1_t *tr_req; - unsigned int i; unsigned int periods = buf_len / period_len; - u32 burst; + u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; + unsigned int i; + int num_tr; - if (dir == DMA_DEV_TO_MEM) { - dev_width = uc->cfg.src_addr_width; - burst = uc->cfg.src_maxburst; - } else if (dir == DMA_MEM_TO_DEV) { - dev_width = uc->cfg.dst_addr_width; - burst = uc->cfg.dst_maxburst; - } else { - dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); + if (!is_slave_direction(dir)) { + dev_err(uc->ud->dev, "Only slave cyclic is supported\n"); return NULL; } - if (!burst) - burst = 1; + num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), 
&tr0_cnt0, + &tr0_cnt1, &tr1_cnt0); + if (num_tr < 0) { + dev_err(uc->ud->dev, "size %zu is not supported\n", + period_len); + return NULL; + } /* Now allocate and setup the descriptor. */ tr_size = sizeof(struct cppi5_tr_type1_t); - d = udma_alloc_tr_desc(uc, tr_size, periods, dir); + d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir); if (!d) return NULL; tr_req = d->hwdesc[0].tr_req_base; + period_addr = buf_addr; for (i = 0; i < periods; i++) { - cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false, - CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + int tr_idx = i * num_tr; - tr_req[i].addr = buf_addr + period_len * i; - tr_req[i].icnt0 = dev_width; - tr_req[i].icnt1 = period_len / dev_width; - tr_req[i].dim1 = dev_width; + cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, + false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + + tr_req[tr_idx].addr = period_addr; + tr_req[tr_idx].icnt0 = tr0_cnt0; + tr_req[tr_idx].icnt1 = tr0_cnt1; + tr_req[tr_idx].dim1 = tr0_cnt0; + + if (num_tr == 2) { + cppi5_tr_csf_set(&tr_req[tr_idx].flags, + CPPI5_TR_CSF_SUPR_EVT); + tr_idx++; + + cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, + false, false, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + + tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0; + tr_req[tr_idx].icnt0 = tr1_cnt0; + tr_req[tr_idx].icnt1 = 1; + tr_req[tr_idx].dim1 = tr1_cnt0; + } if (!(flags & DMA_PREP_INTERRUPT)) - cppi5_tr_csf_set(&tr_req[i].flags, + cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); + + period_addr += period_len; } return d; @@ -2517,29 +2670,12 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, return NULL; } - if (len < SZ_64K) { - num_tr = 1; - tr0_cnt0 = len; - tr0_cnt1 = 1; - } else { - unsigned long align_to = __ffs(src | dest); - - if (align_to > 3) - align_to = 3; - /* - * Keep simple: tr0: SZ_64K-alignment blocks, - * tr1: the remaining - */ - num_tr = 2; - tr0_cnt0 = (SZ_64K - BIT(align_to)); - if (len / tr0_cnt0 >= SZ_64K) { - dev_err(uc->ud->dev, "size %zu is not supported\n", - len); - return NULL; - } - - tr0_cnt1 = len / tr0_cnt0; - tr1_cnt0 = len % tr0_cnt0; + num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0, + &tr0_cnt1, &tr1_cnt0); + if (num_tr < 0) { + dev_err(uc->ud->dev, "size %zu is not supported\n", + len); + return NULL; } d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM); @@ -2631,6 +2767,9 @@ static enum dma_status udma_tx_status(struct dma_chan *chan, ret = dma_cookie_status(chan, cookie, txstate); + if (!udma_is_chan_running(uc)) + ret = DMA_COMPLETE; + if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) ret = DMA_PAUSED; @@ -2697,11 +2836,8 @@ static int udma_pause(struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); - if (!uc->desc) - return -EINVAL; - /* pause the channel */ - switch (uc->desc->dir) { + switch (uc->config.dir) { case DMA_DEV_TO_MEM: udma_rchanrt_update_bits(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, @@ -2730,11 +2866,8 @@ static int udma_resume(struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); - if (!uc->desc) - return -EINVAL; - /* resume the channel */ - switch (uc->desc->dir) { + switch (uc->config.dir) { case DMA_DEV_TO_MEM: udma_rchanrt_update_bits(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, @@ -3248,6 +3381,98 @@ static int udma_setup_resources(struct udma_dev *ud) return ch_count; } +static int udma_setup_rx_flush(struct udma_dev *ud) +{ + struct udma_rx_flush *rx_flush = &ud->rx_flush; + struct cppi5_desc_hdr_t *tr_desc; + struct cppi5_tr_type1_t 
*tr_req; + struct cppi5_host_desc_t *desc; + struct device *dev = ud->dev; + struct udma_hwdesc *hwdesc; + size_t tr_size; + + /* Allocate 1K buffer for discarded data on RX channel teardown */ + rx_flush->buffer_size = SZ_1K; + rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size, + GFP_KERNEL); + if (!rx_flush->buffer_vaddr) + return -ENOMEM; + + rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr, + rx_flush->buffer_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, rx_flush->buffer_paddr)) + return -ENOMEM; + + /* Set up descriptor to be used for TR mode */ + hwdesc = &rx_flush->hwdescs[0]; + tr_size = sizeof(struct cppi5_tr_type1_t); + hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1); + hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, + ud->desc_align); + + hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, + GFP_KERNEL); + if (!hwdesc->cppi5_desc_vaddr) + return -ENOMEM; + + hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, + hwdesc->cppi5_desc_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) + return -ENOMEM; + + /* Start of the TR req records */ + hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; + /* Start address of the TR response array */ + hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size; + + tr_desc = hwdesc->cppi5_desc_vaddr; + cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0); + cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); + cppi5_desc_set_retpolicy(tr_desc, 0, 0); + + tr_req = hwdesc->tr_req_base; + cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req->addr = rx_flush->buffer_paddr; + tr_req->icnt0 = rx_flush->buffer_size; + tr_req->icnt1 = 1; + + /* Set up descriptor to be used for packet mode */ + hwdesc = &rx_flush->hwdescs[1]; + hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + + CPPI5_INFO0_HDESC_EPIB_SIZE + + CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE, + ud->desc_align); + + hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, + GFP_KERNEL); + if (!hwdesc->cppi5_desc_vaddr) + return -ENOMEM; + + hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, + hwdesc->cppi5_desc_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) + return -ENOMEM; + + desc = hwdesc->cppi5_desc_vaddr; + cppi5_hdesc_init(desc, 0, 0); + cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); + cppi5_desc_set_retpolicy(&desc->hdr, 0, 0); + + cppi5_hdesc_attach_buf(desc, + rx_flush->buffer_paddr, rx_flush->buffer_size, + rx_flush->buffer_paddr, rx_flush->buffer_size); + + dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, + hwdesc->cppi5_desc_size, DMA_TO_DEVICE); + return 0; +} + #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ @@ -3361,6 +3586,10 @@ static int udma_probe(struct platform_device *pdev) if (ud->desc_align < dma_get_cache_alignment()) ud->desc_align = dma_get_cache_alignment(); + ret = udma_setup_rx_flush(ud); + if (ret) + return ret; + for (i = 0; i < ud->tchan_cnt; i++) { struct udma_tchan *tchan = &ud->tchans[i]; diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index b3c99bb5fe77..fe2eb892a1bd 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -523,4 +523,11 @@ config EDAC_BLUEFIELD Support for error detection and 
correction on the Mellanox BlueField SoCs. +config EDAC_DMC520 + tristate "ARM DMC-520 ECC" + depends on ARM64 + help + Support for error detection and correction on the + SoCs with ARM DMC-520 DRAM controller. + endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index d77200c9680b..269e15118cea 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -87,3 +87,4 @@ obj-$(CONFIG_EDAC_TI) += ti_edac.o obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o +obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 9fbad908a854..f91f3bc1e0b2 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3626,13 +3626,13 @@ static void setup_pci_device(void) } static const struct x86_cpu_id amd64_cpuids[] = { - { X86_VENDOR_AMD, 0xF, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, - { X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, - { X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, - { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, - { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, - { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, - { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, + X86_MATCH_VENDOR_FAM(AMD, 0x0F, NULL), + X86_MATCH_VENDOR_FAM(AMD, 0x10, NULL), + X86_MATCH_VENDOR_FAM(AMD, 0x15, NULL), + X86_MATCH_VENDOR_FAM(AMD, 0x16, NULL), + X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL), + X86_MATCH_VENDOR_FAM(HYGON, 0x18, NULL), + X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL), { } }; MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids); diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c index 7f227bdcbc84..a7502ebe9bdc 100644 --- a/drivers/edac/armada_xp_edac.c +++ b/drivers/edac/armada_xp_edac.c @@ -429,26 +429,26 @@ static void aurora_l2_check(struct edac_device_ctl_info *dci) src = (attr_cap & AURORA_ERR_ATTR_SRC_MSK) >> AURORA_ERR_ATTR_SRC_OFF; if (src <= 3) - len += snprintf(msg+len, size-len, "src=CPU%d ", src); + len += scnprintf(msg+len, size-len, "src=CPU%d ", src); else - len += snprintf(msg+len, size-len, "src=IO "); + len += scnprintf(msg+len, size-len, "src=IO "); txn = (attr_cap & AURORA_ERR_ATTR_TXN_MSK) >> AURORA_ERR_ATTR_TXN_OFF; switch (txn) { case 0: - len += snprintf(msg+len, size-len, "txn=Data-Read "); + len += scnprintf(msg+len, size-len, "txn=Data-Read "); break; case 1: - len += snprintf(msg+len, size-len, "txn=Isn-Read "); + len += scnprintf(msg+len, size-len, "txn=Isn-Read "); break; case 2: - len += snprintf(msg+len, size-len, "txn=Clean-Flush "); + len += scnprintf(msg+len, size-len, "txn=Clean-Flush "); break; case 3: - len += snprintf(msg+len, size-len, "txn=Eviction "); + len += scnprintf(msg+len, size-len, "txn=Eviction "); break; case 4: - len += snprintf(msg+len, size-len, + len += scnprintf(msg+len, size-len, "txn=Read-Modify-Write "); break; } @@ -456,19 +456,19 @@ static void aurora_l2_check(struct edac_device_ctl_info *dci) err = (attr_cap & AURORA_ERR_ATTR_ERR_MSK) >> AURORA_ERR_ATTR_ERR_OFF; switch (err) { case 0: - len += snprintf(msg+len, size-len, "err=CorrECC "); + len += scnprintf(msg+len, size-len, "err=CorrECC "); break; case 1: - len += snprintf(msg+len, size-len, "err=UnCorrECC "); + len += scnprintf(msg+len, size-len, "err=UnCorrECC "); break; case 2: - len += snprintf(msg+len, size-len, "err=TagParity "); + len += scnprintf(msg+len, size-len, "err=TagParity "); break; } - len += snprintf(msg+len, size-len, 
"addr=0x%x ", addr_cap & AURORA_ERR_ADDR_CAP_ADDR_MASK); - len += snprintf(msg+len, size-len, "index=0x%x ", (way_cap & AURORA_ERR_WAY_IDX_MSK) >> AURORA_ERR_WAY_IDX_OFF); - len += snprintf(msg+len, size-len, "way=0x%x", (way_cap & AURORA_ERR_WAY_CAP_WAY_MASK) >> AURORA_ERR_WAY_CAP_WAY_OFFSET); + len += scnprintf(msg+len, size-len, "addr=0x%x ", addr_cap & AURORA_ERR_ADDR_CAP_ADDR_MASK); + len += scnprintf(msg+len, size-len, "index=0x%x ", (way_cap & AURORA_ERR_WAY_IDX_MSK) >> AURORA_ERR_WAY_IDX_OFF); + len += scnprintf(msg+len, size-len, "way=0x%x", (way_cap & AURORA_ERR_WAY_CAP_WAY_MASK) >> AURORA_ERR_WAY_CAP_WAY_OFFSET); /* clear error capture registers */ writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG); diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c new file mode 100644 index 000000000000..fc1153ab1ebb --- /dev/null +++ b/drivers/edac/dmc520_edac.c @@ -0,0 +1,656 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * EDAC driver for DMC-520 memory controller. + * + * The driver supports 10 interrupt lines, + * though only dram_ecc_errc and dram_ecc_errd are currently handled. + * + * Authors: Rui Zhao <ruizhao@microsoft.com> + * Lei Wang <lewan@microsoft.com> + * Shiping Ji <shji@microsoft.com> + */ + +#include <linux/bitfield.h> +#include <linux/edac.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include "edac_mc.h" + +/* DMC-520 registers */ +#define REG_OFFSET_FEATURE_CONFIG 0x130 +#define REG_OFFSET_ECC_ERRC_COUNT_31_00 0x158 +#define REG_OFFSET_ECC_ERRC_COUNT_63_32 0x15C +#define REG_OFFSET_ECC_ERRD_COUNT_31_00 0x160 +#define REG_OFFSET_ECC_ERRD_COUNT_63_32 0x164 +#define REG_OFFSET_INTERRUPT_CONTROL 0x500 +#define REG_OFFSET_INTERRUPT_CLR 0x508 +#define REG_OFFSET_INTERRUPT_STATUS 0x510 +#define REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_31_00 0x528 +#define REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_63_32 0x52C +#define REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_31_00 0x530 +#define REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_63_32 0x534 +#define REG_OFFSET_ADDRESS_CONTROL_NOW 0x1010 +#define REG_OFFSET_MEMORY_TYPE_NOW 0x1128 +#define REG_OFFSET_SCRUB_CONTROL0_NOW 0x1170 +#define REG_OFFSET_FORMAT_CONTROL 0x18 + +/* DMC-520 types, masks and bitfields */ +#define RAM_ECC_INT_CE_BIT BIT(0) +#define RAM_ECC_INT_UE_BIT BIT(1) +#define DRAM_ECC_INT_CE_BIT BIT(2) +#define DRAM_ECC_INT_UE_BIT BIT(3) +#define FAILED_ACCESS_INT_BIT BIT(4) +#define FAILED_PROG_INT_BIT BIT(5) +#define LINK_ERR_INT_BIT BIT(6) +#define TEMPERATURE_EVENT_INT_BIT BIT(7) +#define ARCH_FSM_INT_BIT BIT(8) +#define PHY_REQUEST_INT_BIT BIT(9) +#define MEMORY_WIDTH_MASK GENMASK(1, 0) +#define SCRUB_TRIGGER0_NEXT_MASK GENMASK(1, 0) +#define REG_FIELD_DRAM_ECC_ENABLED GENMASK(1, 0) +#define REG_FIELD_MEMORY_TYPE GENMASK(2, 0) +#define REG_FIELD_DEVICE_WIDTH GENMASK(9, 8) +#define REG_FIELD_ADDRESS_CONTROL_COL GENMASK(2, 0) +#define REG_FIELD_ADDRESS_CONTROL_ROW GENMASK(10, 8) +#define REG_FIELD_ADDRESS_CONTROL_BANK GENMASK(18, 16) +#define REG_FIELD_ADDRESS_CONTROL_RANK GENMASK(25, 24) +#define REG_FIELD_ERR_INFO_LOW_VALID BIT(0) +#define REG_FIELD_ERR_INFO_LOW_COL GENMASK(10, 1) +#define REG_FIELD_ERR_INFO_LOW_ROW GENMASK(28, 11) +#define REG_FIELD_ERR_INFO_LOW_RANK GENMASK(31, 29) +#define REG_FIELD_ERR_INFO_HIGH_BANK GENMASK(3, 0) +#define REG_FIELD_ERR_INFO_HIGH_VALID BIT(31) + +#define DRAM_ADDRESS_CONTROL_MIN_COL_BITS 8 +#define 
DRAM_ADDRESS_CONTROL_MIN_ROW_BITS 11 + +#define DMC520_SCRUB_TRIGGER_ERR_DETECT 2 +#define DMC520_SCRUB_TRIGGER_IDLE 3 + +/* Driver settings */ +/* + * The max-length message would be: "rank:7 bank:15 row:262143 col:1023". + * Max length is 34. Using a 40-size buffer is enough. + */ +#define DMC520_MSG_BUF_SIZE 40 +#define EDAC_MOD_NAME "dmc520-edac" +#define EDAC_CTL_NAME "dmc520" + +/* the data bus width for the attached memory chips. */ +enum dmc520_mem_width { + MEM_WIDTH_X32 = 2, + MEM_WIDTH_X64 = 3 +}; + +/* memory type */ +enum dmc520_mem_type { + MEM_TYPE_DDR3 = 1, + MEM_TYPE_DDR4 = 2 +}; + +/* memory device width */ +enum dmc520_dev_width { + DEV_WIDTH_X4 = 0, + DEV_WIDTH_X8 = 1, + DEV_WIDTH_X16 = 2 +}; + +struct ecc_error_info { + u32 col; + u32 row; + u32 bank; + u32 rank; +}; + +/* The interrupt config */ +struct dmc520_irq_config { + char *name; + int mask; +}; + +/* The interrupt mappings */ +static struct dmc520_irq_config dmc520_irq_configs[] = { + { + .name = "ram_ecc_errc", + .mask = RAM_ECC_INT_CE_BIT + }, + { + .name = "ram_ecc_errd", + .mask = RAM_ECC_INT_UE_BIT + }, + { + .name = "dram_ecc_errc", + .mask = DRAM_ECC_INT_CE_BIT + }, + { + .name = "dram_ecc_errd", + .mask = DRAM_ECC_INT_UE_BIT + }, + { + .name = "failed_access", + .mask = FAILED_ACCESS_INT_BIT + }, + { + .name = "failed_prog", + .mask = FAILED_PROG_INT_BIT + }, + { + .name = "link_err", + .mask = LINK_ERR_INT_BIT + }, + { + .name = "temperature_event", + .mask = TEMPERATURE_EVENT_INT_BIT + }, + { + .name = "arch_fsm", + .mask = ARCH_FSM_INT_BIT + }, + { + .name = "phy_request", + .mask = PHY_REQUEST_INT_BIT + } +}; + +#define NUMBER_OF_IRQS ARRAY_SIZE(dmc520_irq_configs) + +/* + * The EDAC driver private data. + * error_lock is to protect concurrent writes to the mci->error_desc through + * edac_mc_handle_error(). + */ +struct dmc520_edac { + void __iomem *reg_base; + spinlock_t error_lock; + u32 mem_width_in_bytes; + int irqs[NUMBER_OF_IRQS]; + int masks[NUMBER_OF_IRQS]; +}; + +static int dmc520_mc_idx; + +static u32 dmc520_read_reg(struct dmc520_edac *pvt, u32 offset) +{ + return readl(pvt->reg_base + offset); +} + +static void dmc520_write_reg(struct dmc520_edac *pvt, u32 val, u32 offset) +{ + writel(val, pvt->reg_base + offset); +} + +static u32 dmc520_calc_dram_ecc_error(u32 value) +{ + u32 total = 0; + + /* Each rank's error counter takes one byte. */ + while (value > 0) { + total += (value & 0xFF); + value >>= 8; + } + return total; +} + +static u32 dmc520_get_dram_ecc_error_count(struct dmc520_edac *pvt, + bool is_ce) +{ + u32 reg_offset_low, reg_offset_high; + u32 err_low, err_high; + u32 err_count; + + reg_offset_low = is_ce ? REG_OFFSET_ECC_ERRC_COUNT_31_00 : + REG_OFFSET_ECC_ERRD_COUNT_31_00; + reg_offset_high = is_ce ? REG_OFFSET_ECC_ERRC_COUNT_63_32 : + REG_OFFSET_ECC_ERRD_COUNT_63_32; + + err_low = dmc520_read_reg(pvt, reg_offset_low); + err_high = dmc520_read_reg(pvt, reg_offset_high); + /* Reset error counters */ + dmc520_write_reg(pvt, 0, reg_offset_low); + dmc520_write_reg(pvt, 0, reg_offset_high); + + err_count = dmc520_calc_dram_ecc_error(err_low) + + dmc520_calc_dram_ecc_error(err_high); + + return err_count; +} + +static void dmc520_get_dram_ecc_error_info(struct dmc520_edac *pvt, + bool is_ce, + struct ecc_error_info *info) +{ + u32 reg_offset_low, reg_offset_high; + u32 reg_val_low, reg_val_high; + bool valid; + + reg_offset_low = is_ce ? REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_31_00 : + REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_31_00; + reg_offset_high = is_ce ? 
REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_63_32 : + REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_63_32; + + reg_val_low = dmc520_read_reg(pvt, reg_offset_low); + reg_val_high = dmc520_read_reg(pvt, reg_offset_high); + + valid = (FIELD_GET(REG_FIELD_ERR_INFO_LOW_VALID, reg_val_low) != 0) && + (FIELD_GET(REG_FIELD_ERR_INFO_HIGH_VALID, reg_val_high) != 0); + + if (valid) { + info->col = FIELD_GET(REG_FIELD_ERR_INFO_LOW_COL, reg_val_low); + info->row = FIELD_GET(REG_FIELD_ERR_INFO_LOW_ROW, reg_val_low); + info->rank = FIELD_GET(REG_FIELD_ERR_INFO_LOW_RANK, reg_val_low); + info->bank = FIELD_GET(REG_FIELD_ERR_INFO_HIGH_BANK, reg_val_high); + } else { + memset(info, 0, sizeof(*info)); + } +} + +static bool dmc520_is_ecc_enabled(void __iomem *reg_base) +{ + u32 reg_val = readl(reg_base + REG_OFFSET_FEATURE_CONFIG); + + return FIELD_GET(REG_FIELD_DRAM_ECC_ENABLED, reg_val); +} + +static enum scrub_type dmc520_get_scrub_type(struct dmc520_edac *pvt) +{ + enum scrub_type type = SCRUB_NONE; + u32 reg_val, scrub_cfg; + + reg_val = dmc520_read_reg(pvt, REG_OFFSET_SCRUB_CONTROL0_NOW); + scrub_cfg = FIELD_GET(SCRUB_TRIGGER0_NEXT_MASK, reg_val); + + if (scrub_cfg == DMC520_SCRUB_TRIGGER_ERR_DETECT || + scrub_cfg == DMC520_SCRUB_TRIGGER_IDLE) + type = SCRUB_HW_PROG; + + return type; +} + +/* Get the memory data bus width, in number of bytes. */ +static u32 dmc520_get_memory_width(struct dmc520_edac *pvt) +{ + enum dmc520_mem_width mem_width_field; + u32 mem_width_in_bytes = 0; + u32 reg_val; + + reg_val = dmc520_read_reg(pvt, REG_OFFSET_FORMAT_CONTROL); + mem_width_field = FIELD_GET(MEMORY_WIDTH_MASK, reg_val); + + if (mem_width_field == MEM_WIDTH_X32) + mem_width_in_bytes = 4; + else if (mem_width_field == MEM_WIDTH_X64) + mem_width_in_bytes = 8; + return mem_width_in_bytes; +} + +static enum mem_type dmc520_get_mtype(struct dmc520_edac *pvt) +{ + enum mem_type mt = MEM_UNKNOWN; + enum dmc520_mem_type type; + u32 reg_val; + + reg_val = dmc520_read_reg(pvt, REG_OFFSET_MEMORY_TYPE_NOW); + type = FIELD_GET(REG_FIELD_MEMORY_TYPE, reg_val); + + switch (type) { + case MEM_TYPE_DDR3: + mt = MEM_DDR3; + break; + + case MEM_TYPE_DDR4: + mt = MEM_DDR4; + break; + } + + return mt; +} + +static enum dev_type dmc520_get_dtype(struct dmc520_edac *pvt) +{ + enum dmc520_dev_width device_width; + enum dev_type dt = DEV_UNKNOWN; + u32 reg_val; + + reg_val = dmc520_read_reg(pvt, REG_OFFSET_MEMORY_TYPE_NOW); + device_width = FIELD_GET(REG_FIELD_DEVICE_WIDTH, reg_val); + + switch (device_width) { + case DEV_WIDTH_X4: + dt = DEV_X4; + break; + + case DEV_WIDTH_X8: + dt = DEV_X8; + break; + + case DEV_WIDTH_X16: + dt = DEV_X16; + break; + } + + return dt; +} + +static u32 dmc520_get_rank_count(void __iomem *reg_base) +{ + u32 reg_val, rank_bits; + + reg_val = readl(reg_base + REG_OFFSET_ADDRESS_CONTROL_NOW); + rank_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_RANK, reg_val); + + return BIT(rank_bits); +} + +static u64 dmc520_get_rank_size(struct dmc520_edac *pvt) +{ + u32 reg_val, col_bits, row_bits, bank_bits; + + reg_val = dmc520_read_reg(pvt, REG_OFFSET_ADDRESS_CONTROL_NOW); + + col_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_COL, reg_val) + + DRAM_ADDRESS_CONTROL_MIN_COL_BITS; + row_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_ROW, reg_val) + + DRAM_ADDRESS_CONTROL_MIN_ROW_BITS; + bank_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_BANK, reg_val); + + return (u64)pvt->mem_width_in_bytes << (col_bits + row_bits + bank_bits); +} + +static void dmc520_handle_dram_ecc_errors(struct mem_ctl_info *mci, + bool is_ce) +{ + struct dmc520_edac *pvt = 
mci->pvt_info; + char message[DMC520_MSG_BUF_SIZE]; + struct ecc_error_info info; + u32 cnt; + + dmc520_get_dram_ecc_error_info(pvt, is_ce, &info); + + cnt = dmc520_get_dram_ecc_error_count(pvt, is_ce); + if (!cnt) + return; + + snprintf(message, ARRAY_SIZE(message), + "rank:%d bank:%d row:%d col:%d", + info.rank, info.bank, + info.row, info.col); + + spin_lock(&pvt->error_lock); + edac_mc_handle_error((is_ce ? HW_EVENT_ERR_CORRECTED : + HW_EVENT_ERR_UNCORRECTED), + mci, cnt, 0, 0, 0, info.rank, -1, -1, + message, ""); + spin_unlock(&pvt->error_lock); +} + +static irqreturn_t dmc520_edac_dram_ecc_isr(int irq, struct mem_ctl_info *mci, + bool is_ce) +{ + struct dmc520_edac *pvt = mci->pvt_info; + u32 i_mask; + + i_mask = is_ce ? DRAM_ECC_INT_CE_BIT : DRAM_ECC_INT_UE_BIT; + + dmc520_handle_dram_ecc_errors(mci, is_ce); + + dmc520_write_reg(pvt, i_mask, REG_OFFSET_INTERRUPT_CLR); + + return IRQ_HANDLED; +} + +static irqreturn_t dmc520_edac_dram_all_isr(int irq, struct mem_ctl_info *mci, + u32 irq_mask) +{ + struct dmc520_edac *pvt = mci->pvt_info; + irqreturn_t irq_ret = IRQ_NONE; + u32 status; + + status = dmc520_read_reg(pvt, REG_OFFSET_INTERRUPT_STATUS); + + if ((irq_mask & DRAM_ECC_INT_CE_BIT) && + (status & DRAM_ECC_INT_CE_BIT)) + irq_ret = dmc520_edac_dram_ecc_isr(irq, mci, true); + + if ((irq_mask & DRAM_ECC_INT_UE_BIT) && + (status & DRAM_ECC_INT_UE_BIT)) + irq_ret = dmc520_edac_dram_ecc_isr(irq, mci, false); + + return irq_ret; +} + +static irqreturn_t dmc520_isr(int irq, void *data) +{ + struct mem_ctl_info *mci = data; + struct dmc520_edac *pvt = mci->pvt_info; + u32 mask = 0; + int idx; + + for (idx = 0; idx < NUMBER_OF_IRQS; idx++) { + if (pvt->irqs[idx] == irq) { + mask = pvt->masks[idx]; + break; + } + } + return dmc520_edac_dram_all_isr(irq, mci, mask); +} + +static void dmc520_init_csrow(struct mem_ctl_info *mci) +{ + struct dmc520_edac *pvt = mci->pvt_info; + struct csrow_info *csi; + struct dimm_info *dimm; + u32 pages_per_rank; + enum dev_type dt; + enum mem_type mt; + int row, ch; + u64 rs; + + dt = dmc520_get_dtype(pvt); + mt = dmc520_get_mtype(pvt); + rs = dmc520_get_rank_size(pvt); + pages_per_rank = rs >> PAGE_SHIFT; + + for (row = 0; row < mci->nr_csrows; row++) { + csi = mci->csrows[row]; + + for (ch = 0; ch < csi->nr_channels; ch++) { + dimm = csi->channels[ch]->dimm; + dimm->grain = pvt->mem_width_in_bytes; + dimm->dtype = dt; + dimm->mtype = mt; + dimm->edac_mode = EDAC_FLAG_SECDED; + dimm->nr_pages = pages_per_rank / csi->nr_channels; + } + } +} + +static int dmc520_edac_probe(struct platform_device *pdev) +{ + bool registered[NUMBER_OF_IRQS] = { false }; + int irqs[NUMBER_OF_IRQS] = { -ENXIO }; + int masks[NUMBER_OF_IRQS] = { 0 }; + struct edac_mc_layer layers[1]; + struct dmc520_edac *pvt = NULL; + struct mem_ctl_info *mci; + void __iomem *reg_base; + u32 irq_mask_all = 0; + struct resource *res; + struct device *dev; + int ret, idx, irq; + u32 reg_val; + + /* Parse the device node */ + dev = &pdev->dev; + + for (idx = 0; idx < NUMBER_OF_IRQS; idx++) { + irq = platform_get_irq_byname(pdev, dmc520_irq_configs[idx].name); + irqs[idx] = irq; + masks[idx] = dmc520_irq_configs[idx].mask; + if (irq >= 0) { + irq_mask_all |= dmc520_irq_configs[idx].mask; + edac_dbg(0, "Discovered %s, irq: %d.\n", dmc520_irq_configs[idx].name, irq); + } + } + + if (!irq_mask_all) { + edac_printk(KERN_ERR, EDAC_MOD_NAME, + "At least one valid interrupt line is expected.\n"); + return -EINVAL; + } + + /* Initialize dmc520 edac */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + 
reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(reg_base)) + return PTR_ERR(reg_base); + + if (!dmc520_is_ecc_enabled(reg_base)) + return -ENXIO; + + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; + layers[0].size = dmc520_get_rank_count(reg_base); + layers[0].is_virt_csrow = true; + + mci = edac_mc_alloc(dmc520_mc_idx++, ARRAY_SIZE(layers), layers, sizeof(*pvt)); + if (!mci) { + edac_printk(KERN_ERR, EDAC_MOD_NAME, + "Failed to allocate memory for mc instance\n"); + ret = -ENOMEM; + goto err; + } + + pvt = mci->pvt_info; + + pvt->reg_base = reg_base; + spin_lock_init(&pvt->error_lock); + memcpy(pvt->irqs, irqs, sizeof(irqs)); + memcpy(pvt->masks, masks, sizeof(masks)); + + platform_set_drvdata(pdev, mci); + + mci->pdev = dev; + mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR4; + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; + mci->edac_cap = EDAC_FLAG_SECDED; + mci->scrub_cap = SCRUB_FLAG_HW_SRC; + mci->scrub_mode = dmc520_get_scrub_type(pvt); + mci->ctl_name = EDAC_CTL_NAME; + mci->dev_name = dev_name(mci->pdev); + mci->mod_name = EDAC_MOD_NAME; + + edac_op_state = EDAC_OPSTATE_INT; + + pvt->mem_width_in_bytes = dmc520_get_memory_width(pvt); + + dmc520_init_csrow(mci); + + /* Clear interrupts, not affecting other unrelated interrupts */ + reg_val = dmc520_read_reg(pvt, REG_OFFSET_INTERRUPT_CONTROL); + dmc520_write_reg(pvt, reg_val & (~irq_mask_all), + REG_OFFSET_INTERRUPT_CONTROL); + dmc520_write_reg(pvt, irq_mask_all, REG_OFFSET_INTERRUPT_CLR); + + for (idx = 0; idx < NUMBER_OF_IRQS; idx++) { + irq = irqs[idx]; + if (irq >= 0) { + ret = devm_request_irq(&pdev->dev, irq, + dmc520_isr, IRQF_SHARED, + dev_name(&pdev->dev), mci); + if (ret < 0) { + edac_printk(KERN_ERR, EDAC_MC, + "Failed to request irq %d\n", irq); + goto err; + } + registered[idx] = true; + } + } + + /* Reset DRAM CE/UE counters */ + if (irq_mask_all & DRAM_ECC_INT_CE_BIT) + dmc520_get_dram_ecc_error_count(pvt, true); + + if (irq_mask_all & DRAM_ECC_INT_UE_BIT) + dmc520_get_dram_ecc_error_count(pvt, false); + + ret = edac_mc_add_mc(mci); + if (ret) { + edac_printk(KERN_ERR, EDAC_MOD_NAME, + "Failed to register with EDAC core\n"); + goto err; + } + + /* Enable interrupts, not affecting other unrelated interrupts */ + dmc520_write_reg(pvt, reg_val | irq_mask_all, + REG_OFFSET_INTERRUPT_CONTROL); + + return 0; + +err: + for (idx = 0; idx < NUMBER_OF_IRQS; idx++) { + if (registered[idx]) + devm_free_irq(&pdev->dev, pvt->irqs[idx], mci); + } + if (mci) + edac_mc_free(mci); + + return ret; +} + +static int dmc520_edac_remove(struct platform_device *pdev) +{ + u32 reg_val, idx, irq_mask_all = 0; + struct mem_ctl_info *mci; + struct dmc520_edac *pvt; + + mci = platform_get_drvdata(pdev); + pvt = mci->pvt_info; + + /* Disable interrupts */ + reg_val = dmc520_read_reg(pvt, REG_OFFSET_INTERRUPT_CONTROL); + dmc520_write_reg(pvt, reg_val & (~irq_mask_all), + REG_OFFSET_INTERRUPT_CONTROL); + + /* free irq's */ + for (idx = 0; idx < NUMBER_OF_IRQS; idx++) { + if (pvt->irqs[idx] >= 0) { + irq_mask_all |= pvt->masks[idx]; + devm_free_irq(&pdev->dev, pvt->irqs[idx], mci); + } + } + + edac_mc_del_mc(&pdev->dev); + edac_mc_free(mci); + + return 0; +} + +static const struct of_device_id dmc520_edac_driver_id[] = { + { .compatible = "arm,dmc-520", }, + { /* end of table */ } +}; + +MODULE_DEVICE_TABLE(of, dmc520_edac_driver_id); + +static struct platform_driver dmc520_edac_driver = { + .driver = { + .name = "dmc520", + .of_match_table = dmc520_edac_driver_id, + }, + + .probe = dmc520_edac_probe, + .remove = dmc520_edac_remove +}; 
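
Editor's aside (not part of the patch): a minimal standalone C sketch of the arithmetic the new dmc520 driver performs in dmc520_calc_dram_ecc_error() and dmc520_get_rank_size() above — summing the per-rank byte counters packed into one error-count register, and deriving the rank size from the column/row/bank bit counts and bus width. The helper name sum_per_rank_counts and the example register value and geometry below are illustrative assumptions, not values taken from the hardware documentation.

	/* Illustrative sketch only; compiles as ordinary userspace C. */
	#include <stdint.h>
	#include <stdio.h>

	/* Each rank's error count occupies one byte of the 32-bit register. */
	static uint32_t sum_per_rank_counts(uint32_t reg)
	{
		uint32_t total = 0;

		while (reg) {
			total += reg & 0xff;
			reg >>= 8;
		}
		return total;
	}

	int main(void)
	{
		/* e.g. rank 0 reports 3 errors, rank 2 reports 1 -> 4 total */
		uint32_t errc = 0x00010003;
		/* assumed 10 column bits, 15 row bits, 4 bank bits, 8-byte bus */
		uint64_t rank_size = 8ULL << (10 + 15 + 4);

		printf("CE count: %u\n", sum_per_rank_counts(errc));          /* 4 */
		printf("rank size: %llu MiB\n",
		       (unsigned long long)(rank_size >> 20));                /* 4096 */
		return 0;
	}

This mirrors how the driver converts register contents into the values reported to the EDAC core: counters are accumulated per rank before being handed to edac_mc_handle_error(), and the rank size feeds the nr_pages calculation in dmc520_init_csrow().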
+ +module_platform_driver(dmc520_edac_driver); + +MODULE_AUTHOR("Rui Zhao <ruizhao@microsoft.com>"); +MODULE_AUTHOR("Lei Wang <lewan@microsoft.com>"); +MODULE_AUTHOR("Shiping Ji <shji@microsoft.com>"); +MODULE_DESCRIPTION("DMC-520 ECC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 69e0d90460e6..75ede27bdf6a 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -55,6 +55,11 @@ static LIST_HEAD(mc_devices); */ static const char *edac_mc_owner; +static struct mem_ctl_info *error_desc_to_mci(struct edac_raw_error_desc *e) +{ + return container_of(e, struct mem_ctl_info, error_desc); +} + int edac_get_report_status(void) { return edac_report; @@ -278,6 +283,12 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems) static void _edac_mc_free(struct mem_ctl_info *mci) { + put_device(&mci->dev); +} + +static void mci_release(struct device *dev) +{ + struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev); struct csrow_info *csr; int i, chn, row; @@ -305,103 +316,26 @@ static void _edac_mc_free(struct mem_ctl_info *mci) kfree(mci); } -struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num, - unsigned int n_layers, - struct edac_mc_layer *layers, - unsigned int sz_pvt) +static int edac_mc_alloc_csrows(struct mem_ctl_info *mci) { - struct mem_ctl_info *mci; - struct edac_mc_layer *layer; - struct csrow_info *csr; - struct rank_info *chan; - struct dimm_info *dimm; - u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; - unsigned int pos[EDAC_MAX_LAYERS]; - unsigned int idx, size, tot_dimms = 1, count = 1; - unsigned int tot_csrows = 1, tot_channels = 1, tot_errcount = 0; - void *pvt, *p, *ptr = NULL; - int i, j, row, chn, n, len; - bool per_rank = false; - - if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0)) - return NULL; - - /* - * Calculate the total amount of dimms and csrows/cschannels while - * in the old API emulation mode - */ - for (idx = 0; idx < n_layers; idx++) { - tot_dimms *= layers[idx].size; - - if (layers[idx].is_virt_csrow) - tot_csrows *= layers[idx].size; - else - tot_channels *= layers[idx].size; - - if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT) - per_rank = true; - } - - /* Figure out the offsets of the various items from the start of an mc - * structure. We want the alignment of each item to be at least as - * stringent as what the compiler would provide if we could simply - * hardcode everything into a single struct. - */ - mci = edac_align_ptr(&ptr, sizeof(*mci), 1); - layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers); - for (i = 0; i < n_layers; i++) { - count *= layers[i].size; - edac_dbg(4, "errcount layer %d size %d\n", i, count); - ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); - ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); - tot_errcount += 2 * count; - } - - edac_dbg(4, "allocating %d error counters\n", tot_errcount); - pvt = edac_align_ptr(&ptr, sz_pvt, 1); - size = ((unsigned long)pvt) + sz_pvt; - - edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n", - size, - tot_dimms, - per_rank ? "ranks" : "dimms", - tot_csrows * tot_channels); - - mci = kzalloc(size, GFP_KERNEL); - if (mci == NULL) - return NULL; - - /* Adjust pointers so they point within the memory we just allocated - * rather than an imaginary chunk of memory located at address 0. 
- */ - layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer)); - for (i = 0; i < n_layers; i++) { - mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i])); - mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i])); - } - pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL; - - /* setup index and various internal pointers */ - mci->mc_idx = mc_num; - mci->tot_dimms = tot_dimms; - mci->pvt_info = pvt; - mci->n_layers = n_layers; - mci->layers = layer; - memcpy(mci->layers, layers, sizeof(*layer) * n_layers); - mci->nr_csrows = tot_csrows; - mci->num_cschannel = tot_channels; - mci->csbased = per_rank; + unsigned int tot_channels = mci->num_cschannel; + unsigned int tot_csrows = mci->nr_csrows; + unsigned int row, chn; /* * Alocate and fill the csrow/channels structs */ mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL); if (!mci->csrows) - goto error; + return -ENOMEM; + for (row = 0; row < tot_csrows; row++) { + struct csrow_info *csr; + csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL); if (!csr) - goto error; + return -ENOMEM; + mci->csrows[row] = csr; csr->csrow_idx = row; csr->mci = mci; @@ -409,34 +343,51 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num, csr->channels = kcalloc(tot_channels, sizeof(*csr->channels), GFP_KERNEL); if (!csr->channels) - goto error; + return -ENOMEM; for (chn = 0; chn < tot_channels; chn++) { + struct rank_info *chan; + chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL); if (!chan) - goto error; + return -ENOMEM; + csr->channels[chn] = chan; chan->chan_idx = chn; chan->csrow = csr; } } + return 0; +} + +static int edac_mc_alloc_dimms(struct mem_ctl_info *mci) +{ + unsigned int pos[EDAC_MAX_LAYERS]; + unsigned int row, chn, idx; + int layer; + void *p; + /* * Allocate and fill the dimm structs */ - mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL); + mci->dimms = kcalloc(mci->tot_dimms, sizeof(*mci->dimms), GFP_KERNEL); if (!mci->dimms) - goto error; + return -ENOMEM; memset(&pos, 0, sizeof(pos)); row = 0; chn = 0; - for (idx = 0; idx < tot_dimms; idx++) { + for (idx = 0; idx < mci->tot_dimms; idx++) { + struct dimm_info *dimm; + struct rank_info *chan; + int n, len; + chan = mci->csrows[row]->channels[chn]; dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL); if (!dimm) - goto error; + return -ENOMEM; mci->dimms[idx] = dimm; dimm->mci = mci; dimm->idx = idx; @@ -446,16 +397,16 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num, */ len = sizeof(dimm->label); p = dimm->label; - n = snprintf(p, len, "mc#%u", mc_num); + n = snprintf(p, len, "mc#%u", mci->mc_idx); p += n; len -= n; - for (j = 0; j < n_layers; j++) { + for (layer = 0; layer < mci->n_layers; layer++) { n = snprintf(p, len, "%s#%u", - edac_layer_name[layers[j].type], - pos[j]); + edac_layer_name[mci->layers[layer].type], + pos[layer]); p += n; len -= n; - dimm->location[j] = pos[j]; + dimm->location[layer] = pos[layer]; if (len <= 0) break; @@ -467,29 +418,109 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num, dimm->cschannel = chn; /* Increment csrow location */ - if (layers[0].is_virt_csrow) { + if (mci->layers[0].is_virt_csrow) { chn++; - if (chn == tot_channels) { + if (chn == mci->num_cschannel) { chn = 0; row++; } } else { row++; - if (row == tot_csrows) { + if (row == mci->nr_csrows) { row = 0; chn++; } } /* Increment dimm location */ - for (j = n_layers - 1; j >= 0; j--) { - pos[j]++; - if (pos[j] < layers[j].size) + for (layer = mci->n_layers - 1; layer >= 0; 
layer--) { + pos[layer]++; + if (pos[layer] < mci->layers[layer].size) break; - pos[j] = 0; + pos[layer] = 0; } } + return 0; +} + +struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num, + unsigned int n_layers, + struct edac_mc_layer *layers, + unsigned int sz_pvt) +{ + struct mem_ctl_info *mci; + struct edac_mc_layer *layer; + unsigned int idx, size, tot_dimms = 1; + unsigned int tot_csrows = 1, tot_channels = 1; + void *pvt, *ptr = NULL; + bool per_rank = false; + + if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0)) + return NULL; + + /* + * Calculate the total amount of dimms and csrows/cschannels while + * in the old API emulation mode + */ + for (idx = 0; idx < n_layers; idx++) { + tot_dimms *= layers[idx].size; + + if (layers[idx].is_virt_csrow) + tot_csrows *= layers[idx].size; + else + tot_channels *= layers[idx].size; + + if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT) + per_rank = true; + } + + /* Figure out the offsets of the various items from the start of an mc + * structure. We want the alignment of each item to be at least as + * stringent as what the compiler would provide if we could simply + * hardcode everything into a single struct. + */ + mci = edac_align_ptr(&ptr, sizeof(*mci), 1); + layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers); + pvt = edac_align_ptr(&ptr, sz_pvt, 1); + size = ((unsigned long)pvt) + sz_pvt; + + edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n", + size, + tot_dimms, + per_rank ? "ranks" : "dimms", + tot_csrows * tot_channels); + + mci = kzalloc(size, GFP_KERNEL); + if (mci == NULL) + return NULL; + + mci->dev.release = mci_release; + device_initialize(&mci->dev); + + /* Adjust pointers so they point within the memory we just allocated + * rather than an imaginary chunk of memory located at address 0. + */ + layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer)); + pvt = sz_pvt ? 
(((char *)mci) + ((unsigned long)pvt)) : NULL; + + /* setup index and various internal pointers */ + mci->mc_idx = mc_num; + mci->tot_dimms = tot_dimms; + mci->pvt_info = pvt; + mci->n_layers = n_layers; + mci->layers = layer; + memcpy(mci->layers, layers, sizeof(*layer) * n_layers); + mci->nr_csrows = tot_csrows; + mci->num_cschannel = tot_channels; + mci->csbased = per_rank; + + if (edac_mc_alloc_csrows(mci)) + goto error; + + if (edac_mc_alloc_dimms(mci)) + goto error; + mci->op_state = OP_ALLOC; return mci; @@ -505,9 +536,6 @@ void edac_mc_free(struct mem_ctl_info *mci) { edac_dbg(1, "\n"); - if (device_is_registered(&mci->dev)) - edac_unregister_sysfs(mci); - _edac_mc_free(mci); } EXPORT_SYMBOL_GPL(edac_mc_free); @@ -902,88 +930,51 @@ const char *edac_layer_name[] = { }; EXPORT_SYMBOL_GPL(edac_layer_name); -static void edac_inc_ce_error(struct mem_ctl_info *mci, - bool enable_per_layer_report, - const int pos[EDAC_MAX_LAYERS], - const u16 count) +static void edac_inc_ce_error(struct edac_raw_error_desc *e) { - int i, index = 0; - - mci->ce_mc += count; - - if (!enable_per_layer_report) { - mci->ce_noinfo_count += count; - return; - } + int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer }; + struct mem_ctl_info *mci = error_desc_to_mci(e); + struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]); - for (i = 0; i < mci->n_layers; i++) { - if (pos[i] < 0) - break; - index += pos[i]; - mci->ce_per_layer[i][index] += count; + mci->ce_mc += e->error_count; - if (i < mci->n_layers - 1) - index *= mci->layers[i + 1].size; - } + if (dimm) + dimm->ce_count += e->error_count; + else + mci->ce_noinfo_count += e->error_count; } -static void edac_inc_ue_error(struct mem_ctl_info *mci, - bool enable_per_layer_report, - const int pos[EDAC_MAX_LAYERS], - const u16 count) +static void edac_inc_ue_error(struct edac_raw_error_desc *e) { - int i, index = 0; - - mci->ue_mc += count; - - if (!enable_per_layer_report) { - mci->ue_noinfo_count += count; - return; - } + int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer }; + struct mem_ctl_info *mci = error_desc_to_mci(e); + struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]); - for (i = 0; i < mci->n_layers; i++) { - if (pos[i] < 0) - break; - index += pos[i]; - mci->ue_per_layer[i][index] += count; + mci->ue_mc += e->error_count; - if (i < mci->n_layers - 1) - index *= mci->layers[i + 1].size; - } + if (dimm) + dimm->ue_count += e->error_count; + else + mci->ue_noinfo_count += e->error_count; } -static void edac_ce_error(struct mem_ctl_info *mci, - const u16 error_count, - const int pos[EDAC_MAX_LAYERS], - const char *msg, - const char *location, - const char *label, - const char *detail, - const char *other_detail, - const bool enable_per_layer_report, - const unsigned long page_frame_number, - const unsigned long offset_in_page, - long grain) +static void edac_ce_error(struct edac_raw_error_desc *e) { + struct mem_ctl_info *mci = error_desc_to_mci(e); unsigned long remapped_page; - char *msg_aux = ""; - - if (*msg) - msg_aux = " "; if (edac_mc_get_log_ce()) { - if (other_detail && *other_detail) - edac_mc_printk(mci, KERN_WARNING, - "%d CE %s%son %s (%s %s - %s)\n", - error_count, msg, msg_aux, label, - location, detail, other_detail); - else - edac_mc_printk(mci, KERN_WARNING, - "%d CE %s%son %s (%s %s)\n", - error_count, msg, msg_aux, label, - location, detail); + edac_mc_printk(mci, KERN_WARNING, + "%d CE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx%s%s)\n", + 
e->error_count, e->msg, + *e->msg ? " " : "", + e->label, e->location, e->page_frame_number, e->offset_in_page, + e->grain, e->syndrome, + *e->other_detail ? " - " : "", + e->other_detail); } - edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count); + + edac_inc_ce_error(e); if (mci->scrub_mode == SCRUB_SW_SRC) { /* @@ -998,60 +989,64 @@ static void edac_ce_error(struct mem_ctl_info *mci, * be scrubbed. */ remapped_page = mci->ctl_page_to_phys ? - mci->ctl_page_to_phys(mci, page_frame_number) : - page_frame_number; + mci->ctl_page_to_phys(mci, e->page_frame_number) : + e->page_frame_number; - edac_mc_scrub_block(remapped_page, - offset_in_page, grain); + edac_mc_scrub_block(remapped_page, e->offset_in_page, e->grain); } } -static void edac_ue_error(struct mem_ctl_info *mci, - const u16 error_count, - const int pos[EDAC_MAX_LAYERS], - const char *msg, - const char *location, - const char *label, - const char *detail, - const char *other_detail, - const bool enable_per_layer_report) +static void edac_ue_error(struct edac_raw_error_desc *e) { - char *msg_aux = ""; - - if (*msg) - msg_aux = " "; + struct mem_ctl_info *mci = error_desc_to_mci(e); if (edac_mc_get_log_ue()) { - if (other_detail && *other_detail) - edac_mc_printk(mci, KERN_WARNING, - "%d UE %s%son %s (%s %s - %s)\n", - error_count, msg, msg_aux, label, - location, detail, other_detail); - else - edac_mc_printk(mci, KERN_WARNING, - "%d UE %s%son %s (%s %s)\n", - error_count, msg, msg_aux, label, - location, detail); + edac_mc_printk(mci, KERN_WARNING, + "%d UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n", + e->error_count, e->msg, + *e->msg ? " " : "", + e->label, e->location, e->page_frame_number, e->offset_in_page, + e->grain, + *e->other_detail ? " - " : "", + e->other_detail); } if (edac_mc_get_panic_on_ue()) { - if (other_detail && *other_detail) - panic("UE %s%son %s (%s%s - %s)\n", - msg, msg_aux, label, location, detail, other_detail); - else - panic("UE %s%son %s (%s%s)\n", - msg, msg_aux, label, location, detail); + panic("UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n", + e->msg, + *e->msg ? " " : "", + e->label, e->location, e->page_frame_number, e->offset_in_page, + e->grain, + *e->other_detail ? " - " : "", + e->other_detail); } - edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count); + edac_inc_ue_error(e); } -void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type, - struct mem_ctl_info *mci, - struct edac_raw_error_desc *e) +static void edac_inc_csrow(struct edac_raw_error_desc *e, int row, int chan) { - char detail[80]; - int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer }; + struct mem_ctl_info *mci = error_desc_to_mci(e); + enum hw_event_mc_err_type type = e->type; + u16 count = e->error_count; + + if (row < 0) + return; + + edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan); + + if (type == HW_EVENT_ERR_CORRECTED) { + mci->csrows[row]->ce_count += count; + if (chan >= 0) + mci->csrows[row]->channels[chan]->ce_count += count; + } else { + mci->csrows[row]->ue_count += count; + } +} + +void edac_raw_mc_handle_error(struct edac_raw_error_desc *e) +{ + struct mem_ctl_info *mci = error_desc_to_mci(e); u8 grain_bits; /* Sanity-check driver-supplied grain value. 
*/ @@ -1062,31 +1057,16 @@ void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type, /* Report the error via the trace interface */ if (IS_ENABLED(CONFIG_RAS)) - trace_mc_event(type, e->msg, e->label, e->error_count, + trace_mc_event(e->type, e->msg, e->label, e->error_count, mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer, (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page, grain_bits, e->syndrome, e->other_detail); - /* Memory type dependent details about the error */ - if (type == HW_EVENT_ERR_CORRECTED) { - snprintf(detail, sizeof(detail), - "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx", - e->page_frame_number, e->offset_in_page, - e->grain, e->syndrome); - edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label, - detail, e->other_detail, e->enable_per_layer_report, - e->page_frame_number, e->offset_in_page, e->grain); - } else { - snprintf(detail, sizeof(detail), - "page:0x%lx offset:0x%lx grain:%ld", - e->page_frame_number, e->offset_in_page, e->grain); - - edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label, - detail, e->other_detail, e->enable_per_layer_report); - } - - + if (e->type == HW_EVENT_ERR_CORRECTED) + edac_ce_error(e); + else + edac_ue_error(e); } EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error); @@ -1108,25 +1088,27 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer }; int i, n_labels = 0; struct edac_raw_error_desc *e = &mci->error_desc; + bool any_memory = true; edac_dbg(3, "MC%d\n", mci->mc_idx); /* Fills the error report buffer */ memset(e, 0, sizeof (*e)); e->error_count = error_count; + e->type = type; e->top_layer = top_layer; e->mid_layer = mid_layer; e->low_layer = low_layer; e->page_frame_number = page_frame_number; e->offset_in_page = offset_in_page; e->syndrome = syndrome; - e->msg = msg; - e->other_detail = other_detail; + /* need valid strings here for both: */ + e->msg = msg ?: ""; + e->other_detail = other_detail ?: ""; /* - * Check if the event report is consistent and if the memory - * location is known. If it is known, enable_per_layer_report will be - * true, the DIMM(s) label info will be filled and the per-layer + * Check if the event report is consistent and if the memory location is + * known. If it is, the DIMM(s) label info will be filled and the DIMM's * error counters will be incremented. */ for (i = 0; i < mci->n_layers; i++) { @@ -1145,7 +1127,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, pos[i] = -1; } if (pos[i] >= 0) - e->enable_per_layer_report = true; + any_memory = false; } /* @@ -1176,24 +1158,25 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, /* * If the error is memory-controller wide, there's no need to - * seek for the affected DIMMs because the whole - * channel/memory controller/... may be affected. - * Also, don't show errors for empty DIMM slots. + * seek for the affected DIMMs because the whole channel/memory + * controller/... may be affected. Also, don't show errors for + * empty DIMM slots. 
*/ - if (!e->enable_per_layer_report || !dimm->nr_pages) + if (!dimm->nr_pages) continue; - if (n_labels >= EDAC_MAX_LABELS) { - e->enable_per_layer_report = false; - break; - } n_labels++; - if (p != e->label) { - strcpy(p, OTHER_LABEL); - p += strlen(OTHER_LABEL); + if (n_labels > EDAC_MAX_LABELS) { + p = e->label; + *p = '\0'; + } else { + if (p != e->label) { + strcpy(p, OTHER_LABEL); + p += strlen(OTHER_LABEL); + } + strcpy(p, dimm->label); + p += strlen(p); } - strcpy(p, dimm->label); - p += strlen(p); /* * get csrow/channel of the DIMM, in order to allow @@ -1213,22 +1196,12 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, chan = -2; } - if (!e->enable_per_layer_report) { + if (any_memory) strcpy(e->label, "any memory"); - } else { - edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan); - if (p == e->label) - strcpy(e->label, "unknown memory"); - if (type == HW_EVENT_ERR_CORRECTED) { - if (row >= 0) { - mci->csrows[row]->ce_count += error_count; - if (chan >= 0) - mci->csrows[row]->channels[chan]->ce_count += error_count; - } - } else - if (row >= 0) - mci->csrows[row]->ue_count += error_count; - } + else if (!*e->label) + strcpy(e->label, "unknown memory"); + + edac_inc_csrow(e, row, chan); /* Fill the RAM location data */ p = e->location; @@ -1244,6 +1217,6 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, if (p > e->location) *(p - 1) = '\0'; - edac_raw_mc_handle_error(type, mci, e); + edac_raw_mc_handle_error(e); } EXPORT_SYMBOL_GPL(edac_mc_handle_error); diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h index 02aac5c61d00..881b00eadf7a 100644 --- a/drivers/edac/edac_mc.h +++ b/drivers/edac/edac_mc.h @@ -212,17 +212,13 @@ extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, * edac_raw_mc_handle_error() - Reports a memory event to userspace without * doing anything to discover the error location. * - * @type: severity of the error (CE/UE/Fatal) - * @mci: a struct mem_ctl_info pointer * @e: error description * * This raw function is used internally by edac_mc_handle_error(). It should * only be called directly when the hardware error come directly from BIOS, * like in the case of APEI GHES driver. */ -void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type, - struct mem_ctl_info *mci, - struct edac_raw_error_desc *e); +void edac_raw_mc_handle_error(struct edac_raw_error_desc *e); /** * edac_mc_handle_error() - Reports a memory event to userspace. diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index c70ec0a306d8..4e6aca595133 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -274,14 +274,8 @@ static const struct attribute_group *csrow_attr_groups[] = { NULL }; -static void csrow_attr_release(struct device *dev) -{ - /* release device with _edac_mc_free() */ -} - static const struct device_type csrow_attr_type = { .groups = csrow_attr_groups, - .release = csrow_attr_release, }; /* @@ -387,6 +381,14 @@ static const struct attribute_group *csrow_dev_groups[] = { NULL }; +static void csrow_release(struct device *dev) +{ + /* + * Nothing to do, just unregister sysfs here. The mci + * device owns the data and will also release it. 
+ */ +} + static inline int nr_pages_per_csrow(struct csrow_info *csrow) { int chan, nr_pages = 0; @@ -405,6 +407,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci, csrow->dev.type = &csrow_attr_type; csrow->dev.groups = csrow_dev_groups; + csrow->dev.release = csrow_release; device_initialize(&csrow->dev); csrow->dev.parent = &mci->dev; csrow->mci = mci; @@ -441,10 +444,8 @@ static int edac_create_csrow_objects(struct mem_ctl_info *mci) error: for (--i; i >= 0; i--) { - csrow = mci->csrows[i]; - if (!nr_pages_per_csrow(csrow)) - continue; - device_unregister(&mci->csrows[i]->dev); + if (device_is_registered(&mci->csrows[i]->dev)) + device_unregister(&mci->csrows[i]->dev); } return err; @@ -453,15 +454,13 @@ error: static void edac_delete_csrow_objects(struct mem_ctl_info *mci) { int i; - struct csrow_info *csrow; - for (i = mci->nr_csrows - 1; i >= 0; i--) { - csrow = mci->csrows[i]; - if (!nr_pages_per_csrow(csrow)) - continue; - device_unregister(&mci->csrows[i]->dev); + for (i = 0; i < mci->nr_csrows; i++) { + if (device_is_registered(&mci->csrows[i]->dev)) + device_unregister(&mci->csrows[i]->dev); } } + #endif /* @@ -552,10 +551,8 @@ static ssize_t dimmdev_ce_count_show(struct device *dev, char *data) { struct dimm_info *dimm = to_dimm(dev); - u32 count; - count = dimm->mci->ce_per_layer[dimm->mci->n_layers-1][dimm->idx]; - return sprintf(data, "%u\n", count); + return sprintf(data, "%u\n", dimm->ce_count); } static ssize_t dimmdev_ue_count_show(struct device *dev, @@ -563,10 +560,8 @@ static ssize_t dimmdev_ue_count_show(struct device *dev, char *data) { struct dimm_info *dimm = to_dimm(dev); - u32 count; - count = dimm->mci->ue_per_layer[dimm->mci->n_layers-1][dimm->idx]; - return sprintf(data, "%u\n", count); + return sprintf(data, "%u\n", dimm->ue_count); } /* dimm/rank attribute files */ @@ -602,16 +597,18 @@ static const struct attribute_group *dimm_attr_groups[] = { NULL }; -static void dimm_attr_release(struct device *dev) -{ - /* release device with _edac_mc_free() */ -} - static const struct device_type dimm_attr_type = { .groups = dimm_attr_groups, - .release = dimm_attr_release, }; +static void dimm_release(struct device *dev) +{ + /* + * Nothing to do, just unregister sysfs here. The mci + * device owns the data and will also release it. 
+ */ +} + /* Create a DIMM object under specifed memory controller device */ static int edac_create_dimm_object(struct mem_ctl_info *mci, struct dimm_info *dimm) @@ -620,6 +617,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci, dimm->mci = mci; dimm->dev.type = &dimm_attr_type; + dimm->dev.release = dimm_release; device_initialize(&dimm->dev); dimm->dev.parent = &mci->dev; @@ -659,7 +657,9 @@ static ssize_t mci_reset_counters_store(struct device *dev, const char *data, size_t count) { struct mem_ctl_info *mci = to_mci(dev); - int cnt, row, chan, i; + struct dimm_info *dimm; + int row, chan; + mci->ue_mc = 0; mci->ce_mc = 0; mci->ue_noinfo_count = 0; @@ -675,11 +675,9 @@ static ssize_t mci_reset_counters_store(struct device *dev, ri->channels[chan]->ce_count = 0; } - cnt = 1; - for (i = 0; i < mci->n_layers; i++) { - cnt *= mci->layers[i].size; - memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32)); - memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32)); + mci_for_each_dimm(mci, dimm) { + dimm->ue_count = 0; + dimm->ce_count = 0; } mci->start_time = jiffies; @@ -884,14 +882,8 @@ static const struct attribute_group *mci_attr_groups[] = { NULL }; -static void mci_attr_release(struct device *dev) -{ - /* release device with _edac_mc_free() */ -} - static const struct device_type mci_attr_type = { .groups = mci_attr_groups, - .release = mci_attr_release, }; /* @@ -910,8 +902,6 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci, /* get the /sys/devices/system/edac subsys reference */ mci->dev.type = &mci_attr_type; - device_initialize(&mci->dev); - mci->dev.parent = mci_pdev; mci->dev.groups = groups; dev_set_name(&mci->dev, "mc%d", mci->mc_idx); @@ -921,7 +911,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci, err = device_add(&mci->dev); if (err < 0) { edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev)); - put_device(&mci->dev); + /* no put_device() here, free mci with _edac_mc_free() */ return err; } @@ -937,24 +927,20 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci, err = edac_create_dimm_object(mci, dimm); if (err) - goto fail_unregister_dimm; + goto fail; } #ifdef CONFIG_EDAC_LEGACY_SYSFS err = edac_create_csrow_objects(mci); if (err < 0) - goto fail_unregister_dimm; + goto fail; #endif edac_create_debugfs_nodes(mci); return 0; -fail_unregister_dimm: - mci_for_each_dimm(mci, dimm) { - if (device_is_registered(&dimm->dev)) - device_unregister(&dimm->dev); - } - device_unregister(&mci->dev); +fail: + edac_remove_sysfs_mci_device(mci); return err; } @@ -966,6 +952,9 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) { struct dimm_info *dimm; + if (!device_is_registered(&mci->dev)) + return; + edac_dbg(0, "\n"); #ifdef CONFIG_EDAC_DEBUG @@ -976,17 +965,14 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) #endif mci_for_each_dimm(mci, dimm) { - if (dimm->nr_pages == 0) + if (!device_is_registered(&dimm->dev)) continue; edac_dbg(1, "unregistering device %s\n", dev_name(&dimm->dev)); device_unregister(&dimm->dev); } -} -void edac_unregister_sysfs(struct mem_ctl_info *mci) -{ - edac_dbg(1, "unregistering device %s\n", dev_name(&mci->dev)); - device_unregister(&mci->dev); + /* only remove the device, but keep mci */ + device_del(&mci->dev); } static void mc_attr_release(struct device *dev) @@ -1000,9 +986,6 @@ static void mc_attr_release(struct device *dev) kfree(dev); } -static const struct device_type mc_attr_type = { - .release = mc_attr_release, -}; /* * Init/exit code for the module. 
Basically, creates/removes /sys/class/rc */ @@ -1015,11 +998,10 @@ int __init edac_mc_sysfs_init(void) return -ENOMEM; mci_pdev->bus = edac_get_sysfs_subsys(); - mci_pdev->type = &mc_attr_type; - device_initialize(mci_pdev); - dev_set_name(mci_pdev, "mc"); + mci_pdev->release = mc_attr_release; + mci_pdev->init_name = "mc"; - err = device_add(mci_pdev); + err = device_register(mci_pdev); if (err < 0) { edac_dbg(1, "failure: create device %s\n", dev_name(mci_pdev)); put_device(mci_pdev); diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index 388427d378b1..aa1f91688eb8 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h @@ -28,7 +28,6 @@ void edac_mc_sysfs_exit(void); extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci, const struct attribute_group **groups); extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci); -void edac_unregister_sysfs(struct mem_ctl_info *mci); extern int edac_get_log_ue(void); extern int edac_get_log_ce(void); extern int edac_get_panic_on_ue(void); diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c index b99080d8a10c..cb3dab56a875 100644 --- a/drivers/edac/ghes_edac.c +++ b/drivers/edac/ghes_edac.c @@ -201,7 +201,6 @@ static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg) void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) { - enum hw_event_mc_err_type type; struct edac_raw_error_desc *e; struct mem_ctl_info *mci; struct ghes_edac_pvt *pvt; @@ -240,17 +239,17 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) switch (sev) { case GHES_SEV_CORRECTED: - type = HW_EVENT_ERR_CORRECTED; + e->type = HW_EVENT_ERR_CORRECTED; break; case GHES_SEV_RECOVERABLE: - type = HW_EVENT_ERR_UNCORRECTED; + e->type = HW_EVENT_ERR_UNCORRECTED; break; case GHES_SEV_PANIC: - type = HW_EVENT_ERR_FATAL; + e->type = HW_EVENT_ERR_FATAL; break; default: case GHES_SEV_NO: - type = HW_EVENT_ERR_INFO; + e->type = HW_EVENT_ERR_INFO; } edac_dbg(1, "error validation_bits: 0x%08llx\n", @@ -356,11 +355,8 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) mem_err->mem_dev_handle); index = get_dimm_smbios_index(mci, mem_err->mem_dev_handle); - if (index >= 0) { + if (index >= 0) e->top_layer = index; - e->enable_per_layer_report = true; - } - } if (p > e->location) *(p - 1) = '\0'; @@ -442,7 +438,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) if (p > pvt->other_detail) *(p - 1) = '\0'; - edac_raw_mc_handle_error(type, mci, e); + edac_raw_mc_handle_error(e); unlock: spin_unlock_irqrestore(&ghes_lock, flags); diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index 059eccf0582b..df08de963d10 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -123,10 +123,10 @@ static int i10nm_get_all_munits(void) } static const struct x86_cpu_id i10nm_cpuids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_TREMONT_D, 0, 0 }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_X, 0, 0 }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_D, 0, 0 }, - { } + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL), + {} }; MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids); diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index ea980c556f2e..8874b7722b2f 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -1239,7 +1239,7 @@ static int __init mce_amd_init(void) case 0x17: case 0x18: - 
pr_warn("Decoding supported only on Scalable MCA processors.\n"); + pr_warn_once("Decoding supported only on Scalable MCA processors.\n"); return -EINVAL; default: diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index 933f7722b893..bc47328eb485 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -1537,8 +1537,8 @@ static struct dunit_ops dnv_ops = { }; static const struct x86_cpu_id pnd2_cpuids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_D, 0, (kernel_ulong_t)&dnv_ops }, + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &apl_ops), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &dnv_ops), { } }; MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids); diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 4957e8ee1879..7d51c82be62b 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -3420,13 +3420,13 @@ fail0: } static const struct x86_cpu_id sbridge_cpuids[] = { - INTEL_CPU_FAM6(SANDYBRIDGE_X, pci_dev_descr_sbridge_table), - INTEL_CPU_FAM6(IVYBRIDGE_X, pci_dev_descr_ibridge_table), - INTEL_CPU_FAM6(HASWELL_X, pci_dev_descr_haswell_table), - INTEL_CPU_FAM6(BROADWELL_X, pci_dev_descr_broadwell_table), - INTEL_CPU_FAM6(BROADWELL_D, pci_dev_descr_broadwell_table), - INTEL_CPU_FAM6(XEON_PHI_KNL, pci_dev_descr_knl_table), - INTEL_CPU_FAM6(XEON_PHI_KNM, pci_dev_descr_knl_table), + X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &pci_dev_descr_sbridge_table), + X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &pci_dev_descr_ibridge_table), + X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &pci_dev_descr_haswell_table), + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &pci_dev_descr_broadwell_table), + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &pci_dev_descr_broadwell_table), + X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &pci_dev_descr_knl_table), + X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &pci_dev_descr_knl_table), { } }; MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids); diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c index 83545b4facb7..46a3a3440f5e 100644 --- a/drivers/edac/skx_base.c +++ b/drivers/edac/skx_base.c @@ -158,7 +158,7 @@ fail: } static const struct x86_cpu_id skx_cpuids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, 0 }, + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL), { } }; MODULE_DEVICE_TABLE(x86cpu, skx_cpuids); diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index 2d263382d797..12211dc040e8 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -477,22 +477,16 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p) if (p->ce_cnt) { pinf = &p->ceinfo; - if (!priv->p_data->quirks) { + if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) { snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "DDR ECC error type:%s Row %d Bank %d Col %d ", - "CE", pinf->row, pinf->bank, pinf->col); - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "Bit Position: %d Data: 0x%08x\n", + "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x", + "CE", pinf->row, pinf->bank, + pinf->bankgrpnr, pinf->blknr, pinf->bitpos, pinf->data); } else { snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "DDR ECC error type:%s Row %d Bank %d Col %d ", - "CE", pinf->row, pinf->bank, pinf->col); - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "BankGroup Number %d Block Number %d ", - pinf->bankgrpnr, pinf->blknr); - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "Bit Position: %d Data: 
0x%08x\n", + "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x", + "CE", pinf->row, pinf->bank, pinf->col, pinf->bitpos, pinf->data); } @@ -503,17 +497,15 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p) if (p->ue_cnt) { pinf = &p->ueinfo; - if (!priv->p_data->quirks) { + if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) { snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "DDR ECC error type :%s Row %d Bank %d Col %d ", - "UE", pinf->row, pinf->bank, pinf->col); + "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d", + "UE", pinf->row, pinf->bank, + pinf->bankgrpnr, pinf->blknr); } else { snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, "DDR ECC error type :%s Row %d Bank %d Col %d ", "UE", pinf->row, pinf->bank, pinf->col); - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "BankGroup Number %d Block Number %d", - pinf->bankgrpnr, pinf->blknr); } edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c index a7f216191493..34b7afffac28 100644 --- a/drivers/extcon/extcon-axp288.c +++ b/drivers/extcon/extcon-axp288.c @@ -107,7 +107,7 @@ struct axp288_extcon_info { }; static const struct x86_cpu_id cherry_trail_cpu_ids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT, X86_FEATURE_ANY }, + X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL), {} }; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index ecc83e2f032c..613828d3f106 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -239,6 +239,11 @@ config EFI_DISABLE_PCI_DMA endmenu +config EFI_EMBEDDED_FIRMWARE + bool + depends on EFI + select CRYPTO_LIB_SHA256 + config UEFI_CPER bool diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index 554d795270d9..7a216984552b 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -13,19 +13,21 @@ KASAN_SANITIZE_runtime-wrappers.o := n obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o tpm.o obj-$(CONFIG_EFI) += capsule.o memmap.o +obj-$(CONFIG_EFI_PARAMS_FROM_FDT) += fdtparams.o obj-$(CONFIG_EFI_VARS) += efivars.o obj-$(CONFIG_EFI_ESRT) += esrt.o obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o obj-$(CONFIG_UEFI_CPER) += cper.o obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o -obj-$(CONFIG_EFI_STUB) += libstub/ +subdir-$(CONFIG_EFI_STUB) += libstub obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_map.o obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o obj-$(CONFIG_EFI_TEST) += test/ obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o +obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o fake_map-y += fake_mem.o fake_map-$(CONFIG_X86) += x86_fake_mem.o diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c index 5ccf39986a14..34f53d898acb 100644 --- a/drivers/firmware/efi/apple-properties.c +++ b/drivers/firmware/efi/apple-properties.c @@ -31,7 +31,7 @@ __setup("dump_apple_properties", dump_properties_enable); struct dev_header { u32 len; u32 prop_count; - struct efi_dev_path path[0]; + struct efi_dev_path path[]; /* * followed by key/value pairs, each key and value preceded by u32 len, * len includes itself, value may be empty (in which case its len is 4) @@ -42,11 +42,11 @@ struct properties_header { u32 len; u32 version; u32 dev_count; - 
struct dev_header dev_header[0]; + struct dev_header dev_header[]; }; static void __init unmarshal_key_value_pairs(struct dev_header *dev_header, - struct device *dev, void *ptr, + struct device *dev, const void *ptr, struct property_entry entry[]) { int i; @@ -117,10 +117,10 @@ static int __init unmarshal_devices(struct properties_header *properties) while (offset + sizeof(struct dev_header) < properties->len) { struct dev_header *dev_header = (void *)properties + offset; struct property_entry *entry = NULL; + const struct efi_dev_path *ptr; struct device *dev; size_t len; int ret, i; - void *ptr; if (offset + dev_header->len > properties->len || dev_header->len <= sizeof(*dev_header)) { @@ -131,10 +131,10 @@ static int __init unmarshal_devices(struct properties_header *properties) ptr = dev_header->path; len = dev_header->len - sizeof(*dev_header); - dev = efi_get_device_by_path((struct efi_dev_path **)&ptr, &len); + dev = efi_get_device_by_path(&ptr, &len); if (IS_ERR(dev)) { pr_err("device path parse error %ld at %#zx:\n", - PTR_ERR(dev), ptr - (void *)dev_header); + PTR_ERR(dev), (void *)ptr - (void *)dev_header); print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET, 16, 1, dev_header, dev_header->len, true); dev = NULL; diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index d99f5b0c8a09..9e5e62f5f94d 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -22,8 +22,6 @@ #include <asm/efi.h> -u64 efi_system_table; - static int __init is_memory(efi_memory_desc_t *md) { if (md->attribute & (EFI_MEMORY_WB|EFI_MEMORY_WT|EFI_MEMORY_WC)) @@ -36,7 +34,7 @@ static int __init is_memory(efi_memory_desc_t *md) * as some data members of the EFI system table are virtually remapped after * SetVirtualAddressMap() has been called. 
*/ -static phys_addr_t efi_to_phys(unsigned long addr) +static phys_addr_t __init efi_to_phys(unsigned long addr) { efi_memory_desc_t *md; @@ -55,7 +53,7 @@ static phys_addr_t efi_to_phys(unsigned long addr) static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR; -static __initdata efi_config_table_type_t arch_tables[] = { +static const efi_config_table_type_t arch_tables[] __initconst = { {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, NULL, &screen_info_table}, {NULL_GUID, NULL, NULL} }; @@ -83,17 +81,15 @@ static void __init init_screen_info(void) memblock_mark_nomap(screen_info.lfb_base, screen_info.lfb_size); } -static int __init uefi_init(void) +static int __init uefi_init(u64 efi_system_table) { - efi_char16_t *c16; - void *config_tables; + efi_config_table_t *config_tables; + efi_system_table_t *systab; size_t table_size; - char vendor[100] = "unknown"; - int i, retval; + int retval; - efi.systab = early_memremap_ro(efi_system_table, - sizeof(efi_system_table_t)); - if (efi.systab == NULL) { + systab = early_memremap_ro(efi_system_table, sizeof(efi_system_table_t)); + if (systab == NULL) { pr_warn("Unable to map EFI system table.\n"); return -ENOMEM; } @@ -102,53 +98,29 @@ static int __init uefi_init(void) if (IS_ENABLED(CONFIG_64BIT)) set_bit(EFI_64BIT, &efi.flags); - /* - * Verify the EFI Table - */ - if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) { - pr_err("System table signature incorrect\n"); - retval = -EINVAL; + retval = efi_systab_check_header(&systab->hdr, 2); + if (retval) goto out; - } - if ((efi.systab->hdr.revision >> 16) < 2) - pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n", - efi.systab->hdr.revision >> 16, - efi.systab->hdr.revision & 0xffff); - - efi.runtime_version = efi.systab->hdr.revision; - - /* Show what we know for posterity */ - c16 = early_memremap_ro(efi_to_phys(efi.systab->fw_vendor), - sizeof(vendor) * sizeof(efi_char16_t)); - if (c16) { - for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) - vendor[i] = c16[i]; - vendor[i] = '\0'; - early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t)); - } - pr_info("EFI v%u.%.02u by %s\n", - efi.systab->hdr.revision >> 16, - efi.systab->hdr.revision & 0xffff, vendor); + efi.runtime = systab->runtime; + efi.runtime_version = systab->hdr.revision; - table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; - config_tables = early_memremap_ro(efi_to_phys(efi.systab->tables), + efi_systab_report_header(&systab->hdr, efi_to_phys(systab->fw_vendor)); + + table_size = sizeof(efi_config_table_t) * systab->nr_tables; + config_tables = early_memremap_ro(efi_to_phys(systab->tables), table_size); if (config_tables == NULL) { pr_warn("Unable to map EFI config table array.\n"); retval = -ENOMEM; goto out; } - retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, - sizeof(efi_config_table_t), + retval = efi_config_parse_tables(config_tables, systab->nr_tables, arch_tables); - if (!retval) - efi.config_table = (unsigned long)efi.systab->tables; - early_memunmap(config_tables, table_size); out: - early_memunmap(efi.systab, sizeof(efi_system_table_t)); + early_memunmap(systab, sizeof(efi_system_table_t)); return retval; } @@ -233,19 +205,13 @@ static __init void reserve_regions(void) void __init efi_init(void) { struct efi_memory_map_data data; - struct efi_fdt_params params; + u64 efi_system_table; /* Grab UEFI information placed in FDT by stub */ - if (!efi_get_fdt_params(¶ms)) + efi_system_table = efi_get_fdt_params(&data); + if 
(!efi_system_table) return; - efi_system_table = params.system_table; - - data.desc_version = params.desc_ver; - data.desc_size = params.desc_size; - data.size = params.mmap_size; - data.phys_map = params.mmap; - if (efi_memmap_init_early(&data) < 0) { /* * If we are booting via UEFI, the UEFI memory map is the only @@ -259,7 +225,7 @@ void __init efi_init(void) "Unexpected EFI_MEMORY_DESCRIPTOR version %ld", efi.memmap.desc_version); - if (uefi_init() < 0) { + if (uefi_init(efi_system_table) < 0) { efi_memmap_unmap(); return; } @@ -267,9 +233,8 @@ void __init efi_init(void) reserve_regions(); efi_esrt_init(); - memblock_reserve(params.mmap & PAGE_MASK, - PAGE_ALIGN(params.mmap_size + - (params.mmap & ~PAGE_MASK))); + memblock_reserve(data.phys_map & PAGE_MASK, + PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK))); init_screen_info(); @@ -349,7 +314,7 @@ static int efifb_add_links(const struct fwnode_handle *fwnode, * If this fails, retrying this function at a later point won't * change anything. So, don't return an error after this. */ - if (!device_link_add(dev, sup_dev, 0)) + if (!device_link_add(dev, sup_dev, fw_devlink_get_flags())) dev_warn(dev, "device_link_add() failed\n"); put_device(sup_dev); diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 9dda2602c862..b876373f2297 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -25,8 +25,6 @@ #include <asm/pgalloc.h> #include <asm/pgtable.h> -extern u64 efi_system_table; - #if defined(CONFIG_PTDUMP_DEBUGFS) && defined(CONFIG_ARM64) #include <asm/ptdump.h> @@ -54,13 +52,11 @@ device_initcall(ptdump_init); static bool __init efi_virtmap_init(void) { efi_memory_desc_t *md; - bool systab_found; efi_mm.pgd = pgd_alloc(&efi_mm); mm_init_cpumask(&efi_mm); init_new_context(NULL, &efi_mm); - systab_found = false; for_each_efi_memory_desc(md) { phys_addr_t phys = md->phys_addr; int ret; @@ -76,20 +72,6 @@ static bool __init efi_virtmap_init(void) &phys, ret); return false; } - /* - * If this entry covers the address of the UEFI system table, - * calculate and record its virtual address. 
- */ - if (efi_system_table >= phys && - efi_system_table < phys + (md->num_pages * EFI_PAGE_SIZE)) { - efi.systab = (void *)(unsigned long)(efi_system_table - - phys + md->virt_addr); - systab_found = true; - } - } - if (!systab_found) { - pr_err("No virtual mapping found for the UEFI System Table\n"); - return false; } if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions)) diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index d3067cbd5114..4dde8edd53b6 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -168,7 +168,7 @@ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info) static ssize_t efi_capsule_write(struct file *file, const char __user *buff, size_t count, loff_t *offp) { - int ret = 0; + int ret; struct capsule_info *cap_info = file->private_data; struct page *page; void *kbuff = NULL; diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c index 20123384271c..5c9625e552f4 100644 --- a/drivers/firmware/efi/dev-path-parser.c +++ b/drivers/firmware/efi/dev-path-parser.c @@ -31,13 +31,13 @@ static int __init match_acpi_dev(struct device *dev, const void *data) return !strcmp("0", hid_uid.uid); } -static long __init parse_acpi_path(struct efi_dev_path *node, +static long __init parse_acpi_path(const struct efi_dev_path *node, struct device *parent, struct device **child) { struct acpi_hid_uid hid_uid = {}; struct device *phys_dev; - if (node->length != 12) + if (node->header.length != 12) return -EINVAL; sprintf(hid_uid.hid[0].id, "%c%c%c%04X", @@ -69,12 +69,12 @@ static int __init match_pci_dev(struct device *dev, void *data) return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn; } -static long __init parse_pci_path(struct efi_dev_path *node, +static long __init parse_pci_path(const struct efi_dev_path *node, struct device *parent, struct device **child) { unsigned int devfn; - if (node->length != 6) + if (node->header.length != 6) return -EINVAL; if (!parent) return -EINVAL; @@ -105,19 +105,19 @@ static long __init parse_pci_path(struct efi_dev_path *node, * search for a device. */ -static long __init parse_end_path(struct efi_dev_path *node, +static long __init parse_end_path(const struct efi_dev_path *node, struct device *parent, struct device **child) { - if (node->length != 4) + if (node->header.length != 4) return -EINVAL; - if (node->sub_type != EFI_DEV_END_INSTANCE && - node->sub_type != EFI_DEV_END_ENTIRE) + if (node->header.sub_type != EFI_DEV_END_INSTANCE && + node->header.sub_type != EFI_DEV_END_ENTIRE) return -EINVAL; if (!parent) return -ENODEV; *child = get_device(parent); - return node->sub_type; + return node->header.sub_type; } /** @@ -156,7 +156,7 @@ static long __init parse_end_path(struct efi_dev_path *node, * %ERR_PTR(-EINVAL) if a node is malformed or exceeds @len, * %ERR_PTR(-ENOTSUPP) if support for a node type is not yet implemented. 
*/ -struct device * __init efi_get_device_by_path(struct efi_dev_path **node, +struct device * __init efi_get_device_by_path(const struct efi_dev_path **node, size_t *len) { struct device *parent = NULL, *child; @@ -166,16 +166,16 @@ struct device * __init efi_get_device_by_path(struct efi_dev_path **node, return NULL; while (!ret) { - if (*len < 4 || *len < (*node)->length) + if (*len < 4 || *len < (*node)->header.length) ret = -EINVAL; - else if ((*node)->type == EFI_DEV_ACPI && - (*node)->sub_type == EFI_DEV_BASIC_ACPI) + else if ((*node)->header.type == EFI_DEV_ACPI && + (*node)->header.sub_type == EFI_DEV_BASIC_ACPI) ret = parse_acpi_path(*node, parent, &child); - else if ((*node)->type == EFI_DEV_HW && - (*node)->sub_type == EFI_DEV_PCI) + else if ((*node)->header.type == EFI_DEV_HW && + (*node)->header.sub_type == EFI_DEV_PCI) ret = parse_pci_path(*node, parent, &child); - else if (((*node)->type == EFI_DEV_END_PATH || - (*node)->type == EFI_DEV_END_PATH2)) + else if (((*node)->header.type == EFI_DEV_END_PATH || + (*node)->header.type == EFI_DEV_END_PATH2)) ret = parse_end_path(*node, parent, &child); else ret = -ENOTSUPP; @@ -185,8 +185,8 @@ struct device * __init efi_get_device_by_path(struct efi_dev_path **node, return ERR_PTR(ret); parent = child; - *node = (void *)*node + (*node)->length; - *len -= (*node)->length; + *node = (void *)*node + (*node)->header.length; + *len -= (*node)->header.length; } if (ret == EFI_DEV_END_ENTIRE) diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c index b07c17643210..6aafdb67dbca 100644 --- a/drivers/firmware/efi/efi-bgrt.c +++ b/drivers/firmware/efi/efi-bgrt.c @@ -42,7 +42,12 @@ void __init efi_bgrt_init(struct acpi_table_header *table) return; } *bgrt = *(struct acpi_table_bgrt *)table; - if (bgrt->version != 1) { + /* + * Only version 1 is defined but some older laptops (seen on Lenovo + * Ivy Bridge models) have a correct version 1 BGRT table with the + * version set to 0, so we accept version 0 and 1. + */ + if (bgrt->version > 1) { pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n", bgrt->version); goto out; diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 9ea13e8d12ec..c2f1d4e6630b 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -161,7 +161,7 @@ static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos, * * @record: pstore record to pass to callback * - * You MUST call efivar_enter_iter_begin() before this function, and + * You MUST call efivar_entry_iter_begin() before this function, and * efivar_entry_iter_end() afterwards. 
* */ @@ -356,7 +356,7 @@ static struct pstore_info efi_pstore_info = { static __init int efivars_pstore_init(void) { - if (!efi_enabled(EFI_RUNTIME_SERVICES)) + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) return 0; if (!efivars_kobject()) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 621220ab3d0e..911a2bd0f6b7 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -17,10 +17,10 @@ #include <linux/kobject.h> #include <linux/module.h> #include <linux/init.h> +#include <linux/debugfs.h> #include <linux/device.h> #include <linux/efi.h> #include <linux/of.h> -#include <linux/of_fdt.h> #include <linux/io.h> #include <linux/kexec.h> #include <linux/platform_device.h> @@ -35,27 +35,21 @@ #include <asm/early_ioremap.h> struct efi __read_mostly efi = { - .mps = EFI_INVALID_TABLE_ADDR, + .runtime_supported_mask = EFI_RT_SUPPORTED_ALL, .acpi = EFI_INVALID_TABLE_ADDR, .acpi20 = EFI_INVALID_TABLE_ADDR, .smbios = EFI_INVALID_TABLE_ADDR, .smbios3 = EFI_INVALID_TABLE_ADDR, - .boot_info = EFI_INVALID_TABLE_ADDR, - .hcdp = EFI_INVALID_TABLE_ADDR, - .uga = EFI_INVALID_TABLE_ADDR, - .fw_vendor = EFI_INVALID_TABLE_ADDR, - .runtime = EFI_INVALID_TABLE_ADDR, - .config_table = EFI_INVALID_TABLE_ADDR, .esrt = EFI_INVALID_TABLE_ADDR, - .properties_table = EFI_INVALID_TABLE_ADDR, - .mem_attr_table = EFI_INVALID_TABLE_ADDR, - .rng_seed = EFI_INVALID_TABLE_ADDR, .tpm_log = EFI_INVALID_TABLE_ADDR, .tpm_final_log = EFI_INVALID_TABLE_ADDR, - .mem_reserve = EFI_INVALID_TABLE_ADDR, }; EXPORT_SYMBOL(efi); +unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR; +static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR; +static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR; + struct mm_struct efi_mm = { .mm_rb = RB_ROOT, .mm_users = ATOMIC_INIT(2), @@ -122,8 +116,6 @@ static ssize_t systab_show(struct kobject *kobj, if (!kobj || !buf) return -EINVAL; - if (efi.mps != EFI_INVALID_TABLE_ADDR) - str += sprintf(str, "MPS=0x%lx\n", efi.mps); if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20); if (efi.acpi != EFI_INVALID_TABLE_ADDR) @@ -137,30 +129,17 @@ static ssize_t systab_show(struct kobject *kobj, str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3); if (efi.smbios != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); - if (efi.hcdp != EFI_INVALID_TABLE_ADDR) - str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp); - if (efi.boot_info != EFI_INVALID_TABLE_ADDR) - str += sprintf(str, "BOOTINFO=0x%lx\n", efi.boot_info); - if (efi.uga != EFI_INVALID_TABLE_ADDR) - str += sprintf(str, "UGA=0x%lx\n", efi.uga); - - return str - buf; -} -static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400); + if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86)) { + extern char *efi_systab_show_arch(char *str); -#define EFI_FIELD(var) efi.var + str = efi_systab_show_arch(str); + } -#define EFI_ATTR_SHOW(name) \ -static ssize_t name##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "0x%lx\n", EFI_FIELD(name)); \ + return str - buf; } -EFI_ATTR_SHOW(fw_vendor); -EFI_ATTR_SHOW(runtime); -EFI_ATTR_SHOW(config_table); +static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400); static ssize_t fw_platform_size_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -168,36 +147,24 @@ static ssize_t fw_platform_size_show(struct kobject *kobj, return sprintf(buf, "%d\n", 
efi_enabled(EFI_64BIT) ? 64 : 32); } -static struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor); -static struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime); -static struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table); +extern __weak struct kobj_attribute efi_attr_fw_vendor; +extern __weak struct kobj_attribute efi_attr_runtime; +extern __weak struct kobj_attribute efi_attr_config_table; static struct kobj_attribute efi_attr_fw_platform_size = __ATTR_RO(fw_platform_size); static struct attribute *efi_subsys_attrs[] = { &efi_attr_systab.attr, + &efi_attr_fw_platform_size.attr, &efi_attr_fw_vendor.attr, &efi_attr_runtime.attr, &efi_attr_config_table.attr, - &efi_attr_fw_platform_size.attr, NULL, }; -static umode_t efi_attr_is_visible(struct kobject *kobj, - struct attribute *attr, int n) +umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, + int n) { - if (attr == &efi_attr_fw_vendor.attr) { - if (efi_enabled(EFI_PARAVIRT) || - efi.fw_vendor == EFI_INVALID_TABLE_ADDR) - return 0; - } else if (attr == &efi_attr_runtime.attr) { - if (efi.runtime == EFI_INVALID_TABLE_ADDR) - return 0; - } else if (attr == &efi_attr_config_table.attr) { - if (efi.config_table == EFI_INVALID_TABLE_ADDR) - return 0; - } - return attr->mode; } @@ -325,6 +292,59 @@ free_entry: static inline int efivar_ssdt_load(void) { return 0; } #endif +#ifdef CONFIG_DEBUG_FS + +#define EFI_DEBUGFS_MAX_BLOBS 32 + +static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS]; + +static void __init efi_debugfs_init(void) +{ + struct dentry *efi_debugfs; + efi_memory_desc_t *md; + char name[32]; + int type_count[EFI_BOOT_SERVICES_DATA + 1] = {}; + int i = 0; + + efi_debugfs = debugfs_create_dir("efi", NULL); + if (IS_ERR_OR_NULL(efi_debugfs)) + return; + + for_each_efi_memory_desc(md) { + switch (md->type) { + case EFI_BOOT_SERVICES_CODE: + snprintf(name, sizeof(name), "boot_services_code%d", + type_count[md->type]++); + break; + case EFI_BOOT_SERVICES_DATA: + snprintf(name, sizeof(name), "boot_services_data%d", + type_count[md->type]++); + break; + default: + continue; + } + + if (i >= EFI_DEBUGFS_MAX_BLOBS) { + pr_warn("More then %d EFI boot service segments, only showing first %d in debugfs\n", + EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS); + break; + } + + debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT; + debugfs_blob[i].data = memremap(md->phys_addr, + debugfs_blob[i].size, + MEMREMAP_WB); + if (!debugfs_blob[i].data) + continue; + + debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]); + i++; + } +} +#else +static inline void efi_debugfs_init(void) {} +#endif + /* * We register the efi subsystem with the firmware subsystem and the * efivars subsystem with the efi subsystem, if the system was booted with @@ -334,21 +354,30 @@ static int __init efisubsys_init(void) { int error; + if (!efi_enabled(EFI_RUNTIME_SERVICES)) + efi.runtime_supported_mask = 0; + if (!efi_enabled(EFI_BOOT)) return 0; - /* - * Since we process only one efi_runtime_service() at a time, an - * ordered workqueue (which creates only one execution context) - * should suffice all our needs. 
- */ - efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0); - if (!efi_rts_wq) { - pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n"); - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); - return 0; + if (efi.runtime_supported_mask) { + /* + * Since we process only one efi_runtime_service() at a time, an + * ordered workqueue (which creates only one execution context) + * should suffice for all our needs. + */ + efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0); + if (!efi_rts_wq) { + pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n"); + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); + efi.runtime_supported_mask = 0; + return 0; + } } + if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES)) + platform_device_register_simple("rtc-efi", 0, NULL, 0); + /* We register the efi directory at /sys/firmware/efi */ efi_kobj = kobject_create_and_add("efi", firmware_kobj); if (!efi_kobj) { @@ -356,12 +385,13 @@ static int __init efisubsys_init(void) return -ENOMEM; } - error = generic_ops_register(); - if (error) - goto err_put; - - if (efi_enabled(EFI_RUNTIME_SERVICES)) + if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) { efivar_ssdt_load(); + error = generic_ops_register(); + if (error) + goto err_put; + platform_device_register_simple("efivars", 0, NULL, 0); + } error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group); if (error) { @@ -381,12 +411,16 @@ static int __init efisubsys_init(void) goto err_remove_group; } + if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS)) + efi_debugfs_init(); + return 0; err_remove_group: sysfs_remove_group(efi_kobj, &efi_subsys_attr_group); err_unregister: - generic_ops_unregister(); + if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) + generic_ops_unregister(); err_put: kobject_put(efi_kobj); return error; @@ -467,30 +501,27 @@ void __init efi_mem_reserve(phys_addr_t addr, u64 size) efi_arch_mem_reserve(addr, size); } -static __initdata efi_config_table_type_t common_tables[] = { +static const efi_config_table_type_t common_tables[] __initconst = { {ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20}, {ACPI_TABLE_GUID, "ACPI", &efi.acpi}, - {HCDP_TABLE_GUID, "HCDP", &efi.hcdp}, - {MPS_TABLE_GUID, "MPS", &efi.mps}, {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios}, {SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3}, - {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga}, {EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt}, - {EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table}, - {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table}, - {LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed}, + {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi_mem_attr_table}, + {LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi_rng_seed}, {LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log}, {LINUX_EFI_TPM_FINAL_LOG_GUID, "TPMFinalLog", &efi.tpm_final_log}, - {LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &efi.mem_reserve}, + {LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &mem_reserve}, + {EFI_RT_PROPERTIES_TABLE_GUID, "RTPROP", &rt_prop}, #ifdef CONFIG_EFI_RCI2_TABLE {DELLEMC_EFI_RCI2_TABLE_GUID, NULL, &rci2_table_phys}, #endif {NULL_GUID, NULL, NULL}, }; -static __init int match_config_table(efi_guid_t *guid, +static __init int match_config_table(const efi_guid_t *guid, unsigned long table, - efi_config_table_type_t *table_types) + const efi_config_table_type_t *table_types) { int i; @@ -509,60 +540,59 @@ static __init int match_config_table(efi_guid_t *guid, return 0; } -int __init 
efi_config_parse_tables(void *config_tables, int count, int sz, - efi_config_table_type_t *arch_tables) +int __init efi_config_parse_tables(const efi_config_table_t *config_tables, + int count, + const efi_config_table_type_t *arch_tables) { - void *tablep; + const efi_config_table_64_t *tbl64 = (void *)config_tables; + const efi_config_table_32_t *tbl32 = (void *)config_tables; + const efi_guid_t *guid; + unsigned long table; int i; - tablep = config_tables; pr_info(""); for (i = 0; i < count; i++) { - efi_guid_t guid; - unsigned long table; - - if (efi_enabled(EFI_64BIT)) { - u64 table64; - guid = ((efi_config_table_64_t *)tablep)->guid; - table64 = ((efi_config_table_64_t *)tablep)->table; - table = table64; -#ifndef CONFIG_64BIT - if (table64 >> 32) { + if (!IS_ENABLED(CONFIG_X86)) { + guid = &config_tables[i].guid; + table = (unsigned long)config_tables[i].table; + } else if (efi_enabled(EFI_64BIT)) { + guid = &tbl64[i].guid; + table = tbl64[i].table; + + if (IS_ENABLED(CONFIG_X86_32) && + tbl64[i].table > U32_MAX) { pr_cont("\n"); pr_err("Table located above 4GB, disabling EFI.\n"); return -EINVAL; } -#endif } else { - guid = ((efi_config_table_32_t *)tablep)->guid; - table = ((efi_config_table_32_t *)tablep)->table; + guid = &tbl32[i].guid; + table = tbl32[i].table; } - if (!match_config_table(&guid, table, common_tables)) - match_config_table(&guid, table, arch_tables); - - tablep += sz; + if (!match_config_table(guid, table, common_tables)) + match_config_table(guid, table, arch_tables); } pr_cont("\n"); set_bit(EFI_CONFIG_TABLES, &efi.flags); - if (efi.rng_seed != EFI_INVALID_TABLE_ADDR) { + if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) { struct linux_efi_random_seed *seed; u32 size = 0; - seed = early_memremap(efi.rng_seed, sizeof(*seed)); + seed = early_memremap(efi_rng_seed, sizeof(*seed)); if (seed != NULL) { - size = seed->size; + size = READ_ONCE(seed->size); early_memunmap(seed, sizeof(*seed)); } else { pr_err("Could not map UEFI random seed!\n"); } if (size > 0) { - seed = early_memremap(efi.rng_seed, + seed = early_memremap(efi_rng_seed, sizeof(*seed) + size); if (seed != NULL) { pr_notice("seeding entropy pool\n"); - add_bootloader_randomness(seed->bits, seed->size); + add_bootloader_randomness(seed->bits, size); early_memunmap(seed, sizeof(*seed) + size); } else { pr_err("Could not map UEFI random seed!\n"); @@ -570,35 +600,17 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, } } - if (efi_enabled(EFI_MEMMAP)) + if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP)) efi_memattr_init(); efi_tpm_eventlog_init(); - /* Parse the EFI Properties table if it exists */ - if (efi.properties_table != EFI_INVALID_TABLE_ADDR) { - efi_properties_table_t *tbl; - - tbl = early_memremap(efi.properties_table, sizeof(*tbl)); - if (tbl == NULL) { - pr_err("Could not map Properties table!\n"); - return -ENOMEM; - } - - if (tbl->memory_protection_attribute & - EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA) - set_bit(EFI_NX_PE_DATA, &efi.flags); - - early_memunmap(tbl, sizeof(*tbl)); - } - - if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { - unsigned long prsv = efi.mem_reserve; + if (mem_reserve != EFI_INVALID_TABLE_ADDR) { + unsigned long prsv = mem_reserve; while (prsv) { struct linux_efi_memreserve *rsv; u8 *p; - int i; /* * Just map a full page: that is what we will get @@ -627,186 +639,78 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, } } - return 0; -} - -int __init efi_config_init(efi_config_table_type_t 
*arch_tables) -{ - void *config_tables; - int sz, ret; - - if (efi.systab->nr_tables == 0) - return 0; - - if (efi_enabled(EFI_64BIT)) - sz = sizeof(efi_config_table_64_t); - else - sz = sizeof(efi_config_table_32_t); + if (rt_prop != EFI_INVALID_TABLE_ADDR) { + efi_rt_properties_table_t *tbl; - /* - * Let's see what config tables the firmware passed to us. - */ - config_tables = early_memremap(efi.systab->tables, - efi.systab->nr_tables * sz); - if (config_tables == NULL) { - pr_err("Could not map Configuration table!\n"); - return -ENOMEM; + tbl = early_memremap(rt_prop, sizeof(*tbl)); + if (tbl) { + efi.runtime_supported_mask &= tbl->runtime_services_supported; + early_memunmap(tbl, sizeof(*tbl)); + } } - ret = efi_config_parse_tables(config_tables, efi.systab->nr_tables, sz, - arch_tables); - - early_memunmap(config_tables, efi.systab->nr_tables * sz); - return ret; + return 0; } -#ifdef CONFIG_EFI_VARS_MODULE -static int __init efi_load_efivars(void) +int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr, + int min_major_version) { - struct platform_device *pdev; - - if (!efi_enabled(EFI_RUNTIME_SERVICES)) - return 0; - - pdev = platform_device_register_simple("efivars", 0, NULL, 0); - return PTR_ERR_OR_ZERO(pdev); -} -device_initcall(efi_load_efivars); -#endif - -#ifdef CONFIG_EFI_PARAMS_FROM_FDT - -#define UEFI_PARAM(name, prop, field) \ - { \ - { name }, \ - { prop }, \ - offsetof(struct efi_fdt_params, field), \ - sizeof_field(struct efi_fdt_params, field) \ + if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) { + pr_err("System table signature incorrect!\n"); + return -EINVAL; } -struct params { - const char name[32]; - const char propname[32]; - int offset; - int size; -}; - -static __initdata struct params fdt_params[] = { - UEFI_PARAM("System Table", "linux,uefi-system-table", system_table), - UEFI_PARAM("MemMap Address", "linux,uefi-mmap-start", mmap), - UEFI_PARAM("MemMap Size", "linux,uefi-mmap-size", mmap_size), - UEFI_PARAM("MemMap Desc. Size", "linux,uefi-mmap-desc-size", desc_size), - UEFI_PARAM("MemMap Desc. Version", "linux,uefi-mmap-desc-ver", desc_ver) -}; + if ((systab_hdr->revision >> 16) < min_major_version) + pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n", + systab_hdr->revision >> 16, + systab_hdr->revision & 0xffff, + min_major_version); -static __initdata struct params xen_fdt_params[] = { - UEFI_PARAM("System Table", "xen,uefi-system-table", system_table), - UEFI_PARAM("MemMap Address", "xen,uefi-mmap-start", mmap), - UEFI_PARAM("MemMap Size", "xen,uefi-mmap-size", mmap_size), - UEFI_PARAM("MemMap Desc. Size", "xen,uefi-mmap-desc-size", desc_size), - UEFI_PARAM("MemMap Desc. 
Version", "xen,uefi-mmap-desc-ver", desc_ver) -}; - -#define EFI_FDT_PARAMS_SIZE ARRAY_SIZE(fdt_params) - -static __initdata struct { - const char *uname; - const char *subnode; - struct params *params; -} dt_params[] = { - { "hypervisor", "uefi", xen_fdt_params }, - { "chosen", NULL, fdt_params }, -}; - -struct param_info { - int found; - void *params; - const char *missing; -}; + return 0; +} -static int __init __find_uefi_params(unsigned long node, - struct param_info *info, - struct params *params) +#ifndef CONFIG_IA64 +static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor, + size_t size) { - const void *prop; - void *dest; - u64 val; - int i, len; - - for (i = 0; i < EFI_FDT_PARAMS_SIZE; i++) { - prop = of_get_flat_dt_prop(node, params[i].propname, &len); - if (!prop) { - info->missing = params[i].name; - return 0; - } + const efi_char16_t *ret; - dest = info->params + params[i].offset; - info->found++; - - val = of_read_number(prop, len / sizeof(u32)); - - if (params[i].size == sizeof(u32)) - *(u32 *)dest = val; - else - *(u64 *)dest = val; - - if (efi_enabled(EFI_DBG)) - pr_info(" %s: 0x%0*llx\n", params[i].name, - params[i].size * 2, val); - } - - return 1; + ret = early_memremap_ro(fw_vendor, size); + if (!ret) + pr_err("Could not map the firmware vendor!\n"); + return ret; } -static int __init fdt_find_uefi_params(unsigned long node, const char *uname, - int depth, void *data) +static void __init unmap_fw_vendor(const void *fw_vendor, size_t size) { - struct param_info *info = data; - int i; - - for (i = 0; i < ARRAY_SIZE(dt_params); i++) { - const char *subnode = dt_params[i].subnode; - - if (depth != 1 || strcmp(uname, dt_params[i].uname) != 0) { - info->missing = dt_params[i].params[0].name; - continue; - } - - if (subnode) { - int err = of_get_flat_dt_subnode_by_name(node, subnode); - - if (err < 0) - return 0; - - node = err; - } - - return __find_uefi_params(node, info, dt_params[i].params); - } - - return 0; + early_memunmap((void *)fw_vendor, size); } +#else +#define map_fw_vendor(p, s) __va(p) +#define unmap_fw_vendor(v, s) +#endif -int __init efi_get_fdt_params(struct efi_fdt_params *params) +void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr, + unsigned long fw_vendor) { - struct param_info info; - int ret; + char vendor[100] = "unknown"; + const efi_char16_t *c16; + size_t i; - pr_info("Getting EFI parameters from FDT:\n"); + c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t)); + if (c16) { + for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i) + vendor[i] = c16[i]; + vendor[i] = '\0'; - info.found = 0; - info.params = params; - - ret = of_scan_flat_dt(fdt_find_uefi_params, &info); - if (!info.found) - pr_info("UEFI not found.\n"); - else if (!ret) - pr_err("Can't find '%s' in device tree!\n", - info.missing); + unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t)); + } - return ret; + pr_info("EFI v%u.%.02u by %s\n", + systab_hdr->revision >> 16, + systab_hdr->revision & 0xffff, + vendor); } -#endif /* CONFIG_EFI_PARAMS_FROM_FDT */ static __initdata char memory_type_name[][20] = { "Reserved", @@ -968,10 +872,10 @@ static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init; static int __init efi_memreserve_map_root(void) { - if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR) + if (mem_reserve == EFI_INVALID_TABLE_ADDR) return -ENODEV; - efi_memreserve_root = memremap(efi.mem_reserve, + efi_memreserve_root = memremap(mem_reserve, sizeof(*efi_memreserve_root), MEMREMAP_WB); if 
(WARN_ON_ONCE(!efi_memreserve_root)) @@ -1076,7 +980,7 @@ static int update_efi_random_seed(struct notifier_block *nb, if (!kexec_in_progress) return NOTIFY_DONE; - seed = memremap(efi.rng_seed, sizeof(*seed), MEMREMAP_WB); + seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB); if (seed != NULL) { size = min(seed->size, EFI_RANDOM_SEED_SIZE); memunmap(seed); @@ -1084,7 +988,7 @@ static int update_efi_random_seed(struct notifier_block *nb, pr_err("Could not map UEFI random seed!\n"); } if (size > 0) { - seed = memremap(efi.rng_seed, sizeof(*seed) + size, + seed = memremap(efi_rng_seed, sizeof(*seed) + size, MEMREMAP_WB); if (seed != NULL) { seed->size = size; @@ -1101,9 +1005,9 @@ static struct notifier_block efi_random_seed_nb = { .notifier_call = update_efi_random_seed, }; -static int register_update_efi_random_seed(void) +static int __init register_update_efi_random_seed(void) { - if (efi.rng_seed == EFI_INVALID_TABLE_ADDR) + if (efi_rng_seed == EFI_INVALID_TABLE_ADDR) return 0; return register_reboot_notifier(&efi_random_seed_nb); } diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 7576450c8254..78ad1ba8c987 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -83,13 +83,16 @@ static ssize_t efivar_attr_read(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; + unsigned long size = sizeof(var->Data); char *str = buf; + int ret; if (!entry || !buf) return -EINVAL; - var->DataSize = 1024; - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); + var->DataSize = size; + if (ret) return -EIO; if (var->Attributes & EFI_VARIABLE_NON_VOLATILE) @@ -116,13 +119,16 @@ static ssize_t efivar_size_read(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; + unsigned long size = sizeof(var->Data); char *str = buf; + int ret; if (!entry || !buf) return -EINVAL; - var->DataSize = 1024; - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); + var->DataSize = size; + if (ret) return -EIO; str += sprintf(str, "0x%lx\n", var->DataSize); @@ -133,12 +139,15 @@ static ssize_t efivar_data_read(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; + unsigned long size = sizeof(var->Data); + int ret; if (!entry || !buf) return -EINVAL; - var->DataSize = 1024; - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); + var->DataSize = size; + if (ret) return -EIO; memcpy(buf, var->Data, var->DataSize); @@ -199,6 +208,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) u8 *data; int err; + if (!entry || !buf) + return -EINVAL; + if (in_compat_syscall()) { struct compat_efi_variable *compat; @@ -250,14 +262,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; struct compat_efi_variable *compat; + unsigned long datasize = sizeof(var->Data); size_t size; + int ret; if (!entry || !buf) return 0; - var->DataSize = 1024; - if (efivar_entry_get(entry, &entry->var.Attributes, - &entry->var.DataSize, entry->var.Data)) + ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data); + var->DataSize = datasize; + if (ret) return -EIO; if (in_compat_syscall()) { @@ -664,7 +678,7 @@ int efivars_sysfs_init(void) struct kobject 
*parent_kobj = efivars_kobject(); int error = 0; - if (!efi_enabled(EFI_RUNTIME_SERVICES)) + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) return -ENODEV; /* No efivars has been registered yet */ diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c new file mode 100644 index 000000000000..a1b199de9006 --- /dev/null +++ b/drivers/firmware/efi/embedded-firmware.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Support for extracting embedded firmware for peripherals from EFI code, + * + * Copyright (c) 2018 Hans de Goede <hdegoede@redhat.com> + */ + +#include <linux/dmi.h> +#include <linux/efi.h> +#include <linux/efi_embedded_fw.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/vmalloc.h> +#include <crypto/sha.h> + +/* Exported for use by lib/test_firmware.c only */ +LIST_HEAD(efi_embedded_fw_list); +EXPORT_SYMBOL_GPL(efi_embedded_fw_list); + +static bool checked_for_fw; + +static const struct dmi_system_id * const embedded_fw_table[] = { +#ifdef CONFIG_TOUCHSCREEN_DMI + touchscreen_dmi_table, +#endif + NULL +}; + +/* + * Note the efi_check_for_embedded_firmwares() code currently makes the + * following 2 assumptions. This may needs to be revisited if embedded firmware + * is found where this is not true: + * 1) The firmware is only found in EFI_BOOT_SERVICES_CODE memory segments + * 2) The firmware always starts at an offset which is a multiple of 8 bytes + */ +static int __init efi_check_md_for_embedded_firmware( + efi_memory_desc_t *md, const struct efi_embedded_fw_desc *desc) +{ + struct sha256_state sctx; + struct efi_embedded_fw *fw; + u8 sha256[32]; + u64 i, size; + u8 *map; + + size = md->num_pages << EFI_PAGE_SHIFT; + map = memremap(md->phys_addr, size, MEMREMAP_WB); + if (!map) { + pr_err("Error mapping EFI mem at %#llx\n", md->phys_addr); + return -ENOMEM; + } + + for (i = 0; (i + desc->length) <= size; i += 8) { + if (memcmp(map + i, desc->prefix, EFI_EMBEDDED_FW_PREFIX_LEN)) + continue; + + sha256_init(&sctx); + sha256_update(&sctx, map + i, desc->length); + sha256_final(&sctx, sha256); + if (memcmp(sha256, desc->sha256, 32) == 0) + break; + } + if ((i + desc->length) > size) { + memunmap(map); + return -ENOENT; + } + + pr_info("Found EFI embedded fw '%s'\n", desc->name); + + fw = kmalloc(sizeof(*fw), GFP_KERNEL); + if (!fw) { + memunmap(map); + return -ENOMEM; + } + + fw->data = kmemdup(map + i, desc->length, GFP_KERNEL); + memunmap(map); + if (!fw->data) { + kfree(fw); + return -ENOMEM; + } + + fw->name = desc->name; + fw->length = desc->length; + list_add(&fw->list, &efi_embedded_fw_list); + + return 0; +} + +void __init efi_check_for_embedded_firmwares(void) +{ + const struct efi_embedded_fw_desc *fw_desc; + const struct dmi_system_id *dmi_id; + efi_memory_desc_t *md; + int i, r; + + for (i = 0; embedded_fw_table[i]; i++) { + dmi_id = dmi_first_match(embedded_fw_table[i]); + if (!dmi_id) + continue; + + fw_desc = dmi_id->driver_data; + + /* + * In some drivers the struct driver_data contains may contain + * other driver specific data after the fw_desc struct; and + * the fw_desc struct itself may be empty, skip these. 
+ */ + if (!fw_desc->name) + continue; + + for_each_efi_memory_desc(md) { + if (md->type != EFI_BOOT_SERVICES_CODE) + continue; + + r = efi_check_md_for_embedded_firmware(md, fw_desc); + if (r == 0) + break; + } + } + + checked_for_fw = true; +} + +int efi_get_embedded_fw(const char *name, const u8 **data, size_t *size) +{ + struct efi_embedded_fw *iter, *fw = NULL; + + if (!checked_for_fw) { + pr_warn("Warning %s called while we did not check for embedded fw\n", + __func__); + return -ENOENT; + } + + list_for_each_entry(iter, &efi_embedded_fw_list, list) { + if (strcmp(name, iter->name) == 0) { + fw = iter; + break; + } + } + + if (!fw) + return -ENOENT; + + *data = fw->data; + *size = fw->length; + + return 0; +} +EXPORT_SYMBOL_GPL(efi_get_embedded_fw); diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index 2762e0662bf4..e3d692696583 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -240,7 +240,6 @@ void __init efi_esrt_init(void) { void *va; struct efi_system_resource_table tmpesrt; - struct efi_system_resource_entry_v1 *v1_entries; size_t size, max, entry_size, entries_size; efi_memory_desc_t md; int rc; @@ -288,14 +287,13 @@ void __init efi_esrt_init(void) memcpy(&tmpesrt, va, sizeof(tmpesrt)); early_memunmap(va, size); - if (tmpesrt.fw_resource_version == 1) { - entry_size = sizeof (*v1_entries); - } else { + if (tmpesrt.fw_resource_version != 1) { pr_err("Unsupported ESRT version %lld.\n", tmpesrt.fw_resource_version); return; } + entry_size = sizeof(struct efi_system_resource_entry_v1); if (tmpesrt.fw_resource_count > 0 && max - size < entry_size) { pr_err("ESRT memory map entry can only hold the header. (max: %zu size: %zu)\n", max - size, entry_size); diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c new file mode 100644 index 000000000000..bb042ab7c2be --- /dev/null +++ b/drivers/firmware/efi/fdtparams.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "efi: " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/efi.h> +#include <linux/libfdt.h> +#include <linux/of_fdt.h> + +#include <asm/unaligned.h> + +enum { + SYSTAB, + MMBASE, + MMSIZE, + DCSIZE, + DCVERS, + + PARAMCOUNT +}; + +static __initconst const char name[][22] = { + [SYSTAB] = "System Table ", + [MMBASE] = "MemMap Address ", + [MMSIZE] = "MemMap Size ", + [DCSIZE] = "MemMap Desc. Size ", + [DCVERS] = "MemMap Desc. Version ", +}; + +static __initconst const struct { + const char path[17]; + const char params[PARAMCOUNT][26]; +} dt_params[] = { + { +#ifdef CONFIG_XEN // <-------17------> + .path = "/hypervisor/uefi", + .params = { + [SYSTAB] = "xen,uefi-system-table", + [MMBASE] = "xen,uefi-mmap-start", + [MMSIZE] = "xen,uefi-mmap-size", + [DCSIZE] = "xen,uefi-mmap-desc-size", + [DCVERS] = "xen,uefi-mmap-desc-ver", + } + }, { +#endif + .path = "/chosen", + .params = { // <-----------26-----------> + [SYSTAB] = "linux,uefi-system-table", + [MMBASE] = "linux,uefi-mmap-start", + [MMSIZE] = "linux,uefi-mmap-size", + [DCSIZE] = "linux,uefi-mmap-desc-size", + [DCVERS] = "linux,uefi-mmap-desc-ver", + } + } +}; + +static int __init efi_get_fdt_prop(const void *fdt, int node, const char *pname, + const char *rname, void *var, int size) +{ + const void *prop; + int len; + u64 val; + + prop = fdt_getprop(fdt, node, pname, &len); + if (!prop) + return 1; + + val = (len == 4) ? 
(u64)be32_to_cpup(prop) : get_unaligned_be64(prop); + + if (size == 8) + *(u64 *)var = val; + else + *(u32 *)var = (val < U32_MAX) ? val : U32_MAX; // saturate + + if (efi_enabled(EFI_DBG)) + pr_info(" %s: 0x%0*llx\n", rname, size * 2, val); + + return 0; +} + +u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm) +{ + const void *fdt = initial_boot_params; + unsigned long systab; + int i, j, node; + struct { + void *var; + int size; + } target[] = { + [SYSTAB] = { &systab, sizeof(systab) }, + [MMBASE] = { &mm->phys_map, sizeof(mm->phys_map) }, + [MMSIZE] = { &mm->size, sizeof(mm->size) }, + [DCSIZE] = { &mm->desc_size, sizeof(mm->desc_size) }, + [DCVERS] = { &mm->desc_version, sizeof(mm->desc_version) }, + }; + + BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name)); + BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params)); + + for (i = 0; i < ARRAY_SIZE(dt_params); i++) { + node = fdt_path_offset(fdt, dt_params[i].path); + if (node < 0) + continue; + + if (efi_enabled(EFI_DBG)) + pr_info("Getting UEFI parameters from %s in DT:\n", + dt_params[i].path); + + for (j = 0; j < ARRAY_SIZE(target); j++) { + const char *pname = dt_params[i].params[j]; + + if (!efi_get_fdt_prop(fdt, node, pname, name[j], + target[j].var, target[j].size)) + continue; + if (!j) + goto notfound; + pr_err("Can't find property '%s' in DT!\n", pname); + return 0; + } + return systab; + } +notfound: + pr_info("UEFI not found.\n"); + return 0; +} diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 98a81576213d..4d6246c6f651 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -25,6 +25,7 @@ cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \ cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \ + -include $(srctree)/drivers/firmware/efi/libstub/hidden.h \ -D__NO_FORTIFY \ $(call cc-option,-ffreestanding) \ $(call cc-option,-fno-stack-protector) \ @@ -39,11 +40,11 @@ OBJECT_FILES_NON_STANDARD := y KCOV_INSTRUMENT := n lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \ - random.o pci.o + file.o mem.o random.o randomalloc.o pci.o \ + skip_spaces.o lib-cmdline.o lib-ctype.o # include the stub's generic dependencies from lib/ when building for ARM/arm64 arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c -arm-deps-$(CONFIG_ARM64) += sort.c $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE $(call if_changed_rule,cc_o_c) @@ -53,6 +54,7 @@ lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \ lib-$(CONFIG_ARM) += arm32-stub.o lib-$(CONFIG_ARM64) += arm64-stub.o +lib-$(CONFIG_X86) += x86-stub.o CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 7bbef4a67350..99a5cde7c2d8 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -10,7 +10,7 @@ */ #include <linux/efi.h> -#include <linux/sort.h> +#include <linux/libfdt.h> #include <asm/efi.h> #include "efistub.h" @@ -36,6 +36,7 @@ #endif static u64 virtmap_base = EFI_RT_VIRTUAL_BASE; +static bool __efistub_global flat_va_mapping; static efi_system_table_t *__efistub_global sys_table; @@ -87,6 +88,39 @@ void install_memreserve_table(void) pr_efi_err("Failed to install memreserve config table!\n"); } +static unsigned long get_dram_base(void) +{ + efi_status_t status; + unsigned 
long map_size, buff_size; + unsigned long membase = EFI_ERROR; + struct efi_memory_map map; + efi_memory_desc_t *md; + struct efi_boot_memmap boot_map; + + boot_map.map = (efi_memory_desc_t **)&map.map; + boot_map.map_size = &map_size; + boot_map.desc_size = &map.desc_size; + boot_map.desc_ver = NULL; + boot_map.key_ptr = NULL; + boot_map.buff_size = &buff_size; + + status = efi_get_memory_map(&boot_map); + if (status != EFI_SUCCESS) + return membase; + + map.map_end = map.map + map_size; + + for_each_efi_memory_desc_in_map(&map, md) { + if (md->attribute & EFI_MEMORY_WB) { + if (membase > md->phys_addr) + membase = md->phys_addr; + } + } + + efi_bs_call(free_pool, map.map); + + return membase; +} /* * This function handles the architcture specific differences between arm and @@ -100,38 +134,46 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *reserve_size, unsigned long dram_base, efi_loaded_image_t *image); + +asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint, + unsigned long fdt_addr, + unsigned long fdt_size); + /* * EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint * that is described in the PE/COFF header. Most of the code is the same * for both archictectures, with the arch-specific code provided in the * handle_kernel_image() function. */ -unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg, - unsigned long *image_addr) +efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg) { efi_loaded_image_t *image; efi_status_t status; + unsigned long image_addr; unsigned long image_size = 0; unsigned long dram_base; /* addr/point and size pairs for memory management*/ - unsigned long initrd_addr; - u64 initrd_size = 0; + unsigned long initrd_addr = 0; + unsigned long initrd_size = 0; unsigned long fdt_addr = 0; /* Original DTB */ unsigned long fdt_size = 0; char *cmdline_ptr = NULL; int cmdline_size = 0; - unsigned long new_fdt_addr; efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID; unsigned long reserve_addr = 0; unsigned long reserve_size = 0; enum efi_secureboot_mode secure_boot; struct screen_info *si; + efi_properties_table_t *prop_tbl; + unsigned long max_addr; sys_table = sys_table_arg; /* Check if we were booted by the EFI firmware */ - if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) + if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) { + status = EFI_INVALID_PARAMETER; goto fail; + } status = check_platform_features(); if (status != EFI_SUCCESS) @@ -152,6 +194,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg, dram_base = get_dram_base(); if (dram_base == EFI_ERROR) { pr_efi_err("Failed to find DRAM base\n"); + status = EFI_LOAD_ERROR; goto fail; } @@ -160,9 +203,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg, * protocol. We are going to copy the command line into the * device tree, so this can be allocated anywhere. 
*/ - cmdline_ptr = efi_convert_cmdline(image, &cmdline_size); + cmdline_ptr = efi_convert_cmdline(image, &cmdline_size, ULONG_MAX); if (!cmdline_ptr) { pr_efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n"); + status = EFI_OUT_OF_RESOURCES; goto fail; } @@ -178,7 +222,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg, si = setup_graphics(); - status = handle_kernel_image(image_addr, &image_size, + status = handle_kernel_image(&image_addr, &image_size, &reserve_addr, &reserve_size, dram_base, image); @@ -204,8 +248,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg, if (strstr(cmdline_ptr, "dtb=")) pr_efi("Ignoring DTB from command line.\n"); } else { - status = handle_cmdline_files(image, cmdline_ptr, "dtb=", - ~0UL, &fdt_addr, &fdt_size); + status = efi_load_dtb(image, &fdt_addr, &fdt_size); if (status != EFI_SUCCESS) { pr_efi_err("Failed to load device tree!\n"); @@ -225,18 +268,38 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg, if (!fdt_addr) pr_efi("Generating empty DTB\n"); - status = handle_cmdline_files(image, cmdline_ptr, "initrd=", - efi_get_max_initrd_addr(dram_base, - *image_addr), - (unsigned long *)&initrd_addr, - (unsigned long *)&initrd_size); - if (status != EFI_SUCCESS) - pr_efi_err("Failed initrd from command line!\n"); + if (!noinitrd()) { + max_addr = efi_get_max_initrd_addr(dram_base, image_addr); + status = efi_load_initrd_dev_path(&initrd_addr, &initrd_size, + max_addr); + if (status == EFI_SUCCESS) { + pr_efi("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n"); + } else if (status == EFI_NOT_FOUND) { + status = efi_load_initrd(image, &initrd_addr, &initrd_size, + ULONG_MAX, max_addr); + if (status == EFI_SUCCESS && initrd_size > 0) + pr_efi("Loaded initrd from command line option\n"); + } + if (status != EFI_SUCCESS) + pr_efi_err("Failed to load initrd!\n"); + } efi_random_get_seed(); + /* + * If the NX PE data feature is enabled in the properties table, we + * should take care not to create a virtual mapping that changes the + * relative placement of runtime services code and data regions, as + * they may belong to the same PE/COFF executable image in memory. + * The easiest way to achieve that is to simply use a 1:1 mapping. + */ + prop_tbl = get_efi_config_table(EFI_PROPERTIES_TABLE_GUID); + flat_va_mapping = prop_tbl && + (prop_tbl->memory_protection_attribute & + EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA); + /* hibernation expects the runtime regions to stay in the same place */ - if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) { + if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr() && !flat_va_mapping) { /* * Randomize the base of the UEFI runtime services region. * Preserve the 2 MB alignment of the region by taking a @@ -257,71 +320,30 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg, install_memreserve_table(); - new_fdt_addr = fdt_addr; - status = allocate_new_fdt_and_exit_boot(handle, - &new_fdt_addr, efi_get_max_fdt_addr(dram_base), - initrd_addr, initrd_size, cmdline_ptr, - fdt_addr, fdt_size); + status = allocate_new_fdt_and_exit_boot(handle, &fdt_addr, + efi_get_max_fdt_addr(dram_base), + initrd_addr, initrd_size, + cmdline_ptr, fdt_addr, fdt_size); + if (status != EFI_SUCCESS) + goto fail_free_initrd; - /* - * If all went well, we need to return the FDT address to the - * calling function so it can be passed to kernel as part of - * the kernel boot protocol. 
- */ - if (status == EFI_SUCCESS) - return new_fdt_addr; + efi_enter_kernel(image_addr, fdt_addr, fdt_totalsize((void *)fdt_addr)); + /* not reached */ +fail_free_initrd: pr_efi_err("Failed to update FDT and exit boot services\n"); efi_free(initrd_size, initrd_addr); efi_free(fdt_size, fdt_addr); fail_free_image: - efi_free(image_size, *image_addr); + efi_free(image_size, image_addr); efi_free(reserve_size, reserve_addr); fail_free_cmdline: free_screen_info(si); efi_free(cmdline_size, (unsigned long)cmdline_ptr); fail: - return EFI_ERROR; -} - -static int cmp_mem_desc(const void *l, const void *r) -{ - const efi_memory_desc_t *left = l, *right = r; - - return (left->phys_addr > right->phys_addr) ? 1 : -1; -} - -/* - * Returns whether region @left ends exactly where region @right starts, - * or false if either argument is NULL. - */ -static bool regions_are_adjacent(efi_memory_desc_t *left, - efi_memory_desc_t *right) -{ - u64 left_end; - - if (left == NULL || right == NULL) - return false; - - left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE; - - return left_end == right->phys_addr; -} - -/* - * Returns whether region @left and region @right have compatible memory type - * mapping attributes, and are both EFI_MEMORY_RUNTIME regions. - */ -static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left, - efi_memory_desc_t *right) -{ - static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT | - EFI_MEMORY_WC | EFI_MEMORY_UC | - EFI_MEMORY_RUNTIME; - - return ((left->attribute ^ right->attribute) & mem_type_mask) == 0; + return status; } /* @@ -336,23 +358,10 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, int *count) { u64 efi_virt_base = virtmap_base; - efi_memory_desc_t *in, *prev = NULL, *out = runtime_map; + efi_memory_desc_t *in, *out = runtime_map; int l; - /* - * To work around potential issues with the Properties Table feature - * introduced in UEFI 2.5, which may split PE/COFF executable images - * in memory into several RuntimeServicesCode and RuntimeServicesData - * regions, we need to preserve the relative offsets between adjacent - * EFI_MEMORY_RUNTIME regions with the same memory type attributes. - * The easiest way to find adjacent regions is to sort the memory map - * before traversing it. - */ - if (IS_ENABLED(CONFIG_ARM64)) - sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, - NULL); - - for (l = 0; l < map_size; l += desc_size, prev = in) { + for (l = 0; l < map_size; l += desc_size) { u64 paddr, size; in = (void *)memory_map + l; @@ -362,8 +371,8 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, paddr = in->phys_addr; size = in->num_pages * EFI_PAGE_SIZE; + in->virt_addr = in->phys_addr; if (novamap()) { - in->virt_addr = in->phys_addr; continue; } @@ -372,9 +381,7 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, * a 4k page size kernel to kexec a 64k page size kernel and * vice versa. 
*/ - if ((IS_ENABLED(CONFIG_ARM64) && - !regions_are_adjacent(prev, in)) || - !regions_have_compatible_memory_type_attrs(prev, in)) { + if (!flat_va_mapping) { paddr = round_down(in->phys_addr, SZ_64K); size += in->phys_addr - paddr; @@ -389,10 +396,10 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, efi_virt_base = round_up(efi_virt_base, SZ_2M); else efi_virt_base = round_up(efi_virt_base, SZ_64K); - } - in->virt_addr = efi_virt_base + in->phys_addr - paddr; - efi_virt_base += size; + in->virt_addr += efi_virt_base - paddr; + efi_virt_base += size; + } memcpy(out, in, desc_size); out = (void *)out + desc_size; diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c index 7b2a6382b647..7826553af2ba 100644 --- a/drivers/firmware/efi/libstub/arm32-stub.c +++ b/drivers/firmware/efi/libstub/arm32-stub.c @@ -227,6 +227,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, * Relocate the zImage, so that it appears in the lowest 128 MB * memory window. */ + *image_addr = (unsigned long)image->image_base; *image_size = image->image_size; status = efi_relocate_kernel(image_addr, *image_size, *image_size, kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0); diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 2915b44132e6..db0c1a9c1699 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -6,17 +6,11 @@ * Adapted from ARM version by Mark Salter <msalter@redhat.com> */ -/* - * To prevent the compiler from emitting GOT-indirected (and thus absolute) - * references to the section markers, override their visibility as 'hidden' - */ -#pragma GCC visibility push(hidden) -#include <asm/sections.h> -#pragma GCC visibility pop #include <linux/efi.h> #include <asm/efi.h> #include <asm/memory.h> +#include <asm/sections.h> #include <asm/sysreg.h> #include "efistub.h" @@ -49,7 +43,6 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, { efi_status_t status; unsigned long kernel_size, kernel_memsize = 0; - void *old_image_addr = (void *)*image_addr; unsigned long preferred_offset; u64 phys_seed = 0; @@ -123,6 +116,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, * Mustang), we can still place the kernel at the address * 'dram_base + TEXT_OFFSET'. */ + *image_addr = (unsigned long)_text; if (*image_addr == preferred_offset) return EFI_SUCCESS; @@ -147,7 +141,11 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, } *image_addr = *reserve_addr + TEXT_OFFSET; } - memcpy((void *)*image_addr, old_image_addr, kernel_size); + + if (image->image_base != _text) + pr_efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n"); + + memcpy((void *)*image_addr, _text, kernel_size); return EFI_SUCCESS; } diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index 74ddfb496140..9f34c7242939 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -12,34 +12,27 @@ #include "efistub.h" -/* - * Some firmware implementations have problems reading files in one go. - * A read chunk size of 1MB seems to work for most platforms. - * - * Unfortunately, reading files in chunks triggers *other* bugs on some - * platforms, so we provide a way to disable this workaround, which can - * be done by passing "efi=nochunk" on the EFI boot stub command line. 
- * - * If you experience issues with initrd images being corrupt it's worth - * trying efi=nochunk, but chunking is enabled by default because there - * are far more machines that require the workaround than those that - * break with it enabled. - */ -#define EFI_READ_CHUNK_SIZE (1024 * 1024) - -static unsigned long efi_chunk_size = EFI_READ_CHUNK_SIZE; - +static bool __efistub_global efi_nochunk; static bool __efistub_global efi_nokaslr; +static bool __efistub_global efi_noinitrd; static bool __efistub_global efi_quiet; static bool __efistub_global efi_novamap; static bool __efistub_global efi_nosoftreserve; static bool __efistub_global efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA); +bool __pure nochunk(void) +{ + return efi_nochunk; +} bool __pure nokaslr(void) { return efi_nokaslr; } +bool __pure noinitrd(void) +{ + return efi_noinitrd; +} bool __pure is_quiet(void) { return efi_quiet; @@ -53,13 +46,6 @@ bool __pure __efi_soft_reserve_enabled(void) return !efi_nosoftreserve; } -#define EFI_MMAP_NR_SLACK_SLOTS 8 - -struct file_info { - efi_file_handle_t *handle; - u64 size; -}; - void efi_printk(char *str) { char *s8; @@ -77,369 +63,6 @@ void efi_printk(char *str) } } -static inline bool mmap_has_headroom(unsigned long buff_size, - unsigned long map_size, - unsigned long desc_size) -{ - unsigned long slack = buff_size - map_size; - - return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS; -} - -efi_status_t efi_get_memory_map(struct efi_boot_memmap *map) -{ - efi_memory_desc_t *m = NULL; - efi_status_t status; - unsigned long key; - u32 desc_version; - - *map->desc_size = sizeof(*m); - *map->map_size = *map->desc_size * 32; - *map->buff_size = *map->map_size; -again: - status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, - *map->map_size, (void **)&m); - if (status != EFI_SUCCESS) - goto fail; - - *map->desc_size = 0; - key = 0; - status = efi_bs_call(get_memory_map, map->map_size, m, - &key, map->desc_size, &desc_version); - if (status == EFI_BUFFER_TOO_SMALL || - !mmap_has_headroom(*map->buff_size, *map->map_size, - *map->desc_size)) { - efi_bs_call(free_pool, m); - /* - * Make sure there is some entries of headroom so that the - * buffer can be reused for a new map after allocations are - * no longer permitted. 
Its unlikely that the map will grow to - * exceed this headroom once we are ready to trigger - * ExitBootServices() - */ - *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS; - *map->buff_size = *map->map_size; - goto again; - } - - if (status != EFI_SUCCESS) - efi_bs_call(free_pool, m); - - if (map->key_ptr && status == EFI_SUCCESS) - *map->key_ptr = key; - if (map->desc_ver && status == EFI_SUCCESS) - *map->desc_ver = desc_version; - -fail: - *map->map = m; - return status; -} - - -unsigned long get_dram_base(void) -{ - efi_status_t status; - unsigned long map_size, buff_size; - unsigned long membase = EFI_ERROR; - struct efi_memory_map map; - efi_memory_desc_t *md; - struct efi_boot_memmap boot_map; - - boot_map.map = (efi_memory_desc_t **)&map.map; - boot_map.map_size = &map_size; - boot_map.desc_size = &map.desc_size; - boot_map.desc_ver = NULL; - boot_map.key_ptr = NULL; - boot_map.buff_size = &buff_size; - - status = efi_get_memory_map(&boot_map); - if (status != EFI_SUCCESS) - return membase; - - map.map_end = map.map + map_size; - - for_each_efi_memory_desc_in_map(&map, md) { - if (md->attribute & EFI_MEMORY_WB) { - if (membase > md->phys_addr) - membase = md->phys_addr; - } - } - - efi_bs_call(free_pool, map.map); - - return membase; -} - -/* - * Allocate at the highest possible address that is not above 'max'. - */ -efi_status_t efi_high_alloc(unsigned long size, unsigned long align, - unsigned long *addr, unsigned long max) -{ - unsigned long map_size, desc_size, buff_size; - efi_memory_desc_t *map; - efi_status_t status; - unsigned long nr_pages; - u64 max_addr = 0; - int i; - struct efi_boot_memmap boot_map; - - boot_map.map = ↦ - boot_map.map_size = &map_size; - boot_map.desc_size = &desc_size; - boot_map.desc_ver = NULL; - boot_map.key_ptr = NULL; - boot_map.buff_size = &buff_size; - - status = efi_get_memory_map(&boot_map); - if (status != EFI_SUCCESS) - goto fail; - - /* - * Enforce minimum alignment that EFI or Linux requires when - * requesting a specific address. We are doing page-based (or - * larger) allocations, and both the address and size must meet - * alignment constraints. - */ - if (align < EFI_ALLOC_ALIGN) - align = EFI_ALLOC_ALIGN; - - size = round_up(size, EFI_ALLOC_ALIGN); - nr_pages = size / EFI_PAGE_SIZE; -again: - for (i = 0; i < map_size / desc_size; i++) { - efi_memory_desc_t *desc; - unsigned long m = (unsigned long)map; - u64 start, end; - - desc = efi_early_memdesc_ptr(m, desc_size, i); - if (desc->type != EFI_CONVENTIONAL_MEMORY) - continue; - - if (efi_soft_reserve_enabled() && - (desc->attribute & EFI_MEMORY_SP)) - continue; - - if (desc->num_pages < nr_pages) - continue; - - start = desc->phys_addr; - end = start + desc->num_pages * EFI_PAGE_SIZE; - - if (end > max) - end = max; - - if ((start + size) > end) - continue; - - if (round_down(end - size, align) < start) - continue; - - start = round_down(end - size, align); - - /* - * Don't allocate at 0x0. It will confuse code that - * checks pointers against NULL. - */ - if (start == 0x0) - continue; - - if (start > max_addr) - max_addr = start; - } - - if (!max_addr) - status = EFI_NOT_FOUND; - else { - status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, nr_pages, &max_addr); - if (status != EFI_SUCCESS) { - max = max_addr; - max_addr = 0; - goto again; - } - - *addr = max_addr; - } - - efi_bs_call(free_pool, map); -fail: - return status; -} - -/* - * Allocate at the lowest possible address that is not below 'min'. 
- */ -efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, - unsigned long *addr, unsigned long min) -{ - unsigned long map_size, desc_size, buff_size; - efi_memory_desc_t *map; - efi_status_t status; - unsigned long nr_pages; - int i; - struct efi_boot_memmap boot_map; - - boot_map.map = ↦ - boot_map.map_size = &map_size; - boot_map.desc_size = &desc_size; - boot_map.desc_ver = NULL; - boot_map.key_ptr = NULL; - boot_map.buff_size = &buff_size; - - status = efi_get_memory_map(&boot_map); - if (status != EFI_SUCCESS) - goto fail; - - /* - * Enforce minimum alignment that EFI or Linux requires when - * requesting a specific address. We are doing page-based (or - * larger) allocations, and both the address and size must meet - * alignment constraints. - */ - if (align < EFI_ALLOC_ALIGN) - align = EFI_ALLOC_ALIGN; - - size = round_up(size, EFI_ALLOC_ALIGN); - nr_pages = size / EFI_PAGE_SIZE; - for (i = 0; i < map_size / desc_size; i++) { - efi_memory_desc_t *desc; - unsigned long m = (unsigned long)map; - u64 start, end; - - desc = efi_early_memdesc_ptr(m, desc_size, i); - - if (desc->type != EFI_CONVENTIONAL_MEMORY) - continue; - - if (efi_soft_reserve_enabled() && - (desc->attribute & EFI_MEMORY_SP)) - continue; - - if (desc->num_pages < nr_pages) - continue; - - start = desc->phys_addr; - end = start + desc->num_pages * EFI_PAGE_SIZE; - - if (start < min) - start = min; - - start = round_up(start, align); - if ((start + size) > end) - continue; - - status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, nr_pages, &start); - if (status == EFI_SUCCESS) { - *addr = start; - break; - } - } - - if (i == map_size / desc_size) - status = EFI_NOT_FOUND; - - efi_bs_call(free_pool, map); -fail: - return status; -} - -void efi_free(unsigned long size, unsigned long addr) -{ - unsigned long nr_pages; - - if (!size) - return; - - nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; - efi_bs_call(free_pages, addr, nr_pages); -} - -static efi_status_t efi_file_size(void *__fh, efi_char16_t *filename_16, - void **handle, u64 *file_sz) -{ - efi_file_handle_t *h, *fh = __fh; - efi_file_info_t *info; - efi_status_t status; - efi_guid_t info_guid = EFI_FILE_INFO_ID; - unsigned long info_sz; - - status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, 0); - if (status != EFI_SUCCESS) { - efi_printk("Failed to open file: "); - efi_char16_printk(filename_16); - efi_printk("\n"); - return status; - } - - *handle = h; - - info_sz = 0; - status = h->get_info(h, &info_guid, &info_sz, NULL); - if (status != EFI_BUFFER_TOO_SMALL) { - efi_printk("Failed to get file info size\n"); - return status; - } - -grow: - status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, info_sz, - (void **)&info); - if (status != EFI_SUCCESS) { - efi_printk("Failed to alloc mem for file info\n"); - return status; - } - - status = h->get_info(h, &info_guid, &info_sz, info); - if (status == EFI_BUFFER_TOO_SMALL) { - efi_bs_call(free_pool, info); - goto grow; - } - - *file_sz = info->file_size; - efi_bs_call(free_pool, info); - - if (status != EFI_SUCCESS) - efi_printk("Failed to get initrd info\n"); - - return status; -} - -static efi_status_t efi_file_read(efi_file_handle_t *handle, - unsigned long *size, void *addr) -{ - return handle->read(handle, size, addr); -} - -static efi_status_t efi_file_close(efi_file_handle_t *handle) -{ - return handle->close(handle); -} - -static efi_status_t efi_open_volume(efi_loaded_image_t *image, - efi_file_handle_t **__fh) -{ - efi_file_io_interface_t *io; 
- efi_file_handle_t *fh; - efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID; - efi_status_t status; - efi_handle_t handle = image->device_handle; - - status = efi_bs_call(handle_protocol, handle, &fs_proto, (void **)&io); - if (status != EFI_SUCCESS) { - efi_printk("Failed to handle fs_proto\n"); - return status; - } - - status = io->open_volume(io, &fh); - if (status != EFI_SUCCESS) - efi_printk("Failed to open volume\n"); - else - *__fh = fh; - - return status; -} - /* * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi= * option, e.g. efi=nochunk. @@ -450,316 +73,42 @@ static efi_status_t efi_open_volume(efi_loaded_image_t *image, */ efi_status_t efi_parse_options(char const *cmdline) { - char *str; - - str = strstr(cmdline, "nokaslr"); - if (str == cmdline || (str && str > cmdline && *(str - 1) == ' ')) - efi_nokaslr = true; - - str = strstr(cmdline, "quiet"); - if (str == cmdline || (str && str > cmdline && *(str - 1) == ' ')) - efi_quiet = true; - - /* - * If no EFI parameters were specified on the cmdline we've got - * nothing to do. - */ - str = strstr(cmdline, "efi="); - if (!str) - return EFI_SUCCESS; - - /* Skip ahead to first argument */ - str += strlen("efi="); - - /* - * Remember, because efi= is also used by the kernel we need to - * skip over arguments we don't understand. - */ - while (*str && *str != ' ') { - if (!strncmp(str, "nochunk", 7)) { - str += strlen("nochunk"); - efi_chunk_size = -1UL; - } - - if (!strncmp(str, "novamap", 7)) { - str += strlen("novamap"); - efi_novamap = true; - } - - if (IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) && - !strncmp(str, "nosoftreserve", 7)) { - str += strlen("nosoftreserve"); - efi_nosoftreserve = true; - } - - if (!strncmp(str, "disable_early_pci_dma", 21)) { - str += strlen("disable_early_pci_dma"); - efi_disable_pci_dma = true; - } - - if (!strncmp(str, "no_disable_early_pci_dma", 24)) { - str += strlen("no_disable_early_pci_dma"); - efi_disable_pci_dma = false; - } - - /* Group words together, delimited by "," */ - while (*str && *str != ' ' && *str != ',') - str++; - - if (*str == ',') - str++; - } - - return EFI_SUCCESS; -} - -/* - * Check the cmdline for a LILO-style file= arguments. - * - * We only support loading a file from the same filesystem as - * the kernel image. 
- */
-efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
- char *cmd_line, char *option_string,
- unsigned long max_addr,
- unsigned long *load_addr,
- unsigned long *load_size)
-{
- struct file_info *files;
- unsigned long file_addr;
- u64 file_size_total;
- efi_file_handle_t *fh = NULL;
+ size_t len = strlen(cmdline) + 1;
 efi_status_t status;
- int nr_files;
- char *str;
- int i, j, k;
-
- file_addr = 0;
- file_size_total = 0;
-
- str = cmd_line;
-
- j = 0; /* See close_handles */
-
- if (!load_addr || !load_size)
- return EFI_INVALID_PARAMETER;
-
- *load_addr = 0;
- *load_size = 0;
-
- if (!str || !*str)
- return EFI_SUCCESS;
-
- for (nr_files = 0; *str; nr_files++) {
- str = strstr(str, option_string);
- if (!str)
- break;
-
- str += strlen(option_string);
-
- /* Skip any leading slashes */
- while (*str == '/' || *str == '\\')
- str++;
-
- while (*str && *str != ' ' && *str != '\n')
- str++;
- }
-
- if (!nr_files)
- return EFI_SUCCESS;
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
- nr_files * sizeof(*files), (void **)&files);
- if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to alloc mem for file handle list\n");
- goto fail;
- }
-
- str = cmd_line;
- for (i = 0; i < nr_files; i++) {
- struct file_info *file;
- efi_char16_t filename_16[256];
- efi_char16_t *p;
-
- str = strstr(str, option_string);
- if (!str)
- break;
-
- str += strlen(option_string);
-
- file = &files[i];
- p = filename_16;
-
- /* Skip any leading slashes */
- while (*str == '/' || *str == '\\')
- str++;
-
- while (*str && *str != ' ' && *str != '\n') {
- if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
- break;
-
- if (*str == '/') {
- *p++ = '\\';
- str++;
- } else {
- *p++ = *str++;
- }
- }
-
- *p = '\0';
+ char *str, *buf;
 
- /* Only open the volume once. */
- if (!i) {
- status = efi_open_volume(image, &fh);
- if (status != EFI_SUCCESS)
- goto free_files;
- }
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, len, (void **)&buf);
+ if (status != EFI_SUCCESS)
+ return status;
 
- status = efi_file_size(fh, filename_16, (void **)&file->handle,
- &file->size);
- if (status != EFI_SUCCESS)
- goto close_handles;
+ str = skip_spaces(memcpy(buf, cmdline, len));
 
- file_size_total += file->size;
- }
+ while (*str) {
+ char *param, *val;
 
- if (file_size_total) {
- unsigned long addr;
+ str = next_arg(str, &param, &val);
 
- /*
- * Multiple files need to be at consecutive addresses in memory,
- * so allocate enough memory for all the files. This is used
- * for loading multiple files.
- */
- status = efi_high_alloc(file_size_total, 0x1000, &file_addr,
- max_addr);
- if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to alloc highmem for files\n");
- goto close_handles;
- }
+ if (!strcmp(param, "nokaslr")) {
+ efi_nokaslr = true;
+ } else if (!strcmp(param, "quiet")) {
+ efi_quiet = true;
+ } else if (!strcmp(param, "noinitrd")) {
+ efi_noinitrd = true;
+ } else if (!strcmp(param, "efi") && val) {
+ efi_nochunk = parse_option_str(val, "nochunk");
+ efi_novamap = parse_option_str(val, "novamap");
 
- /* We've run out of free low memory.
*/ - if (file_addr > max_addr) { - pr_efi_err("We've run out of free low memory\n"); - status = EFI_INVALID_PARAMETER; - goto free_file_total; - } + efi_nosoftreserve = IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) && + parse_option_str(val, "nosoftreserve"); - addr = file_addr; - for (j = 0; j < nr_files; j++) { - unsigned long size; - - size = files[j].size; - while (size) { - unsigned long chunksize; - - if (IS_ENABLED(CONFIG_X86) && size > efi_chunk_size) - chunksize = efi_chunk_size; - else - chunksize = size; - - status = efi_file_read(files[j].handle, - &chunksize, - (void *)addr); - if (status != EFI_SUCCESS) { - pr_efi_err("Failed to read file\n"); - goto free_file_total; - } - addr += chunksize; - size -= chunksize; - } - - efi_file_close(files[j].handle); + if (parse_option_str(val, "disable_early_pci_dma")) + efi_disable_pci_dma = true; + if (parse_option_str(val, "no_disable_early_pci_dma")) + efi_disable_pci_dma = false; } - - } - - efi_bs_call(free_pool, files); - - *load_addr = file_addr; - *load_size = file_size_total; - - return status; - -free_file_total: - efi_free(file_size_total, file_addr); - -close_handles: - for (k = j; k < i; k++) - efi_file_close(files[k].handle); -free_files: - efi_bs_call(free_pool, files); -fail: - *load_addr = 0; - *load_size = 0; - - return status; -} -/* - * Relocate a kernel image, either compressed or uncompressed. - * In the ARM64 case, all kernel images are currently - * uncompressed, and as such when we relocate it we need to - * allocate additional space for the BSS segment. Any low - * memory that this function should avoid needs to be - * unavailable in the EFI memory map, as if the preferred - * address is not available the lowest available address will - * be used. - */ -efi_status_t efi_relocate_kernel(unsigned long *image_addr, - unsigned long image_size, - unsigned long alloc_size, - unsigned long preferred_addr, - unsigned long alignment, - unsigned long min_addr) -{ - unsigned long cur_image_addr; - unsigned long new_addr = 0; - efi_status_t status; - unsigned long nr_pages; - efi_physical_addr_t efi_addr = preferred_addr; - - if (!image_addr || !image_size || !alloc_size) - return EFI_INVALID_PARAMETER; - if (alloc_size < image_size) - return EFI_INVALID_PARAMETER; - - cur_image_addr = *image_addr; - - /* - * The EFI firmware loader could have placed the kernel image - * anywhere in memory, but the kernel has restrictions on the - * max physical address it can run at. Some architectures - * also have a prefered address, so first try to relocate - * to the preferred address. If that fails, allocate as low - * as possible while respecting the required alignment. - */ - nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; - status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, nr_pages, &efi_addr); - new_addr = efi_addr; - /* - * If preferred address allocation failed allocate as low as - * possible. - */ - if (status != EFI_SUCCESS) { - status = efi_low_alloc_above(alloc_size, alignment, &new_addr, - min_addr); } - if (status != EFI_SUCCESS) { - pr_efi_err("Failed to allocate usable memory for kernel.\n"); - return status; - } - - /* - * We know source/dest won't overlap since both memory ranges - * have been allocated by UEFI, so we can safely use memcpy. - */ - memcpy((void *)new_addr, (void *)cur_image_addr, image_size); - - /* Return the new address of the relocated image. 
*/ - *image_addr = new_addr; - - return status; + efi_bs_call(free_pool, buf); + return EFI_SUCCESS; } /* @@ -811,23 +160,19 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n) return dst; } -#ifndef MAX_CMDLINE_ADDRESS -#define MAX_CMDLINE_ADDRESS ULONG_MAX -#endif - /* * Convert the unicode UEFI command line to ASCII to pass to kernel. * Size of memory allocated return in *cmd_line_len. * Returns NULL on error. */ char *efi_convert_cmdline(efi_loaded_image_t *image, - int *cmd_line_len) + int *cmd_line_len, unsigned long max_addr) { const u16 *s2; u8 *s1 = NULL; unsigned long cmdline_addr = 0; - int load_options_chars = image->load_options_size / 2; /* UTF-16 */ - const u16 *options = image->load_options; + int load_options_chars = efi_table_attr(image, load_options_size) / 2; + const u16 *options = efi_table_attr(image, load_options); int options_bytes = 0; /* UTF-8 bytes */ int options_chars = 0; /* UTF-16 chars */ efi_status_t status; @@ -849,8 +194,7 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, options_bytes++; /* NUL termination */ - status = efi_high_alloc(options_bytes, 0, &cmdline_addr, - MAX_CMDLINE_ADDRESS); + status = efi_allocate_pages(options_bytes, &cmdline_addr, max_addr); if (status != EFI_SUCCESS) return NULL; @@ -962,3 +306,89 @@ void efi_char16_printk(efi_char16_t *str) efi_call_proto(efi_table_attr(efi_system_table(), con_out), output_string, str); } + +/* + * The LINUX_EFI_INITRD_MEDIA_GUID vendor media device path below provides a way + * for the firmware or bootloader to expose the initrd data directly to the stub + * via the trivial LoadFile2 protocol, which is defined in the UEFI spec, and is + * very easy to implement. It is a simple Linux initrd specific conduit between + * kernel and firmware, allowing us to put the EFI stub (being part of the + * kernel) in charge of where and when to load the initrd, while leaving it up + * to the firmware to decide whether it needs to expose its filesystem hierarchy + * via EFI protocols. 
+ */ +static const struct { + struct efi_vendor_dev_path vendor; + struct efi_generic_dev_path end; +} __packed initrd_dev_path = { + { + { + EFI_DEV_MEDIA, + EFI_DEV_MEDIA_VENDOR, + sizeof(struct efi_vendor_dev_path), + }, + LINUX_EFI_INITRD_MEDIA_GUID + }, { + EFI_DEV_END_PATH, + EFI_DEV_END_ENTIRE, + sizeof(struct efi_generic_dev_path) + } +}; + +/** + * efi_load_initrd_dev_path - load the initrd from the Linux initrd device path + * @load_addr: pointer to store the address where the initrd was loaded + * @load_size: pointer to store the size of the loaded initrd + * @max: upper limit for the initrd memory allocation + * @return: %EFI_SUCCESS if the initrd was loaded successfully, in which + * case @load_addr and @load_size are assigned accordingly + * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd + * device path + * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL + * %EFI_OUT_OF_RESOURCES if memory allocation failed + * %EFI_LOAD_ERROR in all other cases + */ +efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr, + unsigned long *load_size, + unsigned long max) +{ + efi_guid_t lf2_proto_guid = EFI_LOAD_FILE2_PROTOCOL_GUID; + efi_device_path_protocol_t *dp; + efi_load_file2_protocol_t *lf2; + unsigned long initrd_addr; + unsigned long initrd_size; + efi_handle_t handle; + efi_status_t status; + + if (!load_addr || !load_size) + return EFI_INVALID_PARAMETER; + + dp = (efi_device_path_protocol_t *)&initrd_dev_path; + status = efi_bs_call(locate_device_path, &lf2_proto_guid, &dp, &handle); + if (status != EFI_SUCCESS) + return status; + + status = efi_bs_call(handle_protocol, handle, &lf2_proto_guid, + (void **)&lf2); + if (status != EFI_SUCCESS) + return status; + + status = efi_call_proto(lf2, load_file, dp, false, &initrd_size, NULL); + if (status != EFI_BUFFER_TOO_SMALL) + return EFI_LOAD_ERROR; + + status = efi_allocate_pages(initrd_size, &initrd_addr, max); + if (status != EFI_SUCCESS) + return status; + + status = efi_call_proto(lf2, load_file, dp, false, &initrd_size, + (void *)initrd_addr); + if (status != EFI_SUCCESS) { + efi_free(initrd_size, initrd_addr); + return EFI_LOAD_ERROR; + } + + *load_addr = initrd_addr; + *load_size = initrd_size; + return EFI_SUCCESS; +} diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index c244b165005e..cc90a748bcf0 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -31,7 +31,9 @@ #define __efistub_global #endif +extern bool __pure nochunk(void); extern bool __pure nokaslr(void); +extern bool __pure noinitrd(void); extern bool __pure is_quiet(void); extern bool __pure novamap(void); @@ -43,10 +45,549 @@ extern __pure efi_system_table_t *efi_system_table(void); #define pr_efi_err(msg) efi_printk("EFI stub: ERROR: "msg) -void efi_char16_printk(efi_char16_t *); -void efi_char16_printk(efi_char16_t *); +/* Helper macros for the usual case of using simple C variables: */ +#ifndef fdt_setprop_inplace_var +#define fdt_setprop_inplace_var(fdt, node_offset, name, var) \ + fdt_setprop_inplace((fdt), (node_offset), (name), &(var), sizeof(var)) +#endif + +#ifndef fdt_setprop_var +#define fdt_setprop_var(fdt, node_offset, name, var) \ + fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var)) +#endif + +#define get_efi_var(name, vendor, ...) \ + efi_rt_call(get_variable, (efi_char16_t *)(name), \ + (efi_guid_t *)(vendor), __VA_ARGS__) + +#define set_efi_var(name, vendor, ...) 
\ + efi_rt_call(set_variable, (efi_char16_t *)(name), \ + (efi_guid_t *)(vendor), __VA_ARGS__) + +#define efi_get_handle_at(array, idx) \ + (efi_is_native() ? (array)[idx] \ + : (efi_handle_t)(unsigned long)((u32 *)(array))[idx]) + +#define efi_get_handle_num(size) \ + ((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32))) + +#define for_each_efi_handle(handle, array, size, i) \ + for (i = 0; \ + i < efi_get_handle_num(size) && \ + ((handle = efi_get_handle_at((array), i)) || true); \ + i++) + +/* + * Allocation types for calls to boottime->allocate_pages. + */ +#define EFI_ALLOCATE_ANY_PAGES 0 +#define EFI_ALLOCATE_MAX_ADDRESS 1 +#define EFI_ALLOCATE_ADDRESS 2 +#define EFI_MAX_ALLOCATE_TYPE 3 + +/* + * The type of search to perform when calling boottime->locate_handle + */ +#define EFI_LOCATE_ALL_HANDLES 0 +#define EFI_LOCATE_BY_REGISTER_NOTIFY 1 +#define EFI_LOCATE_BY_PROTOCOL 2 + +struct efi_boot_memmap { + efi_memory_desc_t **map; + unsigned long *map_size; + unsigned long *desc_size; + u32 *desc_ver; + unsigned long *key_ptr; + unsigned long *buff_size; +}; + +typedef struct efi_generic_dev_path efi_device_path_protocol_t; + +/* + * EFI Boot Services table + */ +union efi_boot_services { + struct { + efi_table_hdr_t hdr; + void *raise_tpl; + void *restore_tpl; + efi_status_t (__efiapi *allocate_pages)(int, int, unsigned long, + efi_physical_addr_t *); + efi_status_t (__efiapi *free_pages)(efi_physical_addr_t, + unsigned long); + efi_status_t (__efiapi *get_memory_map)(unsigned long *, void *, + unsigned long *, + unsigned long *, u32 *); + efi_status_t (__efiapi *allocate_pool)(int, unsigned long, + void **); + efi_status_t (__efiapi *free_pool)(void *); + void *create_event; + void *set_timer; + void *wait_for_event; + void *signal_event; + void *close_event; + void *check_event; + void *install_protocol_interface; + void *reinstall_protocol_interface; + void *uninstall_protocol_interface; + efi_status_t (__efiapi *handle_protocol)(efi_handle_t, + efi_guid_t *, void **); + void *__reserved; + void *register_protocol_notify; + efi_status_t (__efiapi *locate_handle)(int, efi_guid_t *, + void *, unsigned long *, + efi_handle_t *); + efi_status_t (__efiapi *locate_device_path)(efi_guid_t *, + efi_device_path_protocol_t **, + efi_handle_t *); + efi_status_t (__efiapi *install_configuration_table)(efi_guid_t *, + void *); + void *load_image; + void *start_image; + efi_status_t __noreturn (__efiapi *exit)(efi_handle_t, + efi_status_t, + unsigned long, + efi_char16_t *); + void *unload_image; + efi_status_t (__efiapi *exit_boot_services)(efi_handle_t, + unsigned long); + void *get_next_monotonic_count; + void *stall; + void *set_watchdog_timer; + void *connect_controller; + efi_status_t (__efiapi *disconnect_controller)(efi_handle_t, + efi_handle_t, + efi_handle_t); + void *open_protocol; + void *close_protocol; + void *open_protocol_information; + void *protocols_per_handle; + void *locate_handle_buffer; + efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *, + void **); + void *install_multiple_protocol_interfaces; + void *uninstall_multiple_protocol_interfaces; + void *calculate_crc32; + void *copy_mem; + void *set_mem; + void *create_event_ex; + }; + struct { + efi_table_hdr_t hdr; + u32 raise_tpl; + u32 restore_tpl; + u32 allocate_pages; + u32 free_pages; + u32 get_memory_map; + u32 allocate_pool; + u32 free_pool; + u32 create_event; + u32 set_timer; + u32 wait_for_event; + u32 signal_event; + u32 close_event; + u32 check_event; + u32 install_protocol_interface; + 
u32 reinstall_protocol_interface; + u32 uninstall_protocol_interface; + u32 handle_protocol; + u32 __reserved; + u32 register_protocol_notify; + u32 locate_handle; + u32 locate_device_path; + u32 install_configuration_table; + u32 load_image; + u32 start_image; + u32 exit; + u32 unload_image; + u32 exit_boot_services; + u32 get_next_monotonic_count; + u32 stall; + u32 set_watchdog_timer; + u32 connect_controller; + u32 disconnect_controller; + u32 open_protocol; + u32 close_protocol; + u32 open_protocol_information; + u32 protocols_per_handle; + u32 locate_handle_buffer; + u32 locate_protocol; + u32 install_multiple_protocol_interfaces; + u32 uninstall_multiple_protocol_interfaces; + u32 calculate_crc32; + u32 copy_mem; + u32 set_mem; + u32 create_event_ex; + } mixed_mode; +}; + +typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t; + +union efi_uga_draw_protocol { + struct { + efi_status_t (__efiapi *get_mode)(efi_uga_draw_protocol_t *, + u32*, u32*, u32*, u32*); + void *set_mode; + void *blt; + }; + struct { + u32 get_mode; + u32 set_mode; + u32 blt; + } mixed_mode; +}; + +union efi_simple_text_output_protocol { + struct { + void *reset; + efi_status_t (__efiapi *output_string)(efi_simple_text_output_protocol_t *, + efi_char16_t *); + void *test_string; + }; + struct { + u32 reset; + u32 output_string; + u32 test_string; + } mixed_mode; +}; + +#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0 +#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1 +#define PIXEL_BIT_MASK 2 +#define PIXEL_BLT_ONLY 3 +#define PIXEL_FORMAT_MAX 4 + +typedef struct { + u32 red_mask; + u32 green_mask; + u32 blue_mask; + u32 reserved_mask; +} efi_pixel_bitmask_t; + +typedef struct { + u32 version; + u32 horizontal_resolution; + u32 vertical_resolution; + int pixel_format; + efi_pixel_bitmask_t pixel_information; + u32 pixels_per_scan_line; +} efi_graphics_output_mode_info_t; + +typedef union efi_graphics_output_protocol_mode efi_graphics_output_protocol_mode_t; + +union efi_graphics_output_protocol_mode { + struct { + u32 max_mode; + u32 mode; + efi_graphics_output_mode_info_t *info; + unsigned long size_of_info; + efi_physical_addr_t frame_buffer_base; + unsigned long frame_buffer_size; + }; + struct { + u32 max_mode; + u32 mode; + u32 info; + u32 size_of_info; + u64 frame_buffer_base; + u32 frame_buffer_size; + } mixed_mode; +}; + +typedef union efi_graphics_output_protocol efi_graphics_output_protocol_t; + +union efi_graphics_output_protocol { + struct { + void *query_mode; + void *set_mode; + void *blt; + efi_graphics_output_protocol_mode_t *mode; + }; + struct { + u32 query_mode; + u32 set_mode; + u32 blt; + u32 mode; + } mixed_mode; +}; + +typedef union { + struct { + u32 revision; + efi_handle_t parent_handle; + efi_system_table_t *system_table; + efi_handle_t device_handle; + void *file_path; + void *reserved; + u32 load_options_size; + void *load_options; + void *image_base; + __aligned_u64 image_size; + unsigned int image_code_type; + unsigned int image_data_type; + efi_status_t (__efiapi *unload)(efi_handle_t image_handle); + }; + struct { + u32 revision; + u32 parent_handle; + u32 system_table; + u32 device_handle; + u32 file_path; + u32 reserved; + u32 load_options_size; + u32 load_options; + u32 image_base; + __aligned_u64 image_size; + u32 image_code_type; + u32 image_data_type; + u32 unload; + } mixed_mode; +} efi_loaded_image_t; + +typedef struct { + u64 size; + u64 file_size; + u64 phys_size; + efi_time_t create_time; + efi_time_t last_access_time; + efi_time_t modification_time; + __aligned_u64 
attribute; + efi_char16_t filename[]; +} efi_file_info_t; + +typedef struct efi_file_protocol efi_file_protocol_t; + +struct efi_file_protocol { + u64 revision; + efi_status_t (__efiapi *open) (efi_file_protocol_t *, + efi_file_protocol_t **, + efi_char16_t *, u64, u64); + efi_status_t (__efiapi *close) (efi_file_protocol_t *); + efi_status_t (__efiapi *delete) (efi_file_protocol_t *); + efi_status_t (__efiapi *read) (efi_file_protocol_t *, + unsigned long *, void *); + efi_status_t (__efiapi *write) (efi_file_protocol_t *, + unsigned long, void *); + efi_status_t (__efiapi *get_position)(efi_file_protocol_t *, u64 *); + efi_status_t (__efiapi *set_position)(efi_file_protocol_t *, u64); + efi_status_t (__efiapi *get_info) (efi_file_protocol_t *, + efi_guid_t *, unsigned long *, + void *); + efi_status_t (__efiapi *set_info) (efi_file_protocol_t *, + efi_guid_t *, unsigned long, + void *); + efi_status_t (__efiapi *flush) (efi_file_protocol_t *); +}; -unsigned long get_dram_base(void); +typedef struct efi_simple_file_system_protocol efi_simple_file_system_protocol_t; + +struct efi_simple_file_system_protocol { + u64 revision; + int (__efiapi *open_volume)(efi_simple_file_system_protocol_t *, + efi_file_protocol_t **); +}; + +#define EFI_FILE_MODE_READ 0x0000000000000001 +#define EFI_FILE_MODE_WRITE 0x0000000000000002 +#define EFI_FILE_MODE_CREATE 0x8000000000000000 + +typedef enum { + EfiPciIoWidthUint8, + EfiPciIoWidthUint16, + EfiPciIoWidthUint32, + EfiPciIoWidthUint64, + EfiPciIoWidthFifoUint8, + EfiPciIoWidthFifoUint16, + EfiPciIoWidthFifoUint32, + EfiPciIoWidthFifoUint64, + EfiPciIoWidthFillUint8, + EfiPciIoWidthFillUint16, + EfiPciIoWidthFillUint32, + EfiPciIoWidthFillUint64, + EfiPciIoWidthMaximum +} EFI_PCI_IO_PROTOCOL_WIDTH; + +typedef enum { + EfiPciIoAttributeOperationGet, + EfiPciIoAttributeOperationSet, + EfiPciIoAttributeOperationEnable, + EfiPciIoAttributeOperationDisable, + EfiPciIoAttributeOperationSupported, + EfiPciIoAttributeOperationMaximum +} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION; + +typedef struct { + u32 read; + u32 write; +} efi_pci_io_protocol_access_32_t; + +typedef union efi_pci_io_protocol efi_pci_io_protocol_t; + +typedef +efi_status_t (__efiapi *efi_pci_io_protocol_cfg_t)(efi_pci_io_protocol_t *, + EFI_PCI_IO_PROTOCOL_WIDTH, + u32 offset, + unsigned long count, + void *buffer); + +typedef struct { + void *read; + void *write; +} efi_pci_io_protocol_access_t; + +typedef struct { + efi_pci_io_protocol_cfg_t read; + efi_pci_io_protocol_cfg_t write; +} efi_pci_io_protocol_config_access_t; + +union efi_pci_io_protocol { + struct { + void *poll_mem; + void *poll_io; + efi_pci_io_protocol_access_t mem; + efi_pci_io_protocol_access_t io; + efi_pci_io_protocol_config_access_t pci; + void *copy_mem; + void *map; + void *unmap; + void *allocate_buffer; + void *free_buffer; + void *flush; + efi_status_t (__efiapi *get_location)(efi_pci_io_protocol_t *, + unsigned long *segment_nr, + unsigned long *bus_nr, + unsigned long *device_nr, + unsigned long *func_nr); + void *attributes; + void *get_bar_attributes; + void *set_bar_attributes; + uint64_t romsize; + void *romimage; + }; + struct { + u32 poll_mem; + u32 poll_io; + efi_pci_io_protocol_access_32_t mem; + efi_pci_io_protocol_access_32_t io; + efi_pci_io_protocol_access_32_t pci; + u32 copy_mem; + u32 map; + u32 unmap; + u32 allocate_buffer; + u32 free_buffer; + u32 flush; + u32 get_location; + u32 attributes; + u32 get_bar_attributes; + u32 set_bar_attributes; + u64 romsize; + u32 romimage; + } mixed_mode; +}; + 
+#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001 +#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002 +#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004 +#define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008 +#define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010 +#define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020 +#define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040 +#define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080 +#define EFI_PCI_IO_ATTRIBUTE_IO 0x0100 +#define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200 +#define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400 +#define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800 +#define EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000 +#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000 +#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000 +#define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000 +#define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000 +#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000 +#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000 + +struct efi_dev_path; + +typedef union apple_properties_protocol apple_properties_protocol_t; + +union apple_properties_protocol { + struct { + unsigned long version; + efi_status_t (__efiapi *get)(apple_properties_protocol_t *, + struct efi_dev_path *, + efi_char16_t *, void *, u32 *); + efi_status_t (__efiapi *set)(apple_properties_protocol_t *, + struct efi_dev_path *, + efi_char16_t *, void *, u32); + efi_status_t (__efiapi *del)(apple_properties_protocol_t *, + struct efi_dev_path *, + efi_char16_t *); + efi_status_t (__efiapi *get_all)(apple_properties_protocol_t *, + void *buffer, u32 *); + }; + struct { + u32 version; + u32 get; + u32 set; + u32 del; + u32 get_all; + } mixed_mode; +}; + +typedef u32 efi_tcg2_event_log_format; + +typedef union efi_tcg2_protocol efi_tcg2_protocol_t; + +union efi_tcg2_protocol { + struct { + void *get_capability; + efi_status_t (__efiapi *get_event_log)(efi_handle_t, + efi_tcg2_event_log_format, + efi_physical_addr_t *, + efi_physical_addr_t *, + efi_bool_t *); + void *hash_log_extend_event; + void *submit_command; + void *get_active_pcr_banks; + void *set_active_pcr_banks; + void *get_result_of_set_active_pcr_banks; + }; + struct { + u32 get_capability; + u32 get_event_log; + u32 hash_log_extend_event; + u32 submit_command; + u32 get_active_pcr_banks; + u32 set_active_pcr_banks; + u32 get_result_of_set_active_pcr_banks; + } mixed_mode; +}; + +typedef union efi_load_file_protocol efi_load_file_protocol_t; +typedef union efi_load_file_protocol efi_load_file2_protocol_t; + +union efi_load_file_protocol { + struct { + efi_status_t (__efiapi *load_file)(efi_load_file_protocol_t *, + efi_device_path_protocol_t *, + bool, unsigned long *, void *); + }; + struct { + u32 load_file; + } mixed_mode; +}; + +void efi_pci_disable_bridge_busmaster(void); + +typedef efi_status_t (*efi_exit_boot_map_processing)( + struct efi_boot_memmap *map, + void *priv); + +efi_status_t efi_exit_boot_services(void *handle, + struct efi_boot_memmap *map, + void *priv, + efi_exit_boot_map_processing priv_func); + +void efi_char16_printk(efi_char16_t *); efi_status_t allocate_new_fdt_and_exit_boot(void *handle, unsigned long *new_fdt_addr, @@ -71,23 +612,57 @@ efi_status_t check_platform_features(void); void *get_efi_config_table(efi_guid_t guid); -/* Helper macros for the usual case of using simple C variables: */ -#ifndef fdt_setprop_inplace_var -#define fdt_setprop_inplace_var(fdt, node_offset, name, var) \ - fdt_setprop_inplace((fdt), (node_offset), (name), &(var), sizeof(var)) -#endif +void efi_printk(char *str); -#ifndef fdt_setprop_var 
-#define fdt_setprop_var(fdt, node_offset, name, var) \ - fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var)) -#endif +void efi_free(unsigned long size, unsigned long addr); -#define get_efi_var(name, vendor, ...) \ - efi_rt_call(get_variable, (efi_char16_t *)(name), \ - (efi_guid_t *)(vendor), __VA_ARGS__) +char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len, + unsigned long max_addr); -#define set_efi_var(name, vendor, ...) \ - efi_rt_call(set_variable, (efi_char16_t *)(name), \ - (efi_guid_t *)(vendor), __VA_ARGS__) +efi_status_t efi_get_memory_map(struct efi_boot_memmap *map); + +efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, + unsigned long *addr, unsigned long min); + +static inline +efi_status_t efi_low_alloc(unsigned long size, unsigned long align, + unsigned long *addr) +{ + /* + * Don't allocate at 0x0. It will confuse code that + * checks pointers against NULL. Skip the first 8 + * bytes so we start at a nice even number. + */ + return efi_low_alloc_above(size, align, addr, 0x8); +} + +efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr, + unsigned long max); + +efi_status_t efi_relocate_kernel(unsigned long *image_addr, + unsigned long image_size, + unsigned long alloc_size, + unsigned long preferred_addr, + unsigned long alignment, + unsigned long min_addr); + +efi_status_t efi_parse_options(char const *cmdline); + +efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto, + unsigned long size); + +efi_status_t efi_load_dtb(efi_loaded_image_t *image, + unsigned long *load_addr, + unsigned long *load_size); + +efi_status_t efi_load_initrd(efi_loaded_image_t *image, + unsigned long *load_addr, + unsigned long *load_size, + unsigned long soft_limit, + unsigned long hard_limit); + +efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr, + unsigned long *load_size, + unsigned long max); #endif diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 0a91e5232127..46cffac7a5f1 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -199,10 +199,6 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map) return EFI_SUCCESS; } -#ifndef EFI_FDT_ALIGN -# define EFI_FDT_ALIGN EFI_PAGE_SIZE -#endif - struct exit_boot_struct { efi_memory_desc_t *runtime_map; int *runtime_entry_count; @@ -281,8 +277,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle, pr_efi("Exiting boot services and installing virtual address map...\n"); map.map = &memory_map; - status = efi_high_alloc(MAX_FDT_SIZE, EFI_FDT_ALIGN, - new_fdt_addr, max_addr); + status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, max_addr); if (status != EFI_SUCCESS) { pr_efi_err("Unable to allocate memory for new device tree.\n"); goto fail; diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c new file mode 100644 index 000000000000..d4c7e5f59d2c --- /dev/null +++ b/drivers/firmware/efi/libstub/file.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Helper functions used by the EFI stub on multiple + * architectures. This should be #included by the EFI stub + * implementation files. + * + * Copyright 2011 Intel Corporation; author Matt Fleming + */ + +#include <linux/efi.h> +#include <asm/efi.h> + +#include "efistub.h" + +#define MAX_FILENAME_SIZE 256 + +/* + * Some firmware implementations have problems reading files in one go. + * A read chunk size of 1MB seems to work for most platforms. 
+ *
+ * Unfortunately, reading files in chunks triggers *other* bugs on some
+ * platforms, so we provide a way to disable this workaround, which can
+ * be done by passing "efi=nochunk" on the EFI boot stub command line.
+ *
+ * If you experience issues with initrd images being corrupt it's worth
+ * trying efi=nochunk, but chunking is enabled by default on x86 because
+ * there are far more machines that require the workaround than those that
+ * break with it enabled.
+ */
+#define EFI_READ_CHUNK_SIZE SZ_1M
+
+static efi_status_t efi_open_file(efi_file_protocol_t *volume,
+ efi_char16_t *filename_16,
+ efi_file_protocol_t **handle,
+ unsigned long *file_size)
+{
+ struct {
+ efi_file_info_t info;
+ efi_char16_t filename[MAX_FILENAME_SIZE];
+ } finfo;
+ efi_guid_t info_guid = EFI_FILE_INFO_ID;
+ efi_file_protocol_t *fh;
+ unsigned long info_sz;
+ efi_status_t status;
+
+ status = volume->open(volume, &fh, filename_16, EFI_FILE_MODE_READ, 0);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to open file: ");
+ efi_char16_printk(filename_16);
+ efi_printk("\n");
+ return status;
+ }
+
+ info_sz = sizeof(finfo);
+ status = fh->get_info(fh, &info_guid, &info_sz, &finfo);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to get file info\n");
+ fh->close(fh);
+ return status;
+ }
+
+ *handle = fh;
+ *file_size = finfo.info.file_size;
+ return EFI_SUCCESS;
+}
+
+static efi_status_t efi_open_volume(efi_loaded_image_t *image,
+ efi_file_protocol_t **fh)
+{
+ efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+ efi_simple_file_system_protocol_t *io;
+ efi_status_t status;
+
+ status = efi_bs_call(handle_protocol, image->device_handle, &fs_proto,
+ (void **)&io);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to handle fs_proto\n");
+ return status;
+ }
+
+ status = io->open_volume(io, fh);
+ if (status != EFI_SUCCESS)
+ pr_efi_err("Failed to open volume\n");
+
+ return status;
+}
+
+static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
+ const efi_char16_t *prefix, int prefix_size,
+ efi_char16_t *result, int result_len)
+{
+ int prefix_len = prefix_size / 2;
+ bool found = false;
+ int i;
+
+ for (i = prefix_len; i < cmdline_len; i++) {
+ if (!memcmp(&cmdline[i - prefix_len], prefix, prefix_size)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return 0;
+
+ while (--result_len > 0 && i < cmdline_len) {
+ if (cmdline[i] == L'\0' ||
+ cmdline[i] == L'\n' ||
+ cmdline[i] == L' ')
+ break;
+ *result++ = cmdline[i++];
+ }
+ *result = L'\0';
+ return i;
+}
+
+/*
+ * Check the cmdline for LILO-style file= arguments.
+ *
+ * We only support loading a file from the same filesystem as
+ * the kernel image.
+ */ +static efi_status_t handle_cmdline_files(efi_loaded_image_t *image, + const efi_char16_t *optstr, + int optstr_size, + unsigned long soft_limit, + unsigned long hard_limit, + unsigned long *load_addr, + unsigned long *load_size) +{ + const efi_char16_t *cmdline = image->load_options; + int cmdline_len = image->load_options_size / 2; + unsigned long efi_chunk_size = ULONG_MAX; + efi_file_protocol_t *volume = NULL; + efi_file_protocol_t *file; + unsigned long alloc_addr; + unsigned long alloc_size; + efi_status_t status; + int offset; + + if (!load_addr || !load_size) + return EFI_INVALID_PARAMETER; + + if (IS_ENABLED(CONFIG_X86) && !nochunk()) + efi_chunk_size = EFI_READ_CHUNK_SIZE; + + alloc_addr = alloc_size = 0; + do { + efi_char16_t filename[MAX_FILENAME_SIZE]; + unsigned long size; + void *addr; + + offset = find_file_option(cmdline, cmdline_len, + optstr, optstr_size, + filename, ARRAY_SIZE(filename)); + + if (!offset) + break; + + cmdline += offset; + cmdline_len -= offset; + + if (!volume) { + status = efi_open_volume(image, &volume); + if (status != EFI_SUCCESS) + return status; + } + + status = efi_open_file(volume, filename, &file, &size); + if (status != EFI_SUCCESS) + goto err_close_volume; + + /* + * Check whether the existing allocation can contain the next + * file. This condition will also trigger naturally during the + * first (and typically only) iteration of the loop, given that + * alloc_size == 0 in that case. + */ + if (round_up(alloc_size + size, EFI_ALLOC_ALIGN) > + round_up(alloc_size, EFI_ALLOC_ALIGN)) { + unsigned long old_addr = alloc_addr; + + status = EFI_OUT_OF_RESOURCES; + if (soft_limit < hard_limit) + status = efi_allocate_pages(alloc_size + size, + &alloc_addr, + soft_limit); + if (status == EFI_OUT_OF_RESOURCES) + status = efi_allocate_pages(alloc_size + size, + &alloc_addr, + hard_limit); + if (status != EFI_SUCCESS) { + pr_efi_err("Failed to allocate memory for files\n"); + goto err_close_file; + } + + if (old_addr != 0) { + /* + * This is not the first time we've gone + * around this loop, and so we are loading + * multiple files that need to be concatenated + * and returned in a single buffer. 
+ */
+ memcpy((void *)alloc_addr, (void *)old_addr, alloc_size);
+ efi_free(alloc_size, old_addr);
+ }
+ }
+
+ addr = (void *)alloc_addr + alloc_size;
+ alloc_size += size;
+
+ while (size) {
+ unsigned long chunksize = min(size, efi_chunk_size);
+
+ status = file->read(file, &chunksize, addr);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to read file\n");
+ goto err_close_file;
+ }
+ addr += chunksize;
+ size -= chunksize;
+ }
+ file->close(file);
+ } while (offset > 0);
+
+ *load_addr = alloc_addr;
+ *load_size = alloc_size;
+
+ if (volume)
+ volume->close(volume);
+ return EFI_SUCCESS;
+
+err_close_file:
+ file->close(file);
+
+err_close_volume:
+ volume->close(volume);
+ efi_free(alloc_size, alloc_addr);
+ return status;
+}
+
+efi_status_t efi_load_dtb(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size)
+{
+ return handle_cmdline_files(image, L"dtb=", sizeof(L"dtb=") - 2,
+ ULONG_MAX, ULONG_MAX, load_addr, load_size);
+}
+
+efi_status_t efi_load_initrd(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit)
+{
+ return handle_cmdline_files(image, L"initrd=", sizeof(L"initrd=") - 2,
+ soft_limit, hard_limit, load_addr, load_size);
+}
diff --git a/drivers/firmware/efi/libstub/hidden.h b/drivers/firmware/efi/libstub/hidden.h
new file mode 100644
index 000000000000..3493b041f419
--- /dev/null
+++ b/drivers/firmware/efi/libstub/hidden.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * To prevent the compiler from emitting GOT-indirected (and thus absolute)
+ * references to any global symbols, override their visibility as 'hidden'
+ */
+#pragma GCC visibility push(hidden)
diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
new file mode 100644
index 000000000000..869a79c8946f
--- /dev/null
+++ b/drivers/firmware/efi/libstub/mem.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+#define EFI_MMAP_NR_SLACK_SLOTS 8
+
+static inline bool mmap_has_headroom(unsigned long buff_size,
+ unsigned long map_size,
+ unsigned long desc_size)
+{
+ unsigned long slack = buff_size - map_size;
+
+ return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
+}
+
+/**
+ * efi_get_memory_map() - get memory map
+ * @map: on return pointer to memory map
+ *
+ * Retrieve the UEFI memory map. The allocated memory leaves room for
+ * up to EFI_MMAP_NR_SLACK_SLOTS additional memory map entries.
+ *
+ * Return: status code
+ */
+efi_status_t efi_get_memory_map(struct efi_boot_memmap *map)
+{
+ efi_memory_desc_t *m = NULL;
+ efi_status_t status;
+ unsigned long key;
+ u32 desc_version;
+
+ *map->desc_size = sizeof(*m);
+ *map->map_size = *map->desc_size * 32;
+ *map->buff_size = *map->map_size;
+again:
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ *map->map_size, (void **)&m);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ *map->desc_size = 0;
+ key = 0;
+ status = efi_bs_call(get_memory_map, map->map_size, m,
+ &key, map->desc_size, &desc_version);
+ if (status == EFI_BUFFER_TOO_SMALL ||
+ !mmap_has_headroom(*map->buff_size, *map->map_size,
+ *map->desc_size)) {
+ efi_bs_call(free_pool, m);
+ /*
+ * Make sure there are some entries of headroom so that the
+ * buffer can be reused for a new map after allocations are
+ * no longer permitted. It's unlikely that the map will grow to
+ * exceed this headroom once we are ready to trigger
+ * ExitBootServices()
+ */
+ *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
+ *map->buff_size = *map->map_size;
+ goto again;
+ }
+
+ if (status == EFI_SUCCESS) {
+ if (map->key_ptr)
+ *map->key_ptr = key;
+ if (map->desc_ver)
+ *map->desc_ver = desc_version;
+ } else {
+ efi_bs_call(free_pool, m);
+ }
+
+fail:
+ *map->map = m;
+ return status;
+}
+
+/**
+ * efi_allocate_pages() - Allocate memory pages
+ * @size: minimum number of bytes to allocate
+ * @addr: On return the address of the first allocated page. The first
+ * allocated page has alignment EFI_ALLOC_ALIGN which is an
+ * architecture dependent multiple of the page size.
+ * @max: the address that the last allocated memory page shall not
+ * exceed
+ *
+ * Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
+ * to EFI_ALLOC_ALIGN. The last allocated page will not exceed the address
+ * given by @max.
+ *
+ * Return: status code
+ */
+efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
+ unsigned long max)
+{
+ efi_physical_addr_t alloc_addr = ALIGN_DOWN(max + 1, EFI_ALLOC_ALIGN) - 1;
+ int slack = EFI_ALLOC_ALIGN / EFI_PAGE_SIZE - 1;
+ efi_status_t status;
+
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+ EFI_LOADER_DATA, size / EFI_PAGE_SIZE + slack,
+ &alloc_addr);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *addr = ALIGN((unsigned long)alloc_addr, EFI_ALLOC_ALIGN);
+
+ if (slack > 0) {
+ int l = (alloc_addr % EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+
+ if (l) {
+ efi_bs_call(free_pages, alloc_addr, slack - l + 1);
+ slack = l - 1;
+ }
+ if (slack)
+ efi_bs_call(free_pages, *addr + size, slack);
+ }
+ return EFI_SUCCESS;
+}
+/**
+ * efi_low_alloc_above() - allocate pages at or above given address
+ * @size: size of the memory area to allocate
+ * @align: minimum alignment of the allocated memory area. It should
+ * be a power of two.
+ * @addr: on exit the address of the allocated memory
+ * @min: minimum address to be used for the memory allocation
+ *
+ * Allocate at the lowest possible address that is not below @min as
+ * EFI_LOADER_DATA. The allocated pages are aligned according to @align but at
+ * least EFI_ALLOC_ALIGN. The first allocated page will not be below the address
+ * given by @min.
+ *
+ * Return: status code
+ */
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min)
+{
+ unsigned long map_size, desc_size, buff_size;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ int i;
+ struct efi_boot_memmap boot_map;
+
+ boot_map.map = &map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(&boot_map);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ /*
+ * Enforce minimum alignment that EFI or Linux requires when
+ * requesting a specific address. We are doing page-based (or
+ * larger) allocations, and both the address and size must meet
+ * alignment constraints.
+ */
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ nr_pages = size / EFI_PAGE_SIZE;
+ for (i = 0; i < map_size / desc_size; i++) {
+ efi_memory_desc_t *desc;
+ unsigned long m = (unsigned long)map;
+ u64 start, end;
+
+ desc = efi_early_memdesc_ptr(m, desc_size, i);
+
+ if (desc->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
+ if (desc->num_pages < nr_pages)
+ continue;
+
+ start = desc->phys_addr;
+ end = start + desc->num_pages * EFI_PAGE_SIZE;
+
+ if (start < min)
+ start = min;
+
+ start = round_up(start, align);
+ if ((start + size) > end)
+ continue;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &start);
+ if (status == EFI_SUCCESS) {
+ *addr = start;
+ break;
+ }
+ }
+
+ if (i == map_size / desc_size)
+ status = EFI_NOT_FOUND;
+
+ efi_bs_call(free_pool, map);
+fail:
+ return status;
+}
+
+/**
+ * efi_free() - free memory pages
+ * @size: size of the memory area to free in bytes
+ * @addr: start of the memory area to free (must be EFI_PAGE_SIZE
+ * aligned)
+ *
+ * @size is rounded up to a multiple of EFI_ALLOC_ALIGN which is an
+ * architecture specific multiple of EFI_PAGE_SIZE. So this function should
+ * only be used to return pages allocated with efi_allocate_pages() or
+ * efi_low_alloc_above().
+ */
+void efi_free(unsigned long size, unsigned long addr)
+{
+ unsigned long nr_pages;
+
+ if (!size)
+ return;
+
+ nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ efi_bs_call(free_pages, addr, nr_pages);
+}
+
+/**
+ * efi_relocate_kernel() - copy memory area
+ * @image_addr: pointer to address of memory area to copy
+ * @image_size: size of memory area to copy
+ * @alloc_size: minimum size of memory to allocate, must be greater or
+ * equal to image_size
+ * @preferred_addr: preferred target address
+ * @alignment: minimum alignment of the allocated memory area. It
+ * should be a power of two.
+ * @min_addr: minimum target address
+ *
+ * Copy a memory area to a newly allocated memory area aligned according
+ * to @alignment but at least EFI_ALLOC_ALIGN. If the preferred address
+ * is not available, the allocated address will not be below @min_addr.
+ * On exit, @image_addr is updated to the target copy address that was used.
+ *
+ * This function is used to copy the Linux kernel verbatim. It does not apply
+ * any relocation changes.
+ *
+ * Return: status code
+ */
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
+ unsigned long image_size,
+ unsigned long alloc_size,
+ unsigned long preferred_addr,
+ unsigned long alignment,
+ unsigned long min_addr)
+{
+ unsigned long cur_image_addr;
+ unsigned long new_addr = 0;
+ efi_status_t status;
+ unsigned long nr_pages;
+ efi_physical_addr_t efi_addr = preferred_addr;
+
+ if (!image_addr || !image_size || !alloc_size)
+ return EFI_INVALID_PARAMETER;
+ if (alloc_size < image_size)
+ return EFI_INVALID_PARAMETER;
+
+ cur_image_addr = *image_addr;
+
+ /*
+ * The EFI firmware loader could have placed the kernel image
+ * anywhere in memory, but the kernel has restrictions on the
+ * max physical address it can run at. Some architectures
+ * also have a preferred address, so first try to relocate
+ * to the preferred address. If that fails, allocate as low
+ * as possible while respecting the required alignment.
+ */ + nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, nr_pages, &efi_addr); + new_addr = efi_addr; + /* + * If preferred address allocation failed allocate as low as + * possible. + */ + if (status != EFI_SUCCESS) { + status = efi_low_alloc_above(alloc_size, alignment, &new_addr, + min_addr); + } + if (status != EFI_SUCCESS) { + pr_efi_err("Failed to allocate usable memory for kernel.\n"); + return status; + } + + /* + * We know source/dest won't overlap since both memory ranges + * have been allocated by UEFI, so we can safely use memcpy. + */ + memcpy((void *)new_addr, (void *)cur_image_addr, image_size); + + /* Return the new address of the relocated image. */ + *image_addr = new_addr; + + return status; +} diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 316ce9ff0193..24aa37535372 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -4,7 +4,6 @@ */ #include <linux/efi.h> -#include <linux/log2.h> #include <asm/efi.h> #include "efistub.h" @@ -26,6 +25,17 @@ union efi_rng_protocol { } mixed_mode; }; +/** + * efi_get_random_bytes() - fill a buffer with random bytes + * @size: size of the buffer + * @out: caller allocated buffer to receive the random bytes + * + * The call will fail if either the firmware does not implement the + * EFI_RNG_PROTOCOL or there are not enough random bytes available to fill + * the buffer. + * + * Return: status code + */ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out) { efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; @@ -39,119 +49,19 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out) return efi_call_proto(rng, get_rng, NULL, size, out); } -/* - * Return the number of slots covered by this entry, i.e., the number of - * addresses it covers that are suitably aligned and supply enough room - * for the allocation. +/** + * efi_random_get_seed() - provide random seed as configuration table + * + * The EFI_RNG_PROTOCOL is used to read random bytes. These random bytes are + * saved as a configuration table which can be used as entropy by the kernel + * for the initialization of its pseudo random number generator. + * + * If the EFI_RNG_PROTOCOL is not available or there are not enough random bytes + * available, the configuration table will not be installed and an error code + * will be returned. + * + * Return: status code */ -static unsigned long get_entry_num_slots(efi_memory_desc_t *md, - unsigned long size, - unsigned long align_shift) -{ - unsigned long align = 1UL << align_shift; - u64 first_slot, last_slot, region_end; - - if (md->type != EFI_CONVENTIONAL_MEMORY) - return 0; - - if (efi_soft_reserve_enabled() && - (md->attribute & EFI_MEMORY_SP)) - return 0; - - region_end = min((u64)ULONG_MAX, md->phys_addr + md->num_pages*EFI_PAGE_SIZE - 1); - - first_slot = round_up(md->phys_addr, align); - last_slot = round_down(region_end - size + 1, align); - - if (first_slot > last_slot) - return 0; - - return ((unsigned long)(last_slot - first_slot) >> align_shift) + 1; -} - -/* - * The UEFI memory descriptors have a virtual address field that is only used - * when installing the virtual mapping using SetVirtualAddressMap(). Since it - * is unused here, we can reuse it to keep track of each descriptor's slot - * count. 
- */ -#define MD_NUM_SLOTS(md) ((md)->virt_addr) - -efi_status_t efi_random_alloc(unsigned long size, - unsigned long align, - unsigned long *addr, - unsigned long random_seed) -{ - unsigned long map_size, desc_size, total_slots = 0, target_slot; - unsigned long buff_size; - efi_status_t status; - efi_memory_desc_t *memory_map; - int map_offset; - struct efi_boot_memmap map; - - map.map = &memory_map; - map.map_size = &map_size; - map.desc_size = &desc_size; - map.desc_ver = NULL; - map.key_ptr = NULL; - map.buff_size = &buff_size; - - status = efi_get_memory_map(&map); - if (status != EFI_SUCCESS) - return status; - - if (align < EFI_ALLOC_ALIGN) - align = EFI_ALLOC_ALIGN; - - /* count the suitable slots in each memory map entry */ - for (map_offset = 0; map_offset < map_size; map_offset += desc_size) { - efi_memory_desc_t *md = (void *)memory_map + map_offset; - unsigned long slots; - - slots = get_entry_num_slots(md, size, ilog2(align)); - MD_NUM_SLOTS(md) = slots; - total_slots += slots; - } - - /* find a random number between 0 and total_slots */ - target_slot = (total_slots * (u16)random_seed) >> 16; - - /* - * target_slot is now a value in the range [0, total_slots), and so - * it corresponds with exactly one of the suitable slots we recorded - * when iterating over the memory map the first time around. - * - * So iterate over the memory map again, subtracting the number of - * slots of each entry at each iteration, until we have found the entry - * that covers our chosen slot. Use the residual value of target_slot - * to calculate the randomly chosen address, and allocate it directly - * using EFI_ALLOCATE_ADDRESS. - */ - for (map_offset = 0; map_offset < map_size; map_offset += desc_size) { - efi_memory_desc_t *md = (void *)memory_map + map_offset; - efi_physical_addr_t target; - unsigned long pages; - - if (target_slot >= MD_NUM_SLOTS(md)) { - target_slot -= MD_NUM_SLOTS(md); - continue; - } - - target = round_up(md->phys_addr, align) + target_slot * align; - pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; - - status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, pages, &target); - if (status == EFI_SUCCESS) - *addr = target; - break; - } - - efi_bs_call(free_pool, memory_map); - - return status; -} - efi_status_t efi_random_get_seed(void) { efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c new file mode 100644 index 000000000000..4578f59e160c --- /dev/null +++ b/drivers/firmware/efi/libstub/randomalloc.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2016 Linaro Ltd; <ard.biesheuvel@linaro.org> + */ + +#include <linux/efi.h> +#include <linux/log2.h> +#include <asm/efi.h> + +#include "efistub.h" + +/* + * Return the number of slots covered by this entry, i.e., the number of + * addresses it covers that are suitably aligned and supply enough room + * for the allocation. 
+ */ +static unsigned long get_entry_num_slots(efi_memory_desc_t *md, + unsigned long size, + unsigned long align_shift) +{ + unsigned long align = 1UL << align_shift; + u64 first_slot, last_slot, region_end; + + if (md->type != EFI_CONVENTIONAL_MEMORY) + return 0; + + if (efi_soft_reserve_enabled() && + (md->attribute & EFI_MEMORY_SP)) + return 0; + + region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1, + (u64)ULONG_MAX); + + first_slot = round_up(md->phys_addr, align); + last_slot = round_down(region_end - size + 1, align); + + if (first_slot > last_slot) + return 0; + + return ((unsigned long)(last_slot - first_slot) >> align_shift) + 1; +} + +/* + * The UEFI memory descriptors have a virtual address field that is only used + * when installing the virtual mapping using SetVirtualAddressMap(). Since it + * is unused here, we can reuse it to keep track of each descriptor's slot + * count. + */ +#define MD_NUM_SLOTS(md) ((md)->virt_addr) + +efi_status_t efi_random_alloc(unsigned long size, + unsigned long align, + unsigned long *addr, + unsigned long random_seed) +{ + unsigned long map_size, desc_size, total_slots = 0, target_slot; + unsigned long buff_size; + efi_status_t status; + efi_memory_desc_t *memory_map; + int map_offset; + struct efi_boot_memmap map; + + map.map = &memory_map; + map.map_size = &map_size; + map.desc_size = &desc_size; + map.desc_ver = NULL; + map.key_ptr = NULL; + map.buff_size = &buff_size; + + status = efi_get_memory_map(&map); + if (status != EFI_SUCCESS) + return status; + + if (align < EFI_ALLOC_ALIGN) + align = EFI_ALLOC_ALIGN; + + /* count the suitable slots in each memory map entry */ + for (map_offset = 0; map_offset < map_size; map_offset += desc_size) { + efi_memory_desc_t *md = (void *)memory_map + map_offset; + unsigned long slots; + + slots = get_entry_num_slots(md, size, ilog2(align)); + MD_NUM_SLOTS(md) = slots; + total_slots += slots; + } + + /* find a random number between 0 and total_slots */ + target_slot = (total_slots * (u16)random_seed) >> 16; + + /* + * target_slot is now a value in the range [0, total_slots), and so + * it corresponds with exactly one of the suitable slots we recorded + * when iterating over the memory map the first time around. + * + * So iterate over the memory map again, subtracting the number of + * slots of each entry at each iteration, until we have found the entry + * that covers our chosen slot. Use the residual value of target_slot + * to calculate the randomly chosen address, and allocate it directly + * using EFI_ALLOCATE_ADDRESS. 
+ */ + for (map_offset = 0; map_offset < map_size; map_offset += desc_size) { + efi_memory_desc_t *md = (void *)memory_map + map_offset; + efi_physical_addr_t target; + unsigned long pages; + + if (target_slot >= MD_NUM_SLOTS(md)) { + target_slot -= MD_NUM_SLOTS(md); + continue; + } + + target = round_up(md->phys_addr, align) + target_slot * align; + pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; + + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, pages, &target); + if (status == EFI_SUCCESS) + *addr = target; + break; + } + + efi_bs_call(free_pool, memory_map); + + return status; +} diff --git a/drivers/firmware/efi/libstub/skip_spaces.c b/drivers/firmware/efi/libstub/skip_spaces.c new file mode 100644 index 000000000000..a700b3c7f7d0 --- /dev/null +++ b/drivers/firmware/efi/libstub/skip_spaces.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/ctype.h> +#include <linux/types.h> + +char *skip_spaces(const char *str) +{ + while (isspace(*str)) + ++str; + return (char *)str; +} diff --git a/drivers/firmware/efi/libstub/string.c b/drivers/firmware/efi/libstub/string.c index ed10e3f602c5..1ac2f8764715 100644 --- a/drivers/firmware/efi/libstub/string.c +++ b/drivers/firmware/efi/libstub/string.c @@ -6,6 +6,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds */ +#include <linux/ctype.h> #include <linux/types.h> #include <linux/string.h> @@ -56,3 +57,58 @@ int strncmp(const char *cs, const char *ct, size_t count) return 0; } #endif + +/* Works only for digits and letters, but small and fast */ +#define TOLOWER(x) ((x) | 0x20) + +static unsigned int simple_guess_base(const char *cp) +{ + if (cp[0] == '0') { + if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2])) + return 16; + else + return 8; + } else { + return 10; + } +} + +/** + * simple_strtoull - convert a string to an unsigned long long + * @cp: The start of the string + * @endp: A pointer to the end of the parsed string will be placed here + * @base: The number base to use + */ + +unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) +{ + unsigned long long result = 0; + + if (!base) + base = simple_guess_base(cp); + + if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x') + cp += 2; + + while (isxdigit(*cp)) { + unsigned int value; + + value = isdigit(*cp) ? 
*cp - '0' : TOLOWER(*cp) - 'a' + 10; + if (value >= base) + break; + result = result * base + value; + cp++; + } + if (endp) + *endp = (char *)cp; + + return result; +} + +long simple_strtol(const char *cp, char **endp, unsigned int base) +{ + if (*cp == '-') + return -simple_strtoull(cp + 1, endp, base); + + return simple_strtoull(cp, endp, base); +} diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c new file mode 100644 index 000000000000..8d3a707789de --- /dev/null +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -0,0 +1,837 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* ----------------------------------------------------------------------- + * + * Copyright 2011 Intel Corporation; author Matt Fleming + * + * ----------------------------------------------------------------------- */ + +#include <linux/efi.h> +#include <linux/pci.h> + +#include <asm/efi.h> +#include <asm/e820/types.h> +#include <asm/setup.h> +#include <asm/desc.h> +#include <asm/boot.h> + +#include "efistub.h" + +/* Maximum physical address for 64-bit kernel with 4-level paging */ +#define MAXMEM_X86_64_4LEVEL (1ull << 46) + +static efi_system_table_t *sys_table; +extern const bool efi_is64; +extern u32 image_offset; + +__pure efi_system_table_t *efi_system_table(void) +{ + return sys_table; +} + +__attribute_const__ bool efi_is_64bit(void) +{ + if (IS_ENABLED(CONFIG_EFI_MIXED)) + return efi_is64; + return IS_ENABLED(CONFIG_X86_64); +} + +static efi_status_t +preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) +{ + struct pci_setup_rom *rom = NULL; + efi_status_t status; + unsigned long size; + uint64_t romsize; + void *romimage; + + /* + * Some firmware images contain EFI function pointers at the place where + * the romimage and romsize fields are supposed to be. Typically the EFI + * code is mapped at high addresses, translating to an unrealistically + * large romsize. The UEFI spec limits the size of option ROMs to 16 + * MiB so we reject any ROMs over 16 MiB in size to catch this. + */ + romimage = efi_table_attr(pci, romimage); + romsize = efi_table_attr(pci, romsize); + if (!romimage || !romsize || romsize > SZ_16M) + return EFI_INVALID_PARAMETER; + + size = romsize + sizeof(*rom); + + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, + (void **)&rom); + if (status != EFI_SUCCESS) { + efi_printk("Failed to allocate memory for 'rom'\n"); + return status; + } + + memset(rom, 0, sizeof(*rom)); + + rom->data.type = SETUP_PCI; + rom->data.len = size - sizeof(struct setup_data); + rom->data.next = 0; + rom->pcilen = pci->romsize; + *__rom = rom; + + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, + PCI_VENDOR_ID, 1, &rom->vendor); + + if (status != EFI_SUCCESS) { + efi_printk("Failed to read rom->vendor\n"); + goto free_struct; + } + + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, + PCI_DEVICE_ID, 1, &rom->devid); + + if (status != EFI_SUCCESS) { + efi_printk("Failed to read rom->devid\n"); + goto free_struct; + } + + status = efi_call_proto(pci, get_location, &rom->segment, &rom->bus, + &rom->device, &rom->function); + + if (status != EFI_SUCCESS) + goto free_struct; + + memcpy(rom->romdata, romimage, romsize); + return status; + +free_struct: + efi_bs_call(free_pool, rom); + return status; +} + +/* + * There's no way to return an informative status from this function, + * because any analysis (and printing of error messages) needs to be + * done directly at the EFI function call-site. 
+ * + * For example, EFI_INVALID_PARAMETER could indicate a bug or maybe we + * just didn't find any PCI devices, but there's no way to tell outside + * the context of the call. + */ +static void setup_efi_pci(struct boot_params *params) +{ + efi_status_t status; + void **pci_handle = NULL; + efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; + unsigned long size = 0; + struct setup_data *data; + efi_handle_t h; + int i; + + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, + &pci_proto, NULL, &size, pci_handle); + + if (status == EFI_BUFFER_TOO_SMALL) { + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, + (void **)&pci_handle); + + if (status != EFI_SUCCESS) { + efi_printk("Failed to allocate memory for 'pci_handle'\n"); + return; + } + + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, + &pci_proto, NULL, &size, pci_handle); + } + + if (status != EFI_SUCCESS) + goto free_handle; + + data = (struct setup_data *)(unsigned long)params->hdr.setup_data; + + while (data && data->next) + data = (struct setup_data *)(unsigned long)data->next; + + for_each_efi_handle(h, pci_handle, size, i) { + efi_pci_io_protocol_t *pci = NULL; + struct pci_setup_rom *rom; + + status = efi_bs_call(handle_protocol, h, &pci_proto, + (void **)&pci); + if (status != EFI_SUCCESS || !pci) + continue; + + status = preserve_pci_rom_image(pci, &rom); + if (status != EFI_SUCCESS) + continue; + + if (data) + data->next = (unsigned long)rom; + else + params->hdr.setup_data = (unsigned long)rom; + + data = (struct setup_data *)rom; + } + +free_handle: + efi_bs_call(free_pool, pci_handle); +} + +static void retrieve_apple_device_properties(struct boot_params *boot_params) +{ + efi_guid_t guid = APPLE_PROPERTIES_PROTOCOL_GUID; + struct setup_data *data, *new; + efi_status_t status; + u32 size = 0; + apple_properties_protocol_t *p; + + status = efi_bs_call(locate_protocol, &guid, NULL, (void **)&p); + if (status != EFI_SUCCESS) + return; + + if (efi_table_attr(p, version) != 0x10000) { + efi_printk("Unsupported properties proto version\n"); + return; + } + + efi_call_proto(p, get_all, NULL, &size); + if (!size) + return; + + do { + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + size + sizeof(struct setup_data), + (void **)&new); + if (status != EFI_SUCCESS) { + efi_printk("Failed to allocate memory for 'properties'\n"); + return; + } + + status = efi_call_proto(p, get_all, new->data, &size); + + if (status == EFI_BUFFER_TOO_SMALL) + efi_bs_call(free_pool, new); + } while (status == EFI_BUFFER_TOO_SMALL); + + new->type = SETUP_APPLE_PROPERTIES; + new->len = size; + new->next = 0; + + data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data; + if (!data) { + boot_params->hdr.setup_data = (unsigned long)new; + } else { + while (data->next) + data = (struct setup_data *)(unsigned long)data->next; + data->next = (unsigned long)new; + } +} + +static const efi_char16_t apple[] = L"Apple"; + +static void setup_quirks(struct boot_params *boot_params) +{ + efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long) + efi_table_attr(efi_system_table(), fw_vendor); + + if (!memcmp(fw_vendor, apple, sizeof(apple))) { + if (IS_ENABLED(CONFIG_APPLE_PROPERTIES)) + retrieve_apple_device_properties(boot_params); + } +} + +/* + * See if we have Universal Graphics Adapter (UGA) protocol + */ +static efi_status_t +setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size) +{ + efi_status_t status; + u32 width, height; + void **uga_handle = NULL; + efi_uga_draw_protocol_t *uga = 
NULL, *first_uga; + efi_handle_t handle; + int i; + + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, + (void **)&uga_handle); + if (status != EFI_SUCCESS) + return status; + + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, + uga_proto, NULL, &size, uga_handle); + if (status != EFI_SUCCESS) + goto free_handle; + + height = 0; + width = 0; + + first_uga = NULL; + for_each_efi_handle(handle, uga_handle, size, i) { + efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID; + u32 w, h, depth, refresh; + void *pciio; + + status = efi_bs_call(handle_protocol, handle, uga_proto, + (void **)&uga); + if (status != EFI_SUCCESS) + continue; + + pciio = NULL; + efi_bs_call(handle_protocol, handle, &pciio_proto, &pciio); + + status = efi_call_proto(uga, get_mode, &w, &h, &depth, &refresh); + if (status == EFI_SUCCESS && (!first_uga || pciio)) { + width = w; + height = h; + + /* + * Once we've found a UGA supporting PCIIO, + * don't bother looking any further. + */ + if (pciio) + break; + + first_uga = uga; + } + } + + if (!width && !height) + goto free_handle; + + /* EFI framebuffer */ + si->orig_video_isVGA = VIDEO_TYPE_EFI; + + si->lfb_depth = 32; + si->lfb_width = width; + si->lfb_height = height; + + si->red_size = 8; + si->red_pos = 16; + si->green_size = 8; + si->green_pos = 8; + si->blue_size = 8; + si->blue_pos = 0; + si->rsvd_size = 8; + si->rsvd_pos = 24; + +free_handle: + efi_bs_call(free_pool, uga_handle); + + return status; +} + +static void setup_graphics(struct boot_params *boot_params) +{ + efi_guid_t graphics_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID; + struct screen_info *si; + efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; + efi_status_t status; + unsigned long size; + void **gop_handle = NULL; + void **uga_handle = NULL; + + si = &boot_params->screen_info; + memset(si, 0, sizeof(*si)); + + size = 0; + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, + &graphics_proto, NULL, &size, gop_handle); + if (status == EFI_BUFFER_TOO_SMALL) + status = efi_setup_gop(si, &graphics_proto, size); + + if (status != EFI_SUCCESS) { + size = 0; + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, + &uga_proto, NULL, &size, uga_handle); + if (status == EFI_BUFFER_TOO_SMALL) + setup_uga(si, &uga_proto, size); + } +} + + +static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status) +{ + efi_bs_call(exit, handle, status, 0, NULL); + for(;;) + asm("hlt"); +} + +void startup_32(struct boot_params *boot_params); + +void __noreturn efi_stub_entry(efi_handle_t handle, + efi_system_table_t *sys_table_arg, + struct boot_params *boot_params); + +/* + * Because the x86 boot code expects to be passed a boot_params we + * need to create one ourselves (usually the bootloader would create + * one for us). 
+ */ +efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, + efi_system_table_t *sys_table_arg) +{ + struct boot_params *boot_params; + struct setup_header *hdr; + efi_loaded_image_t *image; + void *image_base; + efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID; + int options_size = 0; + efi_status_t status; + char *cmdline_ptr; + unsigned long ramdisk_addr; + unsigned long ramdisk_size; + + sys_table = sys_table_arg; + + /* Check if we were booted by the EFI firmware */ + if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) + efi_exit(handle, EFI_INVALID_PARAMETER); + + status = efi_bs_call(handle_protocol, handle, &proto, (void **)&image); + if (status != EFI_SUCCESS) { + efi_printk("Failed to get handle for LOADED_IMAGE_PROTOCOL\n"); + efi_exit(handle, status); + } + + image_base = efi_table_attr(image, image_base); + image_offset = (void *)startup_32 - image_base; + + hdr = &((struct boot_params *)image_base)->hdr; + + status = efi_allocate_pages(0x4000, (unsigned long *)&boot_params, ULONG_MAX); + if (status != EFI_SUCCESS) { + efi_printk("Failed to allocate lowmem for boot params\n"); + efi_exit(handle, status); + } + + memset(boot_params, 0x0, 0x4000); + + hdr = &boot_params->hdr; + + /* Copy the second sector to boot_params */ + memcpy(&hdr->jump, image_base + 512, 512); + + /* + * Fill out some of the header fields ourselves because the + * EFI firmware loader doesn't load the first sector. + */ + hdr->root_flags = 1; + hdr->vid_mode = 0xffff; + hdr->boot_flag = 0xAA55; + + hdr->type_of_loader = 0x21; + + /* Convert unicode cmdline to ascii */ + cmdline_ptr = efi_convert_cmdline(image, &options_size, ULONG_MAX); + if (!cmdline_ptr) + goto fail; + + hdr->cmd_line_ptr = (unsigned long)cmdline_ptr; + /* Fill in upper bits of command line address, NOP on 32 bit */ + boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32; + + hdr->ramdisk_image = 0; + hdr->ramdisk_size = 0; + + if (efi_is_native()) { + status = efi_parse_options(cmdline_ptr); + if (status != EFI_SUCCESS) + goto fail2; + + if (!noinitrd()) { + status = efi_load_initrd(image, &ramdisk_addr, + &ramdisk_size, + hdr->initrd_addr_max, + ULONG_MAX); + if (status != EFI_SUCCESS) + goto fail2; + hdr->ramdisk_image = ramdisk_addr & 0xffffffff; + hdr->ramdisk_size = ramdisk_size & 0xffffffff; + boot_params->ext_ramdisk_image = (u64)ramdisk_addr >> 32; + boot_params->ext_ramdisk_size = (u64)ramdisk_size >> 32; + } + } + + efi_stub_entry(handle, sys_table, boot_params); + /* not reached */ + +fail2: + efi_free(options_size, (unsigned long)cmdline_ptr); +fail: + efi_free(0x4000, (unsigned long)boot_params); + + efi_exit(handle, status); +} + +static void add_e820ext(struct boot_params *params, + struct setup_data *e820ext, u32 nr_entries) +{ + struct setup_data *data; + + e820ext->type = SETUP_E820_EXT; + e820ext->len = nr_entries * sizeof(struct boot_e820_entry); + e820ext->next = 0; + + data = (struct setup_data *)(unsigned long)params->hdr.setup_data; + + while (data && data->next) + data = (struct setup_data *)(unsigned long)data->next; + + if (data) + data->next = (unsigned long)e820ext; + else + params->hdr.setup_data = (unsigned long)e820ext; +} + +static efi_status_t +setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_size) +{ + struct boot_e820_entry *entry = params->e820_table; + struct efi_info *efi = ¶ms->efi_info; + struct boot_e820_entry *prev = NULL; + u32 nr_entries; + u32 nr_desc; + int i; + + nr_entries = 0; + nr_desc = efi->efi_memmap_size / efi->efi_memdesc_size; 
+ + for (i = 0; i < nr_desc; i++) { + efi_memory_desc_t *d; + unsigned int e820_type = 0; + unsigned long m = efi->efi_memmap; + +#ifdef CONFIG_X86_64 + m |= (u64)efi->efi_memmap_hi << 32; +#endif + + d = efi_early_memdesc_ptr(m, efi->efi_memdesc_size, i); + switch (d->type) { + case EFI_RESERVED_TYPE: + case EFI_RUNTIME_SERVICES_CODE: + case EFI_RUNTIME_SERVICES_DATA: + case EFI_MEMORY_MAPPED_IO: + case EFI_MEMORY_MAPPED_IO_PORT_SPACE: + case EFI_PAL_CODE: + e820_type = E820_TYPE_RESERVED; + break; + + case EFI_UNUSABLE_MEMORY: + e820_type = E820_TYPE_UNUSABLE; + break; + + case EFI_ACPI_RECLAIM_MEMORY: + e820_type = E820_TYPE_ACPI; + break; + + case EFI_LOADER_CODE: + case EFI_LOADER_DATA: + case EFI_BOOT_SERVICES_CODE: + case EFI_BOOT_SERVICES_DATA: + case EFI_CONVENTIONAL_MEMORY: + if (efi_soft_reserve_enabled() && + (d->attribute & EFI_MEMORY_SP)) + e820_type = E820_TYPE_SOFT_RESERVED; + else + e820_type = E820_TYPE_RAM; + break; + + case EFI_ACPI_MEMORY_NVS: + e820_type = E820_TYPE_NVS; + break; + + case EFI_PERSISTENT_MEMORY: + e820_type = E820_TYPE_PMEM; + break; + + default: + continue; + } + + /* Merge adjacent mappings */ + if (prev && prev->type == e820_type && + (prev->addr + prev->size) == d->phys_addr) { + prev->size += d->num_pages << 12; + continue; + } + + if (nr_entries == ARRAY_SIZE(params->e820_table)) { + u32 need = (nr_desc - i) * sizeof(struct e820_entry) + + sizeof(struct setup_data); + + if (!e820ext || e820ext_size < need) + return EFI_BUFFER_TOO_SMALL; + + /* boot_params map full, switch to e820 extended */ + entry = (struct boot_e820_entry *)e820ext->data; + } + + entry->addr = d->phys_addr; + entry->size = d->num_pages << PAGE_SHIFT; + entry->type = e820_type; + prev = entry++; + nr_entries++; + } + + if (nr_entries > ARRAY_SIZE(params->e820_table)) { + u32 nr_e820ext = nr_entries - ARRAY_SIZE(params->e820_table); + + add_e820ext(params, e820ext, nr_e820ext); + nr_entries -= nr_e820ext; + } + + params->e820_entries = (u8)nr_entries; + + return EFI_SUCCESS; +} + +static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext, + u32 *e820ext_size) +{ + efi_status_t status; + unsigned long size; + + size = sizeof(struct setup_data) + + sizeof(struct e820_entry) * nr_desc; + + if (*e820ext) { + efi_bs_call(free_pool, *e820ext); + *e820ext = NULL; + *e820ext_size = 0; + } + + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, + (void **)e820ext); + if (status == EFI_SUCCESS) + *e820ext_size = size; + + return status; +} + +static efi_status_t allocate_e820(struct boot_params *params, + struct setup_data **e820ext, + u32 *e820ext_size) +{ + unsigned long map_size, desc_size, buff_size; + struct efi_boot_memmap boot_map; + efi_memory_desc_t *map; + efi_status_t status; + __u32 nr_desc; + + boot_map.map = ↦ + boot_map.map_size = &map_size; + boot_map.desc_size = &desc_size; + boot_map.desc_ver = NULL; + boot_map.key_ptr = NULL; + boot_map.buff_size = &buff_size; + + status = efi_get_memory_map(&boot_map); + if (status != EFI_SUCCESS) + return status; + + nr_desc = buff_size / desc_size; + + if (nr_desc > ARRAY_SIZE(params->e820_table)) { + u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table); + + status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size); + if (status != EFI_SUCCESS) + return status; + } + + return EFI_SUCCESS; +} + +struct exit_boot_struct { + struct boot_params *boot_params; + struct efi_info *efi; +}; + +static efi_status_t exit_boot_func(struct efi_boot_memmap *map, + void *priv) +{ + const char *signature; + struct 
exit_boot_struct *p = priv; + + signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE + : EFI32_LOADER_SIGNATURE; + memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32)); + + p->efi->efi_systab = (unsigned long)efi_system_table(); + p->efi->efi_memdesc_size = *map->desc_size; + p->efi->efi_memdesc_version = *map->desc_ver; + p->efi->efi_memmap = (unsigned long)*map->map; + p->efi->efi_memmap_size = *map->map_size; + +#ifdef CONFIG_X86_64 + p->efi->efi_systab_hi = (unsigned long)efi_system_table() >> 32; + p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32; +#endif + + return EFI_SUCCESS; +} + +static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) +{ + unsigned long map_sz, key, desc_size, buff_size; + efi_memory_desc_t *mem_map; + struct setup_data *e820ext = NULL; + __u32 e820ext_size = 0; + efi_status_t status; + __u32 desc_version; + struct efi_boot_memmap map; + struct exit_boot_struct priv; + + map.map = &mem_map; + map.map_size = &map_sz; + map.desc_size = &desc_size; + map.desc_ver = &desc_version; + map.key_ptr = &key; + map.buff_size = &buff_size; + priv.boot_params = boot_params; + priv.efi = &boot_params->efi_info; + + status = allocate_e820(boot_params, &e820ext, &e820ext_size); + if (status != EFI_SUCCESS) + return status; + + /* Might as well exit boot services now */ + status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func); + if (status != EFI_SUCCESS) + return status; + + /* Historic? */ + boot_params->alt_mem_k = 32 * 1024; + + status = setup_e820(boot_params, e820ext, e820ext_size); + if (status != EFI_SUCCESS) + return status; + + return EFI_SUCCESS; +} + +/* + * On success, we return the address of startup_32, which has potentially been + * relocated by efi_relocate_kernel. + * On failure, we exit to the firmware via efi_exit instead of returning. + */ +unsigned long efi_main(efi_handle_t handle, + efi_system_table_t *sys_table_arg, + struct boot_params *boot_params) +{ + unsigned long bzimage_addr = (unsigned long)startup_32; + unsigned long buffer_start, buffer_end; + struct setup_header *hdr = &boot_params->hdr; + efi_status_t status; + unsigned long cmdline_paddr; + + sys_table = sys_table_arg; + + /* Check if we were booted by the EFI firmware */ + if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) + efi_exit(handle, EFI_INVALID_PARAMETER); + + /* + * If the kernel isn't already loaded at a suitable address, + * relocate it. + * + * It must be loaded above LOAD_PHYSICAL_ADDR. + * + * The maximum address for 64-bit is 1 << 46 for 4-level paging. This + * is defined as the macro MAXMEM, but unfortunately that is not a + * compile-time constant if 5-level paging is configured, so we instead + * define our own macro for use here. + * + * For 32-bit, the maximum address is complicated to figure out, for + * now use KERNEL_IMAGE_SIZE, which will be 512MiB, the same as what + * KASLR uses. + * + * Also relocate it if image_offset is zero, i.e. we weren't loaded by + * LoadImage, but we are not aligned correctly. 
+ */ + + buffer_start = ALIGN(bzimage_addr - image_offset, + hdr->kernel_alignment); + buffer_end = buffer_start + hdr->init_size; + + if ((buffer_start < LOAD_PHYSICAL_ADDR) || + (IS_ENABLED(CONFIG_X86_32) && buffer_end > KERNEL_IMAGE_SIZE) || + (IS_ENABLED(CONFIG_X86_64) && buffer_end > MAXMEM_X86_64_4LEVEL) || + (image_offset == 0 && !IS_ALIGNED(bzimage_addr, + hdr->kernel_alignment))) { + status = efi_relocate_kernel(&bzimage_addr, + hdr->init_size, hdr->init_size, + hdr->pref_address, + hdr->kernel_alignment, + LOAD_PHYSICAL_ADDR); + if (status != EFI_SUCCESS) { + efi_printk("efi_relocate_kernel() failed!\n"); + goto fail; + } + /* + * Now that we've copied the kernel elsewhere, we no longer + * have a set up block before startup_32(), so reset image_offset + * to zero in case it was set earlier. + */ + image_offset = 0; + } + + /* + * efi_pe_entry() may have been called before efi_main(), in which + * case this is the second time we parse the cmdline. This is ok, + * parsing the cmdline multiple times does not have side-effects. + */ + cmdline_paddr = ((u64)hdr->cmd_line_ptr | + ((u64)boot_params->ext_cmd_line_ptr << 32)); + efi_parse_options((char *)cmdline_paddr); + + /* + * At this point, an initrd may already have been loaded, either by + * the bootloader and passed via bootparams, or loaded from a initrd= + * command line option by efi_pe_entry() above. In either case, we + * permit an initrd loaded from the LINUX_EFI_INITRD_MEDIA_GUID device + * path to supersede it. + */ + if (!noinitrd()) { + unsigned long addr, size; + + status = efi_load_initrd_dev_path(&addr, &size, ULONG_MAX); + if (status == EFI_SUCCESS) { + hdr->ramdisk_image = (u32)addr; + hdr->ramdisk_size = (u32)size; + boot_params->ext_ramdisk_image = (u64)addr >> 32; + boot_params->ext_ramdisk_size = (u64)size >> 32; + } else if (status != EFI_NOT_FOUND) { + efi_printk("efi_load_initrd_dev_path() failed!\n"); + goto fail; + } + } + + /* + * If the boot loader gave us a value for secure_boot then we use that, + * otherwise we ask the BIOS. 
+ */ + if (boot_params->secure_boot == efi_secureboot_mode_unset) + boot_params->secure_boot = efi_get_secureboot(); + + /* Ask the firmware to clear memory on unclean shutdown */ + efi_enable_reset_attack_mitigation(); + + efi_random_get_seed(); + + efi_retrieve_tpm2_eventlog(); + + setup_graphics(boot_params); + + setup_efi_pci(boot_params); + + setup_quirks(boot_params); + + status = exit_boot(boot_params, handle); + if (status != EFI_SUCCESS) { + efi_printk("exit_boot() failed!\n"); + goto fail; + } + + return bzimage_addr; +fail: + efi_printk("efi_main() failed!\n"); + + efi_exit(handle, status); +} diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c index 58452fde92cc..5737cb0fcd44 100644 --- a/drivers/firmware/efi/memattr.c +++ b/drivers/firmware/efi/memattr.c @@ -13,6 +13,7 @@ #include <asm/early_ioremap.h> static int __initdata tbl_size; +unsigned long __ro_after_init efi_mem_attr_table = EFI_INVALID_TABLE_ADDR; /* * Reserve the memory associated with the Memory Attributes configuration @@ -22,13 +23,13 @@ int __init efi_memattr_init(void) { efi_memory_attributes_table_t *tbl; - if (efi.mem_attr_table == EFI_INVALID_TABLE_ADDR) + if (efi_mem_attr_table == EFI_INVALID_TABLE_ADDR) return 0; - tbl = early_memremap(efi.mem_attr_table, sizeof(*tbl)); + tbl = early_memremap(efi_mem_attr_table, sizeof(*tbl)); if (!tbl) { pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n", - efi.mem_attr_table); + efi_mem_attr_table); return -ENOMEM; } @@ -39,7 +40,7 @@ int __init efi_memattr_init(void) } tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size; - memblock_reserve(efi.mem_attr_table, tbl_size); + memblock_reserve(efi_mem_attr_table, tbl_size); set_bit(EFI_MEM_ATTR, &efi.flags); unmap: @@ -147,10 +148,10 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm, if (WARN_ON(!efi_enabled(EFI_MEMMAP))) return 0; - tbl = memremap(efi.mem_attr_table, tbl_size, MEMREMAP_WB); + tbl = memremap(efi_mem_attr_table, tbl_size, MEMREMAP_WB); if (!tbl) { pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n", - efi.mem_attr_table); + efi_mem_attr_table); return -ENOMEM; } diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c index 7effff969eb9..73089a24f04b 100644 --- a/drivers/firmware/efi/reboot.c +++ b/drivers/firmware/efi/reboot.c @@ -15,7 +15,7 @@ void efi_reboot(enum reboot_mode reboot_mode, const char *__unused) const char *str[] = { "cold", "warm", "shutdown", "platform" }; int efi_mode, cap_reset_mode; - if (!efi_enabled(EFI_RUNTIME_SERVICES)) + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_RESET_SYSTEM)) return; switch (reboot_mode) { @@ -64,7 +64,7 @@ static void efi_power_off(void) static int __init efi_shutdown_init(void) { - if (!efi_enabled(EFI_RUNTIME_SERVICES)) + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_RESET_SYSTEM)) return -ENODEV; if (efi_poweroff_required()) { diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index 65fffaa22210..1410beaef5c3 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c @@ -40,9 +40,9 @@ * code doesn't get too cluttered: */ #define efi_call_virt(f, args...) \ - efi_call_virt_pointer(efi.systab->runtime, f, args) + efi_call_virt_pointer(efi.runtime, f, args) #define __efi_call_virt(f, args...) 
\ - __efi_call_virt_pointer(efi.systab->runtime, f, args) + __efi_call_virt_pointer(efi.runtime, f, args) struct efi_runtime_work efi_rts_work; diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index 436d1776bc7b..5f2a4d162795 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -1071,7 +1071,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_iter_end); * entry on the list. It is safe for @func to remove entries in the * list via efivar_entry_delete(). * - * You MUST call efivar_enter_iter_begin() before this function, and + * You MUST call efivar_entry_iter_begin() before this function, and * efivar_entry_iter_end() afterwards. * * It is possible to begin iteration from an arbitrary entry within diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c index 03b43b7a6d1d..f71eaa5bf52d 100644 --- a/drivers/firmware/imx/imx-scu.c +++ b/drivers/firmware/imx/imx-scu.c @@ -29,6 +29,7 @@ struct imx_sc_chan { struct mbox_client cl; struct mbox_chan *ch; int idx; + struct completion tx_done; }; struct imx_sc_ipc { @@ -100,6 +101,14 @@ int imx_scu_get_handle(struct imx_sc_ipc **ipc) } EXPORT_SYMBOL(imx_scu_get_handle); +/* Callback called when the word of a message is ack-ed, eg read by SCU */ +static void imx_scu_tx_done(struct mbox_client *cl, void *mssg, int r) +{ + struct imx_sc_chan *sc_chan = container_of(cl, struct imx_sc_chan, cl); + + complete(&sc_chan->tx_done); +} + static void imx_scu_rx_callback(struct mbox_client *c, void *msg) { struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl); @@ -149,6 +158,19 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg) for (i = 0; i < hdr->size; i++) { sc_chan = &sc_ipc->chans[i % 4]; + + /* + * SCU requires that all messages words are written + * sequentially but linux MU driver implements multiple + * independent channels for each register so ordering between + * different channels must be ensured by SCU API interface. + * + * Wait for tx_done before every send to ensure that no + * queueing happens at the mailbox channel level. + */ + wait_for_completion(&sc_chan->tx_done); + reinit_completion(&sc_chan->tx_done); + ret = mbox_send_message(sc_chan->ch, &data[i]); if (ret < 0) return ret; @@ -247,6 +269,11 @@ static int imx_scu_probe(struct platform_device *pdev) cl->knows_txdone = true; cl->rx_callback = imx_scu_rx_callback; + /* Initial tx_done completion as "done" */ + cl->tx_done = imx_scu_tx_done; + init_completion(&sc_chan->tx_done); + complete(&sc_chan->tx_done); + sc_chan->sc_ipc = sc_ipc; sc_chan->idx = i % 4; sc_chan->ch = mbox_request_channel_byname(cl, chan_name); diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c index 4b56a587dacd..d073cb3ce699 100644 --- a/drivers/firmware/imx/misc.c +++ b/drivers/firmware/imx/misc.c @@ -16,7 +16,7 @@ struct imx_sc_msg_req_misc_set_ctrl { u32 ctrl; u32 val; u16 resource; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_req_cpu_start { struct imx_sc_rpc_msg hdr; @@ -24,18 +24,18 @@ struct imx_sc_msg_req_cpu_start { u32 address_lo; u16 resource; u8 enable; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_req_misc_get_ctrl { struct imx_sc_rpc_msg hdr; u32 ctrl; u16 resource; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_resp_misc_get_ctrl { struct imx_sc_rpc_msg hdr; u32 val; -} __packed; +} __packed __aligned(4); /* * This function sets a miscellaneous control value. 
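The imx-scu change above serializes MU transfers by waiting on a per-channel completion before every mbox_send_message(). As a rough, self-contained sketch of that pattern (the demo_* names are hypothetical and not part of the patch): the completion starts out completed so the first send does not block, and the mailbox tx_done callback re-arms it for the next word.

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/types.h>

struct demo_chan {
	struct mbox_client cl;
	struct mbox_chan *ch;
	struct completion tx_done;
};

/* Called by the mailbox framework once the remote side has read the word. */
static void demo_tx_done(struct mbox_client *cl, void *mssg, int r)
{
	struct demo_chan *dc = container_of(cl, struct demo_chan, cl);

	complete(&dc->tx_done);
}

static void demo_chan_init(struct demo_chan *dc)
{
	dc->cl.tx_done = demo_tx_done;
	init_completion(&dc->tx_done);
	complete(&dc->tx_done);		/* mark "done" so the first send proceeds */
}

/* Send one 32-bit word, never letting writes queue up in the mailbox. */
static int demo_send_word(struct demo_chan *dc, u32 *word)
{
	wait_for_completion(&dc->tx_done);
	reinit_completion(&dc->tx_done);

	return mbox_send_message(dc->ch, word);
}

Used once per message word, this keeps the words strictly ordered across the independent MU channels without relying on any queueing inside the mailbox core, matching the rationale given in the comment added to imx_scu_ipc_write() above.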
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c index b556612207e5..af3ae0087de4 100644 --- a/drivers/firmware/imx/scu-pd.c +++ b/drivers/firmware/imx/scu-pd.c @@ -61,7 +61,7 @@ struct imx_sc_msg_req_set_resource_power_mode { struct imx_sc_rpc_msg hdr; u16 resource; u8 mode; -} __packed; +} __packed __aligned(4); #define IMX_SCU_PD_NAME_SIZE 20 struct imx_sc_pm_domain { diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c index 4adeb7a2bdf5..715a45442d1c 100644 --- a/drivers/firmware/pcdp.c +++ b/drivers/firmware/pcdp.c @@ -80,6 +80,8 @@ setup_vga_console(struct pcdp_device *dev) #endif } +extern unsigned long hcdp_phys; + int __init efi_setup_pcdp_console(char *cmdline) { @@ -89,11 +91,11 @@ efi_setup_pcdp_console(char *cmdline) int i, serial = 0; int rc = -ENODEV; - if (efi.hcdp == EFI_INVALID_TABLE_ADDR) + if (hcdp_phys == EFI_INVALID_TABLE_ADDR) return -ENODEV; - pcdp = early_memremap(efi.hcdp, 4096); - printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp); + pcdp = early_memremap(hcdp_phys, 4096); + printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, hcdp_phys); if (strstr(cmdline, "console=hcdp")) { if (pcdp->rev < 3) diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c index 6a445397771c..873841af8d57 100644 --- a/drivers/firmware/psci/psci_checker.c +++ b/drivers/firmware/psci/psci_checker.c @@ -84,7 +84,7 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus, /* Try to power down all CPUs in the mask. */ for_each_cpu(cpu, cpus) { - int ret = cpu_down(cpu); + int ret = remove_cpu(cpu); /* * cpu_down() checks the number of online CPUs before the TOS @@ -116,7 +116,7 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus, /* Try to power up all the CPUs that have been offlined. */ for_each_cpu(cpu, offlined_cpus) { - int ret = cpu_up(cpu); + int ret = add_cpu(cpu); if (ret != 0) { pr_err("Error occurred (%d) while trying " diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig index 92ce6d85802c..4cc0e630ab79 100644 --- a/drivers/fsi/Kconfig +++ b/drivers/fsi/Kconfig @@ -55,6 +55,7 @@ config FSI_MASTER_AST_CF config FSI_MASTER_ASPEED tristate "FSI ASPEED master" + depends on HAS_IOMEM help This option enables a FSI master that is present behind an OPB bridge in the AST2600. 
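The imx_sc_msg_* structures changed above (in misc.c and scu-pd.c) gain __aligned(4) next to __packed. A minimal illustration of what that changes, using a hypothetical struct rather than one from the patch: __packed alone drops the type's alignment to 1, so the compiler is free to fall back to byte-sized accesses, while adding __aligned(4) keeps the member offsets packed but restores a 4-byte object alignment, presumably so the messages can be handled as whole 32-bit words by the MU transport.

#include <linux/types.h>

/* Hypothetical message layout, not taken from the patch. */
struct demo_msg {
	u32 word0;
	u16 half;
	u8  byte;
} __packed;			/* offsets 0/4/6, sizeof == 7, alignof == 1 */

struct demo_msg_aligned {
	u32 word0;
	u16 half;
	u8  byte;
} __packed __aligned(4);	/* same offsets, sizeof == 8, alignof == 4 */

Note that the combined attributes leave the member offsets untouched but round sizeof up to a multiple of the requested alignment, which is why the two forms are layout-compatible apart from tail padding.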
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 31fee5e918b7..0017367e94ee 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -21,18 +21,21 @@ #include "gpiolib.h" #include "gpiolib-acpi.h" -#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l -#define QUIRK_NO_WAKEUP 0x02l - static int run_edge_events_on_boot = -1; module_param(run_edge_events_on_boot, int, 0444); MODULE_PARM_DESC(run_edge_events_on_boot, "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto"); -static int honor_wakeup = -1; -module_param(honor_wakeup, int, 0444); -MODULE_PARM_DESC(honor_wakeup, - "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto"); +static char *ignore_wake; +module_param(ignore_wake, charp, 0444); +MODULE_PARM_DESC(ignore_wake, + "controller@pin combos on which to ignore the ACPI wake flag " + "ignore_wake=controller@pin[,controller@pin[,...]]"); + +struct acpi_gpiolib_dmi_quirk { + bool no_edge_events_on_boot; + char *ignore_wake; +}; /** * struct acpi_gpio_event - ACPI GPIO event handler data @@ -202,6 +205,57 @@ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio) acpi_gpiochip_request_irq(acpi_gpio, event); } +static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in) +{ + const char *controller, *pin_str; + int len, pin; + char *endp; + + controller = ignore_wake; + while (controller) { + pin_str = strchr(controller, '@'); + if (!pin_str) + goto err; + + len = pin_str - controller; + if (len == strlen(controller_in) && + strncmp(controller, controller_in, len) == 0) { + pin = simple_strtoul(pin_str + 1, &endp, 10); + if (*endp != 0 && *endp != ',') + goto err; + + if (pin == pin_in) + return true; + } + + controller = strchr(controller, ','); + if (controller) + controller++; + } + + return false; +err: + pr_err_once("Error invalid value for gpiolib_acpi.ignore_wake: %s\n", + ignore_wake); + return false; +} + +static bool acpi_gpio_irq_is_wake(struct device *parent, + struct acpi_resource_gpio *agpio) +{ + int pin = agpio->pin_table[0]; + + if (agpio->wake_capable != ACPI_WAKE_CAPABLE) + return false; + + if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) { + dev_info(parent, "Ignoring wakeup on pin %d\n", pin); + return false; + } + + return true; +} + /* Always returns AE_OK so that we keep looping over the resources */ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, void *context) @@ -289,7 +343,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, event->handle = evt_handle; event->handler = handler; event->irq = irq; - event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE; + event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio); event->pin = pin; event->desc = desc; @@ -1328,7 +1382,9 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), }, - .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .no_edge_events_on_boot = true, + }, }, { /* @@ -1341,16 +1397,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"), DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"), }, - .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .no_edge_events_on_boot = true, + }, }, { /* - * Various HP X2 10 Cherry Trail models use an external - * embedded-controller connected via I2C + an ACPI GPIO - * 
event handler. The embedded controller generates various - * spurious wakeup events when suspended. So disable wakeup - * for its handler (it uses the only ACPI GPIO event handler). - * This breaks wakeup when opening the lid, the user needs + * HP X2 10 models with Cherry Trail SoC + TI PMIC use an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FF:01 pin 0, causing spurious wakeups. + * When suspending by closing the LID, the power to the USB + * keyboard is turned off, causing INT0002 ACPI events to + * trigger once the XHCI controller notices the keyboard is + * gone. So INT0002 events cause spurious wakeups too. Ignoring + * EC wakes breaks wakeup when opening the lid, the user needs * to press the power-button to wakeup the system. The * alternative is suspend simply not working, which is worse. */ @@ -1358,33 +1418,61 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"), }, - .driver_data = (void *)QUIRK_NO_WAKEUP, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FF:01@0,INT0002:00@2", + }, + }, + { + /* + * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FC:02 pin 28, causing spurious wakeups. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + DMI_MATCH(DMI_BOARD_NAME, "815D"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FC:02@28", + }, + }, + { + /* + * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FF:01 pin 0, causing spurious wakeups. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + DMI_MATCH(DMI_BOARD_NAME, "813E"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FF:01@0", + }, }, {} /* Terminating entry */ }; static int acpi_gpio_setup_params(void) { + const struct acpi_gpiolib_dmi_quirk *quirk = NULL; const struct dmi_system_id *id; - long quirks = 0; id = dmi_first_match(gpiolib_acpi_quirks); if (id) - quirks = (long)id->driver_data; + quirk = id->driver_data; if (run_edge_events_on_boot < 0) { - if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT) + if (quirk && quirk->no_edge_events_on_boot) run_edge_events_on_boot = 0; else run_edge_events_on_boot = 1; } - if (honor_wakeup < 0) { - if (quirks & QUIRK_NO_WAKEUP) - honor_wakeup = 0; - else - honor_wakeup = 1; - } + if (ignore_wake == NULL && quirk && quirk->ignore_wake) + ignore_wake = quirk->ignore_wake; return 0; } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 4d0106ceeba7..00fb91feba70 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -2306,9 +2306,16 @@ static void gpiochip_irq_disable(struct irq_data *d) { struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + /* + * Since we override .irq_disable() we need to mimic the + * behaviour of __irq_disable() in irq/chip.c. + * First call .irq_disable() if it exists, else mimic the + * behaviour of mask_irq() which calls .irq_mask() if + * it exists. 
+ */ if (chip->irq.irq_disable) chip->irq.irq_disable(d); - else + else if (chip->irq.chip->irq_mask) chip->irq.chip->irq_mask(d); gpiochip_disable_irq(chip, d->hwirq); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index f24ed9a1a3e5..337d7cdce8e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -781,11 +781,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, ssize_t result = 0; uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data; - if (size & 3 || *pos & 3) + if (size > 4096 || size & 3 || *pos & 3) return -EINVAL; /* decode offset */ - offset = *pos & GENMASK_ULL(11, 0); + offset = (*pos & GENMASK_ULL(11, 0)) >> 2; se = (*pos & GENMASK_ULL(19, 12)) >> 12; sh = (*pos & GENMASK_ULL(27, 20)) >> 20; cu = (*pos & GENMASK_ULL(35, 28)) >> 28; @@ -823,7 +823,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, while (size) { uint32_t value; - value = data[offset++]; + value = data[result >> 2]; r = put_user(value, (uint32_t *)buf); if (r) { result = r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 39cd545976b7..b8975857d60d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3913,6 +3913,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, if (r) goto out; + amdgpu_fbdev_set_suspend(tmp_adev, 0); + /* must succeed. */ amdgpu_ras_resume(tmp_adev); @@ -4086,6 +4088,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); + amdgpu_fbdev_set_suspend(adev, 1); + /* disable ras on ALL IPs */ if (!(in_ras_intr && !use_baco) && amdgpu_device_ip_need_full_reset(tmp_adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 94e2fd758e01..42f4febe24c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1389,7 +1389,7 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe, static struct drm_driver kms_driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_ATOMIC | + DRIVER_ATOMIC | DRIVER_GEM | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index d3c27a3c43f6..7546da0cc70c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -195,6 +195,7 @@ struct amdgpu_gmc { uint32_t srbm_soft_reset; bool prt_warning; uint64_t stolen_size; + uint32_t sdpif_register; /* apertures */ u64 shared_aperture_start; u64 shared_aperture_end; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 3a1570dafe34..146f96661b6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1013,6 +1013,30 @@ static int psp_dtm_initialize(struct psp_context *psp) return 0; } +static int psp_dtm_unload(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the unloading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + 
kfree(cmd); + + return ret; +} + int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) { /* @@ -1037,7 +1061,7 @@ static int psp_dtm_terminate(struct psp_context *psp) if (!psp->dtm_context.dtm_initialized) return 0; - ret = psp_hdcp_unload(psp); + ret = psp_dtm_unload(psp); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dee446278417..c6e9885c071f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -974,7 +974,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) /* Map SG to device */ r = -ENOMEM; nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); - if (nents != ttm->sg->nents) + if (nents == 0) goto release_sg; /* convert SG to linear array of pages and dma addresses */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 1785fdad6ecb..02702597ddeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -52,7 +52,7 @@ * 1. Primary ring * 2. Async ring */ -#define GFX10_NUM_GFX_RINGS 2 +#define GFX10_NUM_GFX_RINGS_NV1X 1 #define GFX10_MEC_HPD_SIZE 2048 #define F32_CE_PROGRAM_RAM_SIZE 65536 @@ -1304,7 +1304,7 @@ static int gfx_v10_0_sw_init(void *handle) case CHIP_NAVI14: case CHIP_NAVI12: adev->gfx.me.num_me = 1; - adev->gfx.me.num_pipe_per_me = 2; + adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; adev->gfx.mec.num_mec = 2; adev->gfx.mec.num_pipe_per_mec = 4; @@ -2710,18 +2710,20 @@ static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_commit(ring); /* submit cs packet to copy state 0 to next available state */ - ring = &adev->gfx.gfx_ring[1]; - r = amdgpu_ring_alloc(ring, 2); - if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); - return r; - } - - amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); - amdgpu_ring_write(ring, 0); + if (adev->gfx.num_gfx_rings > 1) { + /* maximum supported gfx ring is 2 */ + ring = &adev->gfx.gfx_ring[1]; + r = amdgpu_ring_alloc(ring, 2); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); + return r; + } - amdgpu_ring_commit(ring); + amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_commit(ring); + } return 0; } @@ -2818,39 +2820,41 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); /* Init gfx ring 1 for pipe 1 */ - mutex_lock(&adev->srbm_mutex); - gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1); - ring = &adev->gfx.gfx_ring[1]; - rb_bufsz = order_base_2(ring->ring_size / 8); - tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz); - tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2); - WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp); - /* Initialize the ring buffer's write pointers */ - ring->wptr = 0; - WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); - /* Set the wb address wether it's enabled or not */ - rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); - WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); - WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & - CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); - wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); - WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, - lower_32_bits(wptr_gpu_addr)); - WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, - 
upper_32_bits(wptr_gpu_addr)); - - mdelay(1); - WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp); - - rb_addr = ring->gpu_addr >> 8; - WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr); - WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr)); - WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1); - - gfx_v10_0_cp_gfx_set_doorbell(adev, ring); - mutex_unlock(&adev->srbm_mutex); - + if (adev->gfx.num_gfx_rings > 1) { + mutex_lock(&adev->srbm_mutex); + gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1); + /* maximum supported gfx ring is 2 */ + ring = &adev->gfx.gfx_ring[1]; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2); + WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp); + /* Initialize the ring buffer's write pointers */ + ring->wptr = 0; + WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); + /* Set the wb address wether it's enabled or not */ + rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); + WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); + WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & + CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, + lower_32_bits(wptr_gpu_addr)); + WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, + upper_32_bits(wptr_gpu_addr)); + + mdelay(1); + WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp); + + rb_addr = ring->gpu_addr >> 8; + WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr); + WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr)); + WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1); + + gfx_v10_0_cp_gfx_set_doorbell(adev, ring); + mutex_unlock(&adev->srbm_mutex); + } /* Switch to pipe 0 */ mutex_lock(&adev->srbm_mutex); gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0); @@ -3513,6 +3517,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring) /* reset ring buffer */ ring->wptr = 0; + atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0); amdgpu_ring_clear_ring(ring); } else { amdgpu_ring_clear_ring(ring); @@ -3923,11 +3928,13 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; + amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); return clock; } @@ -3964,7 +3971,8 @@ static int gfx_v10_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS; + adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X; + adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; gfx_v10_0_set_kiq_pm4_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index b33a4eb39193..889154a78c4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1193,6 +1193,14 @@ static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev) return false; } +static bool is_raven_kicker(struct amdgpu_device *adev) +{ + if (adev->pm.fw_version >= 0x41e2b) + return true; + else + return false; +} + static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) { if 
(gfx_v9_0_should_disable_gfxoff(adev->pdev)) @@ -1205,9 +1213,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) break; case CHIP_RAVEN: if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) && - ((adev->gfx.rlc_fw_version != 106 && + ((!is_raven_kicker(adev) && adev->gfx.rlc_fw_version < 531) || - (adev->gfx.rlc_fw_version == 53815) || (adev->gfx.rlc_feature_version < 1) || !adev->gfx.rlc.is_rlc_v2_1)) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; @@ -3656,6 +3663,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) /* reset ring buffer */ ring->wptr = 0; + atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0); amdgpu_ring_clear_ring(ring); } else { amdgpu_ring_clear_ring(ring); @@ -3959,6 +3967,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; + amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) { uint32_t tmp, lsb, msb, i = 0; @@ -3977,6 +3986,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); } mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); return clock; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 90216abf14a4..cc0c273a86f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1272,6 +1272,19 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) } /** + * gmc_v9_0_restore_registers - restores regs + * + * @adev: amdgpu_device pointer + * + * This restores register values, saved at suspend. + */ +static void gmc_v9_0_restore_registers(struct amdgpu_device *adev) +{ + if (adev->asic_type == CHIP_RAVEN) + WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register); +} + +/** * gmc_v9_0_gart_enable - gart enable * * @adev: amdgpu_device pointer @@ -1377,6 +1390,20 @@ static int gmc_v9_0_hw_init(void *handle) } /** + * gmc_v9_0_save_registers - saves regs + * + * @adev: amdgpu_device pointer + * + * This saves potential register values that should be + * restored upon resume + */ +static void gmc_v9_0_save_registers(struct amdgpu_device *adev) +{ + if (adev->asic_type == CHIP_RAVEN) + adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); +} + +/** * gmc_v9_0_gart_disable - gart disable * * @adev: amdgpu_device pointer @@ -1412,9 +1439,16 @@ static int gmc_v9_0_hw_fini(void *handle) static int gmc_v9_0_suspend(void *handle) { + int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - return gmc_v9_0_hw_fini(adev); + r = gmc_v9_0_hw_fini(adev); + if (r) + return r; + + gmc_v9_0_save_registers(adev); + + return 0; } static int gmc_v9_0_resume(void *handle) @@ -1422,6 +1456,7 @@ static int gmc_v9_0_resume(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gmc_v9_0_restore_registers(adev); r = gmc_v9_0_hw_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index ff2e6e1ccde7..6173951db7b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -693,7 +693,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle, bool enable = (state == AMD_CG_STATE_GATE); if (enable) { - if (jpeg_v2_0_is_idle(handle)) + if (!jpeg_v2_0_is_idle(handle)) return -EBUSY; jpeg_v2_0_enable_clock_gating(adev); } else { diff 
--git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index c6d046df4b70..c04c2078a7c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -477,7 +477,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle, continue; if (enable) { - if (jpeg_v2_5_is_idle(handle)) + if (!jpeg_v2_5_is_idle(handle)) return -EBUSY; jpeg_v2_5_enable_clock_gating(adev, i); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 15f3424a1ff7..d8945c31b622 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -89,6 +89,13 @@ #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L #define mmHDP_MEM_POWER_CTRL_BASE_IDX 0 + +/* for Vega20/arcturus regiter offset change */ +#define mmROM_INDEX_VG20 0x00e4 +#define mmROM_INDEX_VG20_BASE_IDX 0 +#define mmROM_DATA_VG20 0x00e5 +#define mmROM_DATA_VG20_BASE_IDX 0 + /* * Indirect registers accessor */ @@ -272,7 +279,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev) static u32 soc15_get_xclk(struct amdgpu_device *adev) { - return adev->clock.spll.reference_freq; + u32 reference_clock = adev->clock.spll.reference_freq; + + if (adev->asic_type == CHIP_RAVEN) + return reference_clock / 4; + + return reference_clock; } @@ -304,6 +316,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev, { u32 *dw_ptr; u32 i, length_dw; + uint32_t rom_index_offset; + uint32_t rom_data_offset; if (bios == NULL) return false; @@ -316,11 +330,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev, dw_ptr = (u32 *)bios; length_dw = ALIGN(length_bytes, 4) / 4; + switch (adev->asic_type) { + case CHIP_VEGA20: + case CHIP_ARCTURUS: + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20); + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20); + break; + default: + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX); + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA); + break; + } + /* set rom index to 0 */ - WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); + WREG32(rom_index_offset, 0); /* read out the rom data */ for (i = 0; i < length_dw; i++) - dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); + dw_ptr[i] = RREG32(rom_data_offset); return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 71f61afdc655..09b0572b838d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1352,7 +1352,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle, if (enable) { /* wait for STATUS to clear */ - if (vcn_v1_0_is_idle(handle)) + if (!vcn_v1_0_is_idle(handle)) return -EBUSY; vcn_v1_0_enable_clock_gating(adev); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index c387c81f8695..b7f17342bbf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -1217,7 +1217,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle, if (enable) { /* wait for STATUS to clear */ - if (vcn_v2_0_is_idle(handle)) + if (!vcn_v2_0_is_idle(handle)) return -EBUSY; vcn_v2_0_enable_clock_gating(adev); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 2d64ba1adf99..678253d81154 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -1672,7 +1672,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle, return 0; if (enable) { - if (vcn_v2_5_is_idle(handle)) + if (!vcn_v2_5_is_idle(handle)) return -EBUSY; vcn_v2_5_enable_clock_gating(adev); } else { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 63e8a12a74bc..6240259b3a93 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -522,8 +522,9 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params) acrtc_state = to_dm_crtc_state(acrtc->base.state); - DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, - amdgpu_dm_vrr_active(acrtc_state)); + DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, + amdgpu_dm_vrr_active(acrtc_state), + acrtc_state->active_planes); amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); drm_crtc_handle_vblank(&acrtc->base); @@ -543,7 +544,18 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params) &acrtc_state->vrr_params.adjust); } - if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) { + /* + * If there aren't any active_planes then DCH HUBP may be clock-gated. + * In that case, pageflip completion interrupts won't fire and pageflip + * completion events won't get delivered. Prevent this by sending + * pending pageflip events from here if a flip is still pending. + * + * If any planes are enabled, use dm_pflip_high_irq() instead, to + * avoid race conditions between flip programming and completion, + * which could cause too early flip completion events. + */ + if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED && + acrtc_state->active_planes == 0) { if (acrtc->event) { drm_crtc_send_vblank_event(&acrtc->base, acrtc->event); acrtc->event = NULL; @@ -1422,6 +1434,73 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) drm_kms_helper_hotplug_event(dev); } +static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) +{ + struct smu_context *smu = &adev->smu; + int ret = 0; + + if (!is_support_sw_smu(adev)) + return 0; + + /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends + * on window driver dc implementation. + * For Navi1x, clock settings of dcn watermarks are fixed. the settings + * should be passed to smu during boot up and resume from s3. + * boot up: dc calculate dcn watermark clock settings within dc_create, + * dcn20_resource_construct + * then call pplib functions below to pass the settings to smu: + * smu_set_watermarks_for_clock_ranges + * smu_set_watermarks_table + * navi10_set_watermarks_table + * smu_write_watermarks_table + * + * For Renoir, clock settings of dcn watermark are also fixed values. 
+ * dc has implemented different flow for window driver: + * dc_hardware_init / dc_set_power_state + * dcn10_init_hw + * notify_wm_ranges + * set_wm_ranges + * -- Linux + * smu_set_watermarks_for_clock_ranges + * renoir_set_watermarks_table + * smu_write_watermarks_table + * + * For Linux, + * dc_hardware_init -> amdgpu_dm_init + * dc_set_power_state --> dm_resume + * + * therefore, this function apply to navi10/12/14 but not Renoir + * * + */ + switch(adev->asic_type) { + case CHIP_NAVI10: + case CHIP_NAVI14: + case CHIP_NAVI12: + break; + default: + return 0; + } + + mutex_lock(&smu->mutex); + + /* pass data to smu controller */ + if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && + !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { + ret = smu_write_watermarks_table(smu); + + if (ret) { + mutex_unlock(&smu->mutex); + DRM_ERROR("Failed to update WMTABLE!\n"); + return ret; + } + smu->watermarks_bitmap |= WATERMARKS_LOADED; + } + + mutex_unlock(&smu->mutex); + + return 0; +} + /** * dm_hw_init() - Initialize DC device * @handle: The base driver device containing the amdgpu_dm device. @@ -1700,6 +1779,8 @@ static int dm_resume(void *handle) amdgpu_dm_irq_resume_late(adev); + amdgpu_dm_smu_write_watermarks_table(adev); + return 0; } @@ -1911,7 +1992,7 @@ static void handle_hpd_irq(void *param) mutex_lock(&aconnector->hpd_lock); #ifdef CONFIG_DRM_AMD_DC_HDCP - if (adev->asic_type >= CHIP_RAVEN) + if (adev->dm.hdcp_workqueue) hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); #endif if (aconnector->fake_enable) @@ -2088,8 +2169,10 @@ static void handle_hpd_rx_irq(void *param) } } #ifdef CONFIG_DRM_AMD_DC_HDCP - if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) - hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); + if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { + if (adev->dm.hdcp_workqueue) + hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); + } #endif if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || (dc_link->type == dc_connection_mst_branch)) @@ -5702,7 +5785,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, drm_connector_attach_vrr_capable_property( &aconnector->base); #ifdef CONFIG_DRM_AMD_DC_HDCP - if (adev->asic_type >= CHIP_RAVEN) + if (adev->dm.hdcp_workqueue) drm_connector_attach_content_protection_property(&aconnector->base, true); #endif } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 5672f7765919..da73161043d5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -451,6 +451,7 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, aconnector->dc_sink); dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; + aconnector->dc_link->cur_link_settings.lane_count = 0; } drm_connector_unregister(connector); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index cb731c1d30b1..fd9e69634c50 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -3401,6 +3401,17 @@ static bool retrieve_link_cap(struct dc_link *link) sink_id.ieee_device_id, sizeof(sink_id.ieee_device_id)); + /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */ + { + uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 }; + + if 
((link->dpcd_caps.sink_dev_id == 0x0010fa) && + !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017, + sizeof(str_mbp_2017))) { + link->reported_link_cap.link_rate = 0x0c; + } + } + core_link_read_dpcd( link, DP_SINK_HW_REVISION_START, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index f36a0d8cedfe..446ba0a7a4b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -840,8 +840,8 @@ static void hubbub1_det_request_size( hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe); - swath_bytes_horz_wc = height * blk256_height * bpe; - swath_bytes_vert_wc = width * blk256_width * bpe; + swath_bytes_horz_wc = width * blk256_height * bpe; + swath_bytes_vert_wc = height * blk256_width * bpe; *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? false : /* full 256B request */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index d51e02fdab4d..5e640f17d3d4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -108,7 +108,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = { .enable_power_gating_plane = dcn20_enable_power_gating_plane, .dpp_pg_control = dcn20_dpp_pg_control, .hubp_pg_control = dcn20_hubp_pg_control, - .dsc_pg_control = NULL, .update_odm = dcn20_update_odm, .dsc_pg_control = dcn20_dsc_pg_control, .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 85f90f3e24cb..e310d67c399a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -335,6 +335,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { .use_urgent_burst_bw = 0 }; +struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = { + .clock_limits = { + { + .state = 0, + .dcfclk_mhz = 560.0, + .fabricclk_mhz = 560.0, + .dispclk_mhz = 513.0, + .dppclk_mhz = 513.0, + .phyclk_mhz = 540.0, + .socclk_mhz = 560.0, + .dscclk_mhz = 171.0, + .dram_speed_mts = 8960.0, + }, + { + .state = 1, + .dcfclk_mhz = 694.0, + .fabricclk_mhz = 694.0, + .dispclk_mhz = 642.0, + .dppclk_mhz = 642.0, + .phyclk_mhz = 600.0, + .socclk_mhz = 694.0, + .dscclk_mhz = 214.0, + .dram_speed_mts = 11104.0, + }, + { + .state = 2, + .dcfclk_mhz = 875.0, + .fabricclk_mhz = 875.0, + .dispclk_mhz = 734.0, + .dppclk_mhz = 734.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 875.0, + .dscclk_mhz = 245.0, + .dram_speed_mts = 14000.0, + }, + { + .state = 3, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 1000.0, + .dispclk_mhz = 1100.0, + .dppclk_mhz = 1100.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1000.0, + .dscclk_mhz = 367.0, + .dram_speed_mts = 16000.0, + }, + { + .state = 4, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + /*Extra state, no dispclk ramping*/ + { + .state = 5, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + }, + .num_states = 5, + .sr_exit_time_us = 8.6, + .sr_enter_plus_exit_time_us = 10.9, + .urgent_latency_us = 4.0, + 
.urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 40.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + .ideal_dram_bw_after_urgent_percent = 40.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 2, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 131, + .urgent_out_of_order_return_per_channel_bytes = 256, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 8, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 404.0, + .dummy_pstate_latency_us = 5.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3850, + .xfc_bus_transport_time_us = 20, + .xfc_xbuf_latency_tolerance_us = 4, + .use_urgent_burst_bw = 0 +}; + struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 }; #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL @@ -3291,6 +3402,9 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( uint32_t hw_internal_rev) { + if (ASICREV_IS_NAVI14_M(hw_internal_rev)) + return &dcn2_0_nv14_soc; + if (ASICREV_IS_NAVI12_P(hw_internal_rev)) return &dcn2_0_nv12_soc; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index 4861aa5c59ae..fddbd59bf4f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -116,7 +116,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = { .enable_power_gating_plane = dcn20_enable_power_gating_plane, .dpp_pg_control = dcn20_dpp_pg_control, .hubp_pg_control = dcn20_hubp_pg_control, - .dsc_pg_control = NULL, .update_odm = dcn20_update_odm, .dsc_pg_control = dcn20_dsc_pg_control, .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index f730b94ac3c0..55246711700b 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -46,8 +46,8 @@ static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) - status = (hdcp->auth.msg.hdcp2.rxcaps_dp[2] & HDCP_2_2_RX_CAPS_VERSION_VAL) && - HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[0]) ? + status = (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == HDCP_2_2_RX_CAPS_VERSION_VAL) && + HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[2]) ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; else diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h index b6f74bf4af02..27bb8c1ab858 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h @@ -7376,6 +7376,8 @@ #define mmCRTC4_CRTC_DRR_CONTROL 0x0f3e #define mmCRTC4_CRTC_DRR_CONTROL_BASE_IDX 2 +#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x395d +#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2 // addressBlock: dce_dc_fmt4_dispdec // base address: 0x2000 diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 99ad4ddbe12f..96e81c7bc266 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -222,7 +222,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, { int ret = 0; - if (min <= 0 && max <= 0) + if (min < 0 && max < 0) return -EINVAL; if (!smu_clk_dpm_is_enabled(smu, clk_type)) @@ -2006,8 +2006,11 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { smu_set_watermarks_table(smu, table, clock_ranges); - smu->watermarks_bitmap |= WATERMARKS_EXIST; - smu->watermarks_bitmap &= ~WATERMARKS_LOADED; + + if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) { + smu->watermarks_bitmap |= WATERMARKS_EXIST; + smu->watermarks_bitmap &= ~WATERMARKS_LOADED; + } } mutex_unlock(&smu->mutex); diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 0d73a49166af..aed4d6e60907 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -1063,15 +1063,6 @@ static int navi10_display_config_changed(struct smu_context *smu) int ret = 0; if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && - !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { - ret = smu_write_watermarks_table(smu); - if (ret) - return ret; - - smu->watermarks_bitmap |= WATERMARKS_LOADED; - } - - if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, @@ -1493,6 +1484,7 @@ static int navi10_set_watermarks_table(struct smu_context *smu, *clock_ranges) { int i; + int ret = 0; Watermarks_t *table = watermarks; if (!table || !clock_ranges) @@ -1544,6 +1536,18 @@ static int navi10_set_watermarks_table(struct smu_context *smu, clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; } + smu->watermarks_bitmap |= WATERMARKS_EXIST; + + /* pass data to smu controller */ + if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) { + ret = smu_write_watermarks_table(smu); + if (ret) { + pr_err("Failed to update WMTABLE!"); + return ret; + } + smu->watermarks_bitmap |= WATERMARKS_LOADED; + } + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 861e6410363b..3ad0f4aa3aa3 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -111,8 +111,8 @@ static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = { CLK_MAP(GFXCLK, CLOCK_GFXCLK), CLK_MAP(SCLK, CLOCK_GFXCLK), CLK_MAP(SOCCLK, CLOCK_SOCCLK), - CLK_MAP(UCLK, CLOCK_UMCCLK), - CLK_MAP(MCLK, 
CLOCK_UMCCLK), + CLK_MAP(UCLK, CLOCK_FCLK), + CLK_MAP(MCLK, CLOCK_FCLK), }; static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = { @@ -280,7 +280,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, break; case SMU_MCLK: count = NUM_MEMCLK_DPM_LEVELS; - cur_value = metrics.ClockFrequency[CLOCK_UMCCLK]; + cur_value = metrics.ClockFrequency[CLOCK_FCLK]; break; case SMU_DCEFCLK: count = NUM_DCFCLK_DPM_LEVELS; @@ -806,9 +806,10 @@ static int renoir_set_watermarks_table( clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; } + smu->watermarks_bitmap |= WATERMARKS_EXIST; + /* pass data to smu controller */ - if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && - !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { + if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) { ret = smu_write_watermarks_table(smu); if (ret) { pr_err("Failed to update WMTABLE!"); diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 0dc49479a7eb..c9e5ce135fd4 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -898,6 +898,9 @@ int smu_v11_0_system_features_control(struct smu_context *smu, if (ret) return ret; + bitmap_zero(feature->enabled, feature->feature_num); + bitmap_zero(feature->supported, feature->feature_num); + if (en) { ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); if (ret) @@ -907,9 +910,6 @@ int smu_v11_0_system_features_control(struct smu_context *smu, feature->feature_num); bitmap_copy(feature->supported, (unsigned long *)&feature_mask, feature->feature_num); - } else { - bitmap_zero(feature->enabled, feature->feature_num); - bitmap_zero(feature->supported, feature->feature_num); } return ret; @@ -978,8 +978,12 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks; int ret = 0; - max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), + if (!smu->smu_table.max_sustainable_clocks) + max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL); + else + max_sustainable_clocks = smu->smu_table.max_sustainable_clocks; + smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks; max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100; diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 870e6db2907e..518e6597bf2d 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -458,9 +458,6 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ { int ret = 0; - if (max < min) - return -EINVAL; - switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c index ea5cd1e17304..e7933930a657 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c @@ -146,14 +146,14 @@ static const struct of_device_id komeda_of_match[] = { MODULE_DEVICE_TABLE(of, komeda_of_match); -static int komeda_rt_pm_suspend(struct device *dev) +static int __maybe_unused komeda_rt_pm_suspend(struct device *dev) { struct komeda_drv *mdrv = dev_get_drvdata(dev); return komeda_dev_suspend(mdrv->mdev); } -static int komeda_rt_pm_resume(struct device *dev) +static int __maybe_unused komeda_rt_pm_resume(struct device *dev) { struct komeda_drv *mdrv = dev_get_drvdata(dev); 
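The komeda change above is the usual fix for runtime-PM callbacks that are only referenced from SET_RUNTIME_PM_OPS(): when CONFIG_PM is disabled the macro expands to nothing, the functions become unreferenced, and marking them __maybe_unused silences -Wunused-function without wrapping them in #ifdef CONFIG_PM. A minimal sketch of the pattern, with a hypothetical "foo" driver standing in for komeda (not code from the patch):

#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical driver; "foo" names are placeholders, not part of the patch. */
static int __maybe_unused foo_rt_pm_suspend(struct device *dev)
{
	/* Power down the hardware; only reachable when runtime PM is built in. */
	return 0;
}

static int __maybe_unused foo_rt_pm_resume(struct device *dev)
{
	/* Bring the hardware back up. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	/*
	 * With CONFIG_PM=n this macro expands to nothing, leaving the two
	 * callbacks unreferenced -- hence the __maybe_unused annotation.
	 */
	SET_RUNTIME_PM_OPS(foo_rt_pm_suspend, foo_rt_pm_resume, NULL)
};

The callbacks stay in the translation unit either way; the attribute only tells the compiler that a build without PM support is allowed to drop them silently.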
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c index b615b7dfdd9d..a4fc4e6aee39 100644 --- a/drivers/gpu/drm/bochs/bochs_hw.c +++ b/drivers/gpu/drm/bochs/bochs_hw.c @@ -156,10 +156,8 @@ int bochs_hw_init(struct drm_device *dev) size = min(size, mem); } - if (pci_request_region(pdev, 0, "bochs-drm") != 0) { - DRM_ERROR("Cannot request framebuffer\n"); - return -EBUSY; - } + if (pci_request_region(pdev, 0, "bochs-drm") != 0) + DRM_WARN("Cannot request framebuffer, boot fb still active?\n"); bochs->fb_map = ioremap(addr, size); if (bochs->fb_map == NULL) { diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c index 56f55c53abfd..2dfa2fd2a23b 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c @@ -210,8 +210,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345) if (err) return err; - dpcd[0] = drm_dp_max_link_rate(anx6345->dpcd); - dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]); + dpcd[0] = dp_bw; err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]); if (err) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 67fca439bbfb..24965e53d351 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -1624,28 +1624,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode) frame.colorspace = HDMI_COLORSPACE_RGB; /* Set up colorimetry */ - switch (hdmi->hdmi_data.enc_out_encoding) { - case V4L2_YCBCR_ENC_601: - if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601) - frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; - else + if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) { + switch (hdmi->hdmi_data.enc_out_encoding) { + case V4L2_YCBCR_ENC_601: + if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601) + frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; + else + frame.colorimetry = HDMI_COLORIMETRY_ITU_601; + frame.extended_colorimetry = + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; + break; + case V4L2_YCBCR_ENC_709: + if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709) + frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; + else + frame.colorimetry = HDMI_COLORIMETRY_ITU_709; + frame.extended_colorimetry = + HDMI_EXTENDED_COLORIMETRY_XV_YCC_709; + break; + default: /* Carries no data */ frame.colorimetry = HDMI_COLORIMETRY_ITU_601; + frame.extended_colorimetry = + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; + break; + } + } else { + frame.colorimetry = HDMI_COLORIMETRY_NONE; frame.extended_colorimetry = - HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; - break; - case V4L2_YCBCR_ENC_709: - if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709) - frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; - else - frame.colorimetry = HDMI_COLORIMETRY_ITU_709; - frame.extended_colorimetry = - HDMI_EXTENDED_COLORIMETRY_XV_YCC_709; - break; - default: /* Carries no data */ - frame.colorimetry = HDMI_COLORIMETRY_ITU_601; - frame.extended_colorimetry = - HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; - break; + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; } frame.scan_mode = HDMI_SCAN_MODE_NONE; diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 3709e5ace724..fbdb42d4e772 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -297,7 +297,7 @@ static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr, static 
int tc_aux_wait_busy(struct tc_data *tc) { - return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000); + return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000); } static int tc_aux_write_data(struct tc_data *tc, const void *data, @@ -640,7 +640,7 @@ static int tc_aux_link_setup(struct tc_data *tc) if (ret) goto err; - ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000); + ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000); if (ret == -ETIMEDOUT) { dev_err(tc->dev, "Timeout waiting for PHY to become ready"); return ret; @@ -876,7 +876,7 @@ static int tc_wait_link_training(struct tc_data *tc) int ret; ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE, - LT_LOOPDONE, 1, 1000); + LT_LOOPDONE, 500, 100000); if (ret) { dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n"); return ret; @@ -949,7 +949,7 @@ static int tc_main_link_enable(struct tc_data *tc) dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST); ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl); - ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000); + ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000); if (ret) { dev_err(dev, "timeout waiting for phy become ready"); return ret; diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index 6f6d6d1e60ae..f195a4732e0b 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -140,7 +140,8 @@ static int tfp410_attach(struct drm_bridge *bridge) dvi->connector_type, dvi->ddc); if (ret) { - dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret); + dev_err(dvi->dev, "drm_connector_init_with_ddc() failed: %d\n", + ret); return ret; } diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 6d4a29e99ae2..3035584f6dc7 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -951,7 +951,8 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation) * depending on the hardware this may require the framebuffer * to be in a specific tiling format. 
*/ - if ((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180 || + if (((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0 && + (*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180) || !plane->rotation_property) return false; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index cce0b1bba591..ed0fea2ac322 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1935,7 +1935,7 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, return parent_lct + 1; } -static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs) +static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs) { switch (pdt) { case DP_PEER_DEVICE_DP_LEGACY_CONV: @@ -1965,13 +1965,13 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt, /* Teardown the old pdt, if there is one */ if (port->pdt != DP_PEER_DEVICE_NONE) { - if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { + if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { /* * If the new PDT would also have an i2c bus, * don't bother with reregistering it */ if (new_pdt != DP_PEER_DEVICE_NONE && - drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) { + drm_dp_mst_is_end_device(new_pdt, new_mcs)) { port->pdt = new_pdt; port->mcs = new_mcs; return 0; @@ -1991,7 +1991,7 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt, port->mcs = new_mcs; if (port->pdt != DP_PEER_DEVICE_NONE) { - if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { + if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { /* add i2c over sideband */ ret = drm_dp_mst_register_i2c_bus(&port->aux); } else { @@ -2172,7 +2172,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, } if (port->pdt != DP_PEER_DEVICE_NONE && - drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { + drm_dp_mst_is_end_device(port->pdt, port->mcs)) { port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); drm_connector_set_tile_property(port->connector); @@ -2302,14 +2302,18 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, mutex_unlock(&mgr->lock); } - if (old_ddps != port->ddps) { - if (port->ddps) { - if (!port->input) { - drm_dp_send_enum_path_resources(mgr, mstb, - port); - } + /* + * Reprobe PBN caps on both hotplug, and when re-probing the link + * for our parent mstb + */ + if (old_ddps != port->ddps || !created) { + if (port->ddps && !port->input) { + ret = drm_dp_send_enum_path_resources(mgr, mstb, + port); + if (ret == 1) + changed = true; } else { - port->available_pbn = 0; + port->full_pbn = 0; } } @@ -2401,11 +2405,10 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, port->ddps = conn_stat->displayport_device_plug_status; if (old_ddps != port->ddps) { - if (port->ddps) { - dowork = true; - } else { - port->available_pbn = 0; - } + if (port->ddps && !port->input) + drm_dp_send_enum_path_resources(mgr, mstb, port); + else + port->full_pbn = 0; } new_pdt = port->input ? 
DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; @@ -2556,13 +2559,6 @@ static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mg if (port->input || !port->ddps) continue; - if (!port->available_pbn) { - drm_modeset_lock(&mgr->base.lock, NULL); - drm_dp_send_enum_path_resources(mgr, mstb, port); - drm_modeset_unlock(&mgr->base.lock); - changed = true; - } - if (port->mstb) mstb_child = drm_dp_mst_topology_get_mstb_validated( mgr, port->mstb); @@ -2990,6 +2986,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); if (ret > 0) { + ret = 0; path_res = &txmsg->reply.u.path_resources; if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { @@ -3002,14 +2999,22 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, path_res->port_number, path_res->full_payload_bw_number, path_res->avail_payload_bw_number); - port->available_pbn = - path_res->avail_payload_bw_number; + + /* + * If something changed, make sure we send a + * hotplug + */ + if (port->full_pbn != path_res->full_payload_bw_number || + port->fec_capable != path_res->fec_capable) + ret = 1; + + port->full_pbn = path_res->full_payload_bw_number; port->fec_capable = path_res->fec_capable; } } kfree(txmsg); - return 0; + return ret; } static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb) @@ -3596,13 +3601,9 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb) /* The link address will need to be re-sent on resume */ mstb->link_address_sent = false; - list_for_each_entry(port, &mstb->ports, next) { - /* The PBN for each port will also need to be re-probed */ - port->available_pbn = 0; - + list_for_each_entry(port, &mstb->ports, next) if (port->mstb) drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); - } } /** @@ -4829,41 +4830,102 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port, return false; } -static inline -int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch, - struct drm_dp_mst_topology_state *mst_state) +static int +drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, + struct drm_dp_mst_topology_state *state); + +static int +drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_topology_state *state) { - struct drm_dp_mst_port *port; struct drm_dp_vcpi_allocation *vcpi; - int pbn_limit = 0, pbn_used = 0; + struct drm_dp_mst_port *port; + int pbn_used = 0, ret; + bool found = false; - list_for_each_entry(port, &branch->ports, next) { - if (port->mstb) - if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state)) - return -ENOSPC; + /* Check that we have at least one port in our state that's downstream + * of this branch, otherwise we can skip this branch + */ + list_for_each_entry(vcpi, &state->vcpis, next) { + if (!vcpi->pbn || + !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb)) + continue; - if (port->available_pbn > 0) - pbn_limit = port->available_pbn; + found = true; + break; } - DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n", - branch, pbn_limit); + if (!found) + return 0; - list_for_each_entry(vcpi, &mst_state->vcpis, next) { - if (!vcpi->pbn) - continue; + if (mstb->port_parent) + DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n", + mstb->port_parent->parent, mstb->port_parent, + mstb); + else + DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n", + mstb); + + list_for_each_entry(port, 
&mstb->ports, next) { + ret = drm_dp_mst_atomic_check_port_bw_limit(port, state); + if (ret < 0) + return ret; - if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch)) - pbn_used += vcpi->pbn; + pbn_used += ret; } - DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n", - branch, pbn_used); - if (pbn_used > pbn_limit) { - DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n", - branch); + return pbn_used; +} + +static int +drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, + struct drm_dp_mst_topology_state *state) +{ + struct drm_dp_vcpi_allocation *vcpi; + int pbn_used = 0; + + if (port->pdt == DP_PEER_DEVICE_NONE) + return 0; + + if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { + bool found = false; + + list_for_each_entry(vcpi, &state->vcpis, next) { + if (vcpi->port != port) + continue; + if (!vcpi->pbn) + return 0; + + found = true; + break; + } + if (!found) + return 0; + + /* This should never happen, as it means we tried to + * set a mode before querying the full_pbn + */ + if (WARN_ON(!port->full_pbn)) + return -EINVAL; + + pbn_used = vcpi->pbn; + } else { + pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb, + state); + if (pbn_used <= 0) + return pbn_used; + } + + if (pbn_used > port->full_pbn) { + DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n", + port->parent, port, pbn_used, + port->full_pbn); return -ENOSPC; } - return 0; + + DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n", + port->parent, port, pbn_used, port->full_pbn); + + return pbn_used; } static inline int @@ -5061,9 +5123,15 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state) ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state); if (ret) break; - ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state); - if (ret) + + mutex_lock(&mgr->lock); + ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary, + mst_state); + mutex_unlock(&mgr->lock); + if (ret < 0) break; + else + ret = 0; } return ret; diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index a421a2eed48a..df31e5782eed 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -254,11 +254,16 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) if (ret) goto err_zero_use; - if (obj->import_attach) + if (obj->import_attach) { shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf); - else + } else { + pgprot_t prot = PAGE_KERNEL; + + if (!shmem->map_cached) + prot = pgprot_writecombine(prot); shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, - VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + VM_MAP, prot); + } if (!shmem->vaddr) { DRM_DEBUG_KMS("Failed to vmap pages\n"); @@ -540,8 +545,9 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) } vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; - vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); - vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + if (!shmem->map_cached) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); vma->vm_ops = &drm_gem_shmem_vm_ops; return 0; diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index b481cafdde28..825abe38201a 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -542,10 +542,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, } 
DRM_DEBUG_LEASE("Creating lease\n"); + /* lessee will take the ownership of leases */ lessee = drm_lease_create(lessor, &leases); if (IS_ERR(lessee)) { ret = PTR_ERR(lessee); + idr_destroy(&leases); goto out_leases; } @@ -580,7 +582,6 @@ out_lessee: out_leases: put_unused_fd(fd); - idr_destroy(&leases); DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret); return ret; diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 10336b144c72..d4d64518e11b 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1698,6 +1698,13 @@ static int drm_mode_parse_cmdline_options(const char *str, if (rotation && freestanding) return -EINVAL; + if (!(rotation & DRM_MODE_ROTATE_MASK)) + rotation |= DRM_MODE_ROTATE_0; + + /* Make sure there is exactly one rotation defined */ + if (!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK)) + return -EINVAL; + mode->rotation_reflection = rotation; return 0; diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 86d9b0e45c8c..1de2cde2277c 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -967,7 +967,7 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, index = 0; for_each_sg(sgt->sgl, sg, sgt->nents, count) { - len = sg->length; + len = sg_dma_len(sg); page = sg_page(sg); addr = sg_dma_address(sg); diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 8428ae12dfa5..1f79bc2a881e 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -55,6 +55,7 @@ static const char * const decon_clks_name[] = { struct decon_context { struct device *dev; struct drm_device *drm_dev; + void *dma_priv; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[WINDOWS_NR]; struct exynos_drm_plane_config configs[WINDOWS_NR]; @@ -644,7 +645,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data) decon_clear_channels(ctx->crtc); - return exynos_drm_register_dma(drm_dev, dev); + return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); } static void decon_unbind(struct device *dev, struct device *master, void *data) @@ -654,7 +655,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data) decon_atomic_disable(ctx->crtc); /* detach this sub driver from iommu mapping if supported. */ - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv); } static const struct component_ops decon_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index ff59c641fa80..1eed3327999f 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -40,6 +40,7 @@ struct decon_context { struct device *dev; struct drm_device *drm_dev; + void *dma_priv; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[WINDOWS_NR]; struct exynos_drm_plane_config configs[WINDOWS_NR]; @@ -127,13 +128,13 @@ static int decon_ctx_initialize(struct decon_context *ctx, decon_clear_channels(ctx->crtc); - return exynos_drm_register_dma(drm_dev, ctx->dev); + return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv); } static void decon_ctx_remove(struct decon_context *ctx) { /* detach this sub driver from iommu mapping if supported. 
*/ - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv); } static u32 decon_calc_clkdiv(struct decon_context *ctx, diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c index 9ebc02768847..619f81435c1b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dma.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c @@ -58,7 +58,7 @@ static inline void clear_dma_max_seg_size(struct device *dev) * mapping. */ static int drm_iommu_attach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) + struct device *subdrv_dev, void **dma_priv) { struct exynos_drm_private *priv = drm_dev->dev_private; int ret; @@ -74,7 +74,14 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, return ret; if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { - if (to_dma_iommu_mapping(subdrv_dev)) + /* + * Keep the original DMA mapping of the sub-device and + * restore it on Exynos DRM detach, otherwise the DMA + * framework considers it as IOMMU-less during the next + * probe (in case of deferred probe or modular build) + */ + *dma_priv = to_dma_iommu_mapping(subdrv_dev); + if (*dma_priv) arm_iommu_detach_device(subdrv_dev); ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); @@ -98,19 +105,21 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, * mapping */ static void drm_iommu_detach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) + struct device *subdrv_dev, void **dma_priv) { struct exynos_drm_private *priv = drm_dev->dev_private; - if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) + if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { arm_iommu_detach_device(subdrv_dev); - else if (IS_ENABLED(CONFIG_IOMMU_DMA)) + arm_iommu_attach_device(subdrv_dev, *dma_priv); + } else if (IS_ENABLED(CONFIG_IOMMU_DMA)) iommu_detach_device(priv->mapping, subdrv_dev); clear_dma_max_seg_size(subdrv_dev); } -int exynos_drm_register_dma(struct drm_device *drm, struct device *dev) +int exynos_drm_register_dma(struct drm_device *drm, struct device *dev, + void **dma_priv) { struct exynos_drm_private *priv = drm->dev_private; @@ -137,13 +146,14 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev) priv->mapping = mapping; } - return drm_iommu_attach_device(drm, dev); + return drm_iommu_attach_device(drm, dev, dma_priv); } -void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev) +void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev, + void **dma_priv) { if (IS_ENABLED(CONFIG_EXYNOS_IOMMU)) - drm_iommu_detach_device(drm, dev); + drm_iommu_detach_device(drm, dev, dma_priv); } void exynos_drm_cleanup_dma(struct drm_device *drm) diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index d4d21d8cfb90..6ae9056e7a18 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -223,8 +223,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) return priv->mapping ? 
true : false; } -int exynos_drm_register_dma(struct drm_device *drm, struct device *dev); -void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev); +int exynos_drm_register_dma(struct drm_device *drm, struct device *dev, + void **dma_priv); +void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev, + void **dma_priv); void exynos_drm_cleanup_dma(struct drm_device *drm); #ifdef CONFIG_DRM_EXYNOS_DPI diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 33628d85edad..a85365c56d4d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1773,8 +1773,9 @@ static int exynos_dsi_probe(struct platform_device *pdev) ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies), dsi->supplies); if (ret) { - dev_info(dev, "failed to get regulators: %d\n", ret); - return -EPROBE_DEFER; + if (ret != -EPROBE_DEFER) + dev_info(dev, "failed to get regulators: %d\n", ret); + return ret; } dsi->clks = devm_kcalloc(dev, @@ -1787,9 +1788,10 @@ static int exynos_dsi_probe(struct platform_device *pdev) dsi->clks[i] = devm_clk_get(dev, clk_names[i]); if (IS_ERR(dsi->clks[i])) { if (strcmp(clk_names[i], "sclk_mipi") == 0) { - strcpy(clk_names[i], OLD_SCLK_MIPI_CLK_NAME); - i--; - continue; + dsi->clks[i] = devm_clk_get(dev, + OLD_SCLK_MIPI_CLK_NAME); + if (!IS_ERR(dsi->clks[i])) + continue; } dev_info(dev, "failed to get the clock: %s\n", diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 8ea2e1d77802..29ab8be8604c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -97,6 +97,7 @@ struct fimc_scaler { struct fimc_context { struct exynos_drm_ipp ipp; struct drm_device *drm_dev; + void *dma_priv; struct device *dev; struct exynos_drm_ipp_task *task; struct exynos_drm_ipp_formats *formats; @@ -1133,7 +1134,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data) ctx->drm_dev = drm_dev; ipp->drm_dev = drm_dev; - exynos_drm_register_dma(drm_dev, dev); + exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); exynos_drm_ipp_register(dev, ipp, &ipp_funcs, DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | @@ -1153,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master, struct exynos_drm_ipp *ipp = &ctx->ipp; exynos_drm_ipp_unregister(dev, ipp); - exynos_drm_unregister_dma(drm_dev, dev); + exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv); } static const struct component_ops fimc_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 21aec38702fc..bb67cad8371f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -167,6 +167,7 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = { struct fimd_context { struct device *dev; struct drm_device *drm_dev; + void *dma_priv; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[WINDOWS_NR]; struct exynos_drm_plane_config configs[WINDOWS_NR]; @@ -1090,7 +1091,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data) if (is_drm_iommu_supported(drm_dev)) fimd_clear_channels(ctx->crtc); - return exynos_drm_register_dma(drm_dev, dev); + return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); } static void fimd_unbind(struct device *dev, struct device *master, @@ -1100,7 +1101,7 @@ static void fimd_unbind(struct device *dev, struct device 
*master, fimd_atomic_disable(ctx->crtc); - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv); if (ctx->encoder) exynos_dpi_remove(ctx->encoder); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 2a3382d43bc9..fcee33a43aca 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -232,6 +232,7 @@ struct g2d_runqueue_node { struct g2d_data { struct device *dev; + void *dma_priv; struct clk *gate_clk; void __iomem *regs; int irq; @@ -1409,7 +1410,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data) return ret; } - ret = exynos_drm_register_dma(drm_dev, dev); + ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv); if (ret < 0) { dev_err(dev, "failed to enable iommu.\n"); g2d_fini_cmdlist(g2d); @@ -1434,7 +1435,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data) priv->g2d_dev = NULL; cancel_work_sync(&g2d->runqueue_work); - exynos_drm_unregister_dma(g2d->drm_dev, dev); + exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv); } static const struct component_ops g2d_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 88b6fcaa20be..45e9aee8366a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -97,6 +97,7 @@ struct gsc_scaler { struct gsc_context { struct exynos_drm_ipp ipp; struct drm_device *drm_dev; + void *dma_priv; struct device *dev; struct exynos_drm_ipp_task *task; struct exynos_drm_ipp_formats *formats; @@ -1169,7 +1170,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data) ctx->drm_dev = drm_dev; ctx->drm_dev = drm_dev; - exynos_drm_register_dma(drm_dev, dev); + exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); exynos_drm_ipp_register(dev, ipp, &ipp_funcs, DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | @@ -1189,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master, struct exynos_drm_ipp *ipp = &ctx->ipp; exynos_drm_ipp_unregister(dev, ipp); - exynos_drm_unregister_dma(drm_dev, dev); + exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv); } static const struct component_ops gsc_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index b98482990d1a..dafa87b82052 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -56,6 +56,7 @@ struct rot_variant { struct rot_context { struct exynos_drm_ipp ipp; struct drm_device *drm_dev; + void *dma_priv; struct device *dev; void __iomem *regs; struct clk *clock; @@ -243,7 +244,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data) rot->drm_dev = drm_dev; ipp->drm_dev = drm_dev; - exynos_drm_register_dma(drm_dev, dev); + exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv); exynos_drm_ipp_register(dev, ipp, &ipp_funcs, DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE, @@ -261,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master, struct exynos_drm_ipp *ipp = &rot->ipp; exynos_drm_ipp_unregister(dev, ipp); - exynos_drm_unregister_dma(rot->drm_dev, rot->dev); + exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv); } static const struct component_ops rotator_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c 
b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 497973e9b2c5..93c43c8d914e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -39,6 +39,7 @@ struct scaler_data { struct scaler_context { struct exynos_drm_ipp ipp; struct drm_device *drm_dev; + void *dma_priv; struct device *dev; void __iomem *regs; struct clk *clock[SCALER_MAX_CLK]; @@ -450,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data) scaler->drm_dev = drm_dev; ipp->drm_dev = drm_dev; - exynos_drm_register_dma(drm_dev, dev); + exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv); exynos_drm_ipp_register(dev, ipp, &ipp_funcs, DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | @@ -470,7 +471,8 @@ static void scaler_unbind(struct device *dev, struct device *master, struct exynos_drm_ipp *ipp = &scaler->ipp; exynos_drm_ipp_unregister(dev, ipp); - exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev); + exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev, + &scaler->dma_priv); } static const struct component_ops scaler_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 9ff921f43a93..f141916eade6 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1805,18 +1805,10 @@ static int hdmi_resources_init(struct hdmi_context *hdata) hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en"); - if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) { + if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) if (IS_ERR(hdata->reg_hdmi_en)) return PTR_ERR(hdata->reg_hdmi_en); - ret = regulator_enable(hdata->reg_hdmi_en); - if (ret) { - DRM_DEV_ERROR(dev, - "failed to enable hdmi-en regulator\n"); - return ret; - } - } - return hdmi_bridge_init(hdata); } @@ -2023,6 +2015,15 @@ static int hdmi_probe(struct platform_device *pdev) } } + if (!IS_ERR(hdata->reg_hdmi_en)) { + ret = regulator_enable(hdata->reg_hdmi_en); + if (ret) { + DRM_DEV_ERROR(dev, + "failed to enable hdmi-en regulator\n"); + goto err_hdmiphy; + } + } + pm_runtime_enable(dev); audio_infoframe = &hdata->audio.infoframe; @@ -2047,7 +2048,8 @@ err_unregister_audio: err_rpm_disable: pm_runtime_disable(dev); - + if (!IS_ERR(hdata->reg_hdmi_en)) + regulator_disable(hdata->reg_hdmi_en); err_hdmiphy: if (hdata->hdmiphy_port) put_device(&hdata->hdmiphy_port->dev); diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 38ae9c32feef..21b726baedea 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -94,6 +94,7 @@ struct mixer_context { struct platform_device *pdev; struct device *dev; struct drm_device *drm_dev; + void *dma_priv; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[MIXER_WIN_NR]; unsigned long flags; @@ -894,12 +895,14 @@ static int mixer_initialize(struct mixer_context *mixer_ctx, } } - return exynos_drm_register_dma(drm_dev, mixer_ctx->dev); + return exynos_drm_register_dma(drm_dev, mixer_ctx->dev, + &mixer_ctx->dma_priv); } static void mixer_ctx_remove(struct mixer_context *mixer_ctx) { - exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev); + exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev, + &mixer_ctx->dma_priv); } static int mixer_enable_vblank(struct exynos_drm_crtc *crtc) diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h index 0da860200410..e2ac09894a6d 100644 --- 
a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h @@ -83,7 +83,6 @@ #define VSIZE_OFST 20 #define LDI_INT_EN 0x741C #define FRAME_END_INT_EN_OFST 1 -#define UNDERFLOW_INT_EN_OFST 2 #define LDI_CTRL 0x7420 #define BPP_OFST 3 #define DATA_GATE_EN BIT(2) diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 73cd28a6ea07..86000127d4ee 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -46,7 +46,6 @@ struct ade_hw_ctx { struct clk *media_noc_clk; struct clk *ade_pix_clk; struct reset_control *reset; - struct work_struct display_reset_wq; bool power_on; int irq; @@ -136,7 +135,6 @@ static void ade_init(struct ade_hw_ctx *ctx) */ ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST, FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND); - ade_update_bits(base + LDI_INT_EN, UNDERFLOW_INT_EN_OFST, MASK(1), 1); } static bool ade_crtc_mode_fixup(struct drm_crtc *crtc, @@ -304,17 +302,6 @@ static void ade_crtc_disable_vblank(struct drm_crtc *crtc) MASK(1), 0); } -static void drm_underflow_wq(struct work_struct *work) -{ - struct ade_hw_ctx *ctx = container_of(work, struct ade_hw_ctx, - display_reset_wq); - struct drm_device *drm_dev = ctx->crtc->dev; - struct drm_atomic_state *state; - - state = drm_atomic_helper_suspend(drm_dev); - drm_atomic_helper_resume(drm_dev, state); -} - static irqreturn_t ade_irq_handler(int irq, void *data) { struct ade_hw_ctx *ctx = data; @@ -331,12 +318,6 @@ static irqreturn_t ade_irq_handler(int irq, void *data) MASK(1), 1); drm_crtc_handle_vblank(crtc); } - if (status & BIT(UNDERFLOW_INT_EN_OFST)) { - ade_update_bits(base + LDI_INT_CLR, UNDERFLOW_INT_EN_OFST, - MASK(1), 1); - DRM_ERROR("LDI underflow!"); - schedule_work(&ctx->display_reset_wq); - } return IRQ_HANDLED; } @@ -919,7 +900,6 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev, if (ret) return ERR_PTR(-EIO); - INIT_WORK(&ctx->display_reset_wq, drm_underflow_wq); ctx->crtc = crtc; return ctx; diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index ba9595960bbe..907c4471f591 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -75,9 +75,8 @@ config DRM_I915_CAPTURE_ERROR help This option enables capturing the GPU state when a hang is detected. This information is vital for triaging hangs and assists in debugging. - Please report any hang to - https://bugs.freedesktop.org/enter_bug.cgi?product=DRI - for triaging. + Please report any hang for triaging according to: + https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs If in doubt, say "Y". 
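The kirin_drm_ade.c hunks above delete the LDI-underflow recovery worker: the old code acknowledged the underflow bit in the hard IRQ handler and scheduled a work item that bounced the whole display through drm_atomic_helper_suspend()/drm_atomic_helper_resume(). For reference, a minimal sketch of that defer-to-workqueue reset pattern (the "foo_" names are placeholders, not kirin code); the split matters because the atomic helpers can sleep and therefore must not run in interrupt context:

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <drm/drm_device.h>
#include <drm/drm_atomic_helper.h>

struct foo_ctx {
	struct drm_device *drm;
	struct work_struct reset_work;	/* INIT_WORK(&ctx->reset_work, foo_reset_work) at probe time */
};

static void foo_reset_work(struct work_struct *work)
{
	struct foo_ctx *ctx = container_of(work, struct foo_ctx, reset_work);
	struct drm_atomic_state *state;

	/* Process context: the atomic helpers may sleep here. */
	state = drm_atomic_helper_suspend(ctx->drm);
	drm_atomic_helper_resume(ctx->drm, state);
}

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foo_ctx *ctx = data;

	/* Ack the hardware-specific underflow status bit, then defer the reset. */
	schedule_work(&ctx->reset_work);

	return IRQ_HANDLED;
}

The patch removes this path entirely rather than reworking it, so an LDI underflow no longer triggers a full suspend/resume cycle of the display state.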
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index b8c5f8934dbd..a1f2411aa21b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -294,7 +294,7 @@ extra-$(CONFIG_DRM_I915_WERROR) += \ $(shell cd $(srctree)/$(src) && find * -name '*.h'))) quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@) - cmd_hdrtest = $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include $<; touch $@ + cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@ $(obj)/%.hdrtest: $(src)/%.h FORCE $(call if_changed_dep,hdrtest) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 33f1dc3d7c1a..d9a61f341070 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -4251,7 +4251,9 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state) { - if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000) + if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000) + crtc_state->min_voltage_level = 3; + else if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 1; else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 2; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 064dd99bbc49..aa453953908b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -11087,7 +11087,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state) u32 base; if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) - base = obj->phys_handle->busaddr; + base = sg_dma_address(obj->mm.pages->sgl); else base = intel_plane_ggtt_offset(plane_state); @@ -17433,6 +17433,24 @@ retry: * have readout for pipe gamma enable. */ crtc_state->uapi.color_mgmt_changed = true; + + /* + * FIXME hack to force full modeset when DSC is being + * used. + * + * As long as we do not have full state readout and + * config comparison of crtc_state->dsc, we have no way + * to ensure reliable fastset. Remove once we have + * readout for DSC. 
+ */ + if (crtc_state->dsc.compression_enable) { + ret = drm_atomic_add_affected_connectors(state, + &crtc->base); + if (ret) + goto out; + crtc_state->uapi.mode_changed = true; + drm_dbg_kms(dev, "Force full modeset for DSC\n"); + } } } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 21561acfa3ac..46c40db992dd 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -4466,13 +4466,19 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv) static void icl_mbus_init(struct drm_i915_private *dev_priv) { - u32 val; + u32 mask, val; - val = MBUS_ABOX_BT_CREDIT_POOL1(16) | - MBUS_ABOX_BT_CREDIT_POOL2(16) | - MBUS_ABOX_B_CREDIT(1) | - MBUS_ABOX_BW_CREDIT(1); + mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | + MBUS_ABOX_BT_CREDIT_POOL2_MASK | + MBUS_ABOX_B_CREDIT_MASK | + MBUS_ABOX_BW_CREDIT_MASK; + val = I915_READ(MBUS_ABOX_CTL); + val &= ~mask; + val |= MBUS_ABOX_BT_CREDIT_POOL1(16) | + MBUS_ABOX_BT_CREDIT_POOL2(16) | + MBUS_ABOX_B_CREDIT(1) | + MBUS_ABOX_BW_CREDIT(1); I915_WRITE(MBUS_ABOX_CTL, val); } @@ -4968,8 +4974,21 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE); I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE); } else { + u32 val; + I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask); I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask); + + /* Wa_22010178259:tgl */ + val = I915_READ(BW_BUDDY1_CTL); + val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK; + val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8); + I915_WRITE(BW_BUDDY1_CTL, val); + + val = I915_READ(BW_BUDDY2_CTL); + val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK; + val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8); + I915_WRITE(BW_BUDDY2_CTL, val); } } diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index c7424e2a04a3..208457005a11 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -1360,7 +1360,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, * lowest possible wakeup latency and so prevent the cpu from going into * deep sleep states. 
*/ - pm_qos_update_request(&i915->pm_qos, 0); + cpu_latency_qos_update_request(&i915->pm_qos, 0); intel_dp_check_edp(intel_dp); @@ -1488,7 +1488,7 @@ done: ret = recv_bytes; out: - pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE); if (vdd) edp_panel_vdd_off(intel_dp, false); diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 89c9cf5f38d2..83025052c965 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -852,10 +852,12 @@ void intel_psr_enable(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - if (!crtc_state->has_psr) + if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp) return; - if (WARN_ON(!CAN_PSR(dev_priv))) + dev_priv->psr.force_mode_changed = false; + + if (!crtc_state->has_psr) return; WARN_ON(dev_priv->drrs.dp); @@ -1009,6 +1011,8 @@ void intel_psr_update(struct intel_dp *intel_dp, if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp) return; + dev_priv->psr.force_mode_changed = false; + mutex_lock(&dev_priv->psr.lock); enable = crtc_state->has_psr && psr_global_enabled(psr->debug); @@ -1534,7 +1538,7 @@ void intel_psr_atomic_check(struct drm_connector *connector, struct drm_crtc_state *crtc_state; if (!CAN_PSR(dev_priv) || !new_state->crtc || - dev_priv->psr.initially_probed) + !dev_priv->psr.force_mode_changed) return; intel_connector = to_intel_connector(connector); @@ -1545,5 +1549,18 @@ void intel_psr_atomic_check(struct drm_connector *connector, crtc_state = drm_atomic_get_new_crtc_state(new_state->state, new_state->crtc); crtc_state->mode_changed = true; - dev_priv->psr.initially_probed = true; +} + +void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv; + + if (!intel_dp) + return; + + dev_priv = dp_to_i915(intel_dp); + if (!CAN_PSR(dev_priv) || intel_dp != dev_priv->psr.dp) + return; + + dev_priv->psr.force_mode_changed = true; } diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index c58a1d438808..274fc6bb6221 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -40,5 +40,6 @@ bool intel_psr_enabled(struct intel_dp *intel_dp); void intel_psr_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state); +void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp); #endif /* __INTEL_PSR_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index a2e57e62af30..151a1e8ae36a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -565,6 +565,22 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state) if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) return -ENODEV; + /* + * If the cancel fails, we then need to reset, cleanly! + * + * If the per-engine reset fails, all hope is lost! We resort + * to a full GPU reset in that unlikely case, but realistically + * if the engine could not reset, the full reset does not fare + * much better. The damage has been done. + * + * However, if we cannot reset an engine by itself, we cannot + * cleanup a hanging persistent context without causing + * colateral damage, and we should not pretend we can by + * exposing the interface. 
+ */ + if (!intel_has_reset_engine(&ctx->i915->gt)) + return -ENODEV; + i915_gem_context_clear_persistence(ctx); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 60c984e10c4a..7643a30ba4cd 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -423,7 +423,8 @@ eb_validate_vma(struct i915_execbuffer *eb, if (unlikely(entry->flags & eb->invalid_flags)) return -EINVAL; - if (unlikely(entry->alignment && !is_power_of_2(entry->alignment))) + if (unlikely(entry->alignment && + !is_power_of_2_u64(entry->alignment))) return -EINVAL; /* diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 35985218bd85..5da9f9e534b9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -225,6 +225,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, /* But keep the pointer alive for RCU-protected lookups */ call_rcu(&obj->rcu, __i915_gem_free_object_rcu); + cond_resched(); } intel_runtime_pm_put(&i915->runtime_pm, wakeref); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index f64ad77e6b1e..c2174da35bb0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -285,9 +285,6 @@ struct drm_i915_gem_object { void *gvt_info; }; - - /** for phys allocated objects */ - struct drm_dma_handle *phys_handle; }; static inline struct drm_i915_gem_object * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index b1b7c1b3038a..b07bb40edd5a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -22,88 +22,87 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) { struct address_space *mapping = obj->base.filp->f_mapping; - struct drm_dma_handle *phys; - struct sg_table *st; struct scatterlist *sg; - char *vaddr; + struct sg_table *st; + dma_addr_t dma; + void *vaddr; + void *dst; int i; - int err; if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) return -EINVAL; - /* Always aligning to the object size, allows a single allocation + /* + * Always aligning to the object size, allows a single allocation * to handle all possible callers, and given typical object sizes, * the alignment of the buddy allocation will naturally match. 
*/ - phys = drm_pci_alloc(obj->base.dev, - roundup_pow_of_two(obj->base.size), - roundup_pow_of_two(obj->base.size)); - if (!phys) + vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev, + roundup_pow_of_two(obj->base.size), + &dma, GFP_KERNEL); + if (!vaddr) return -ENOMEM; - vaddr = phys->vaddr; + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto err_pci; + + if (sg_alloc_table(st, 1, GFP_KERNEL)) + goto err_st; + + sg = st->sgl; + sg->offset = 0; + sg->length = obj->base.size; + + sg_assign_page(sg, (struct page *)vaddr); + sg_dma_address(sg) = dma; + sg_dma_len(sg) = obj->base.size; + + dst = vaddr; for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { struct page *page; - char *src; + void *src; page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) { - err = PTR_ERR(page); - goto err_phys; - } + if (IS_ERR(page)) + goto err_st; src = kmap_atomic(page); - memcpy(vaddr, src, PAGE_SIZE); - drm_clflush_virt_range(vaddr, PAGE_SIZE); + memcpy(dst, src, PAGE_SIZE); + drm_clflush_virt_range(dst, PAGE_SIZE); kunmap_atomic(src); put_page(page); - vaddr += PAGE_SIZE; + dst += PAGE_SIZE; } intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) { - err = -ENOMEM; - goto err_phys; - } - - if (sg_alloc_table(st, 1, GFP_KERNEL)) { - kfree(st); - err = -ENOMEM; - goto err_phys; - } - - sg = st->sgl; - sg->offset = 0; - sg->length = obj->base.size; - - sg_dma_address(sg) = phys->busaddr; - sg_dma_len(sg) = obj->base.size; - - obj->phys_handle = phys; - __i915_gem_object_set_pages(obj, st, sg->length); return 0; -err_phys: - drm_pci_free(obj->base.dev, phys); - - return err; +err_st: + kfree(st); +err_pci: + dma_free_coherent(&obj->base.dev->pdev->dev, + roundup_pow_of_two(obj->base.size), + vaddr, dma); + return -ENOMEM; } static void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, struct sg_table *pages) { + dma_addr_t dma = sg_dma_address(pages->sgl); + void *vaddr = sg_page(pages->sgl); + __i915_gem_object_release_shmem(obj, pages, false); if (obj->mm.dirty) { struct address_space *mapping = obj->base.filp->f_mapping; - char *vaddr = obj->phys_handle->vaddr; + void *src = vaddr; int i; for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { @@ -115,15 +114,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, continue; dst = kmap_atomic(page); - drm_clflush_virt_range(vaddr, PAGE_SIZE); - memcpy(dst, vaddr, PAGE_SIZE); + drm_clflush_virt_range(src, PAGE_SIZE); + memcpy(dst, src, PAGE_SIZE); kunmap_atomic(dst); set_page_dirty(page); if (obj->mm.madv == I915_MADV_WILLNEED) mark_page_accessed(page); put_page(page); - vaddr += PAGE_SIZE; + + src += PAGE_SIZE; } obj->mm.dirty = false; } @@ -131,7 +131,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, sg_free_table(pages); kfree(pages); - drm_pci_free(obj->base.dev, obj->phys_handle); + dma_free_coherent(&obj->base.dev->pdev->dev, + roundup_pow_of_two(obj->base.size), + vaddr, dma); } static void phys_release(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index f7e4b39c734f..59b387ade49c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -256,8 +256,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915) with_intel_runtime_pm(&i915->runtime_pm, wakeref) { freed = i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_ACTIVE); + 
I915_SHRINK_UNBOUND); } return freed; @@ -336,7 +335,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) freed_pages = 0; with_intel_runtime_pm(&i915->runtime_pm, wakeref) freed_pages += i915_gem_shrink(i915, -1UL, NULL, - I915_SHRINK_ACTIVE | I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_WRITEBACK); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index ef7c74cff28a..43912e9b683d 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -570,7 +570,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915, obj = i915_gem_object_create_internal(i915, size); if (IS_ERR(obj)) - return PTR_ERR(obj); + return false; mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL); i915_gem_object_put(obj); diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index 0ba524a414c6..cbad7fe722ce 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -136,6 +136,9 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl) struct intel_engine_cs *engine = container_of(b, struct intel_engine_cs, breadcrumbs); + if (unlikely(intel_engine_is_virtual(engine))) + engine = intel_virtual_engine_get_sibling(engine, 0); + intel_engine_add_retire(engine, tl); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index 7ef1d37970f6..24c99d0838af 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -99,6 +99,9 @@ static bool add_retire(struct intel_engine_cs *engine, void intel_engine_add_retire(struct intel_engine_cs *engine, struct intel_timeline *tl) { + /* We don't deal well with the engine disappearing beneath us */ + GEM_BUG_ON(intel_engine_is_virtual(engine)); + if (add_retire(engine, tl)) schedule_work(&engine->retire_work); } @@ -144,24 +147,32 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) fence = i915_active_fence_get(&tl->last_request); if (fence) { + mutex_unlock(&tl->mutex); + timeout = dma_fence_wait_timeout(fence, interruptible, timeout); dma_fence_put(fence); + + /* Retirement is best effort */ + if (!mutex_trylock(&tl->mutex)) { + active_count++; + goto out_active; + } } } if (!retire_requests(tl) || flush_submission(gt)) active_count++; + mutex_unlock(&tl->mutex); - spin_lock(&timelines->lock); +out_active: spin_lock(&timelines->lock); - /* Resume iteration after dropping lock */ + /* Resume list iteration after reacquiring spinlock */ list_safe_reset_next(tl, tn, link); if (atomic_dec_and_test(&tl->active_count)) list_del(&tl->link); - mutex_unlock(&tl->mutex); /* Defer the final release to after the spinlock */ if (refcount_dec_and_test(&tl->kref.refcount)) { diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index a13a8c4b65ab..31455eceeb0c 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -237,7 +237,8 @@ static void execlists_init_reg_state(u32 *reg_state, bool close); static void __execlists_update_reg_state(const struct intel_context *ce, - const struct intel_engine_cs *engine); + const struct intel_engine_cs *engine, + u32 head); static void mark_eio(struct i915_request *rq) { @@ -1186,12 +1187,11 @@ static void reset_active(struct i915_request *rq, head = rq->tail; else 
head = active_request(ce->timeline, rq)->head; - ce->ring->head = intel_ring_wrap(ce->ring, head); - intel_ring_update_space(ce->ring); + head = intel_ring_wrap(ce->ring, head); /* Scrub the context image to prevent replaying the previous batch */ restore_default_state(ce, engine); - __execlists_update_reg_state(ce, engine); + __execlists_update_reg_state(ce, engine, head); /* We've switched away, so this should be a no-op, but intent matters */ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; @@ -1321,7 +1321,7 @@ static u64 execlists_update_context(struct i915_request *rq) { struct intel_context *ce = rq->context; u64 desc = ce->lrc_desc; - u32 tail; + u32 tail, prev; /* * WaIdleLiteRestore:bdw,skl @@ -1334,9 +1334,15 @@ static u64 execlists_update_context(struct i915_request *rq) * subsequent resubmissions (for lite restore). Should that fail us, * and we try and submit the same tail again, force the context * reload. + * + * If we need to return to a preempted context, we need to skip the + * lite-restore and force it to reload the RING_TAIL. Otherwise, the + * HW has a tendency to ignore us rewinding the TAIL to the end of + * an earlier request. */ tail = intel_ring_set_tail(rq->ring, rq->tail); - if (unlikely(ce->lrc_reg_state[CTX_RING_TAIL] == tail)) + prev = ce->lrc_reg_state[CTX_RING_TAIL]; + if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0)) desc |= CTX_DESC_FORCE_RESTORE; ce->lrc_reg_state[CTX_RING_TAIL] = tail; rq->tail = rq->wa_tail; @@ -1594,16 +1600,10 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve, spin_unlock(&old->breadcrumbs.irq_lock); } -static struct i915_request * -last_active(const struct intel_engine_execlists *execlists) -{ - struct i915_request * const *last = READ_ONCE(execlists->active); - - while (*last && i915_request_completed(*last)) - last++; - - return *last; -} +#define for_each_waiter(p__, rq__) \ + list_for_each_entry_lockless(p__, \ + &(rq__)->sched.waiters_list, \ + wait_link) static void defer_request(struct i915_request *rq, struct list_head * const pl) { @@ -1622,7 +1622,7 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl) GEM_BUG_ON(i915_request_is_active(rq)); list_move_tail(&rq->sched.link, pl); - list_for_each_entry(p, &rq->sched.waiters_list, wait_link) { + for_each_waiter(p, rq) { struct i915_request *w = container_of(p->waiter, typeof(*w), sched); @@ -1668,11 +1668,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) if (!intel_engine_has_timeslices(engine)) return false; - if (list_is_last(&rq->sched.link, &engine->active.requests)) - return false; - - hint = max(rq_prio(list_next_entry(rq, sched.link)), - engine->execlists.queue_priority_hint); + hint = engine->execlists.queue_priority_hint; + if (!list_is_last(&rq->sched.link, &engine->active.requests)) + hint = max(hint, rq_prio(list_next_entry(rq, sched.link))); return hint >= effective_prio(rq); } @@ -1714,16 +1712,26 @@ static void set_timeslice(struct intel_engine_cs *engine) set_timer_ms(&engine->execlists.timer, active_timeslice(engine)); } +static void start_timeslice(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists *execlists = &engine->execlists; + + execlists->switch_priority_hint = execlists->queue_priority_hint; + + if (timer_pending(&execlists->timer)) + return; + + set_timer_ms(&execlists->timer, timeslice(engine)); +} + static void record_preemption(struct intel_engine_execlists *execlists) { (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); } -static unsigned 
long active_preempt_timeout(struct intel_engine_cs *engine) +static unsigned long active_preempt_timeout(struct intel_engine_cs *engine, + const struct i915_request *rq) { - struct i915_request *rq; - - rq = last_active(&engine->execlists); if (!rq) return 0; @@ -1734,13 +1742,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine) return READ_ONCE(engine->props.preempt_timeout_ms); } -static void set_preempt_timeout(struct intel_engine_cs *engine) +static void set_preempt_timeout(struct intel_engine_cs *engine, + const struct i915_request *rq) { if (!intel_engine_has_preempt_reset(engine)) return; set_timer_ms(&engine->execlists.preempt, - active_preempt_timeout(engine)); + active_preempt_timeout(engine, rq)); } static inline void clear_ports(struct i915_request **ports, int count) @@ -1753,6 +1762,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) struct intel_engine_execlists * const execlists = &engine->execlists; struct i915_request **port = execlists->pending; struct i915_request ** const last_port = port + execlists->port_mask; + struct i915_request * const *active; struct i915_request *last; struct rb_node *rb; bool submit = false; @@ -1807,7 +1817,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * i.e. we will retrigger preemption following the ack in case * of trouble. */ - last = last_active(execlists); + active = READ_ONCE(execlists->active); + while ((last = *active) && i915_request_completed(last)) + active++; + if (last) { if (need_preempt(engine, last, rb)) { ENGINE_TRACE(engine, @@ -1834,14 +1847,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine) */ __unwind_incomplete_requests(engine); - /* - * If we need to return to the preempted context, we - * need to skip the lite-restore and force it to - * reload the RING_TAIL. Otherwise, the HW has a - * tendency to ignore us rewinding the TAIL to the - * end of an earlier request. - */ - last->context->lrc_desc |= CTX_DESC_FORCE_RESTORE; last = NULL; } else if (need_timeslice(engine, last) && timer_expired(&engine->execlists.timer)) { @@ -1885,11 +1890,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * Even if ELSP[1] is occupied and not worthy * of timeslices, our queue might be. */ - if (!execlists->timer.expires && - need_timeslice(engine, last)) - set_timer_ms(&execlists->timer, - timeslice(engine)); - + start_timeslice(engine); return; } } @@ -1924,7 +1925,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) if (last && !can_merge_rq(last, rq)) { spin_unlock(&ve->base.active.lock); - return; /* leave this for another */ + start_timeslice(engine); + return; /* leave this for another sibling */ } ENGINE_TRACE(engine, @@ -2100,7 +2102,7 @@ done: * Skip if we ended up with exactly the same set of requests, * e.g. 
trying to timeslice a pair of ordered contexts */ - if (!memcmp(execlists->active, execlists->pending, + if (!memcmp(active, execlists->pending, (port - execlists->pending + 1) * sizeof(*port))) { do execlists_schedule_out(fetch_and_zero(port)); @@ -2111,7 +2113,7 @@ done: clear_ports(port + 1, last_port - port); execlists_submit_ports(engine); - set_preempt_timeout(engine); + set_preempt_timeout(engine, *active); } else { skip_submit: ring_set_paused(engine, 0); @@ -2860,16 +2862,17 @@ static void execlists_context_unpin(struct intel_context *ce) static void __execlists_update_reg_state(const struct intel_context *ce, - const struct intel_engine_cs *engine) + const struct intel_engine_cs *engine, + u32 head) { struct intel_ring *ring = ce->ring; u32 *regs = ce->lrc_reg_state; - GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); + GEM_BUG_ON(!intel_ring_offset_valid(ring, head)); GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); regs[CTX_RING_START] = i915_ggtt_offset(ring->vma); - regs[CTX_RING_HEAD] = ring->head; + regs[CTX_RING_HEAD] = head; regs[CTX_RING_TAIL] = ring->tail; /* RPCS */ @@ -2898,7 +2901,7 @@ __execlists_context_pin(struct intel_context *ce, ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE; ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; - __execlists_update_reg_state(ce, engine); + __execlists_update_reg_state(ce, engine, ce->ring->tail); return 0; } @@ -2939,7 +2942,7 @@ static void execlists_context_reset(struct intel_context *ce) /* Scrub away the garbage */ execlists_init_reg_state(ce->lrc_reg_state, ce, ce->engine, ce->ring, true); - __execlists_update_reg_state(ce, ce->engine); + __execlists_update_reg_state(ce, ce->engine, ce->ring->tail); ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; } @@ -3494,6 +3497,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) struct intel_engine_execlists * const execlists = &engine->execlists; struct intel_context *ce; struct i915_request *rq; + u32 head; mb(); /* paranoia: read the CSB pointers from after the reset */ clflush(execlists->csb_write); @@ -3521,15 +3525,15 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) if (i915_request_completed(rq)) { /* Idle context; tidy up the ring so we can restart afresh */ - ce->ring->head = intel_ring_wrap(ce->ring, rq->tail); + head = intel_ring_wrap(ce->ring, rq->tail); goto out_replay; } /* Context has requests still in-flight; it should not be idle! */ GEM_BUG_ON(i915_active_is_idle(&ce->active)); rq = active_request(ce->timeline, rq); - ce->ring->head = intel_ring_wrap(ce->ring, rq->head); - GEM_BUG_ON(ce->ring->head == ce->ring->tail); + head = intel_ring_wrap(ce->ring, rq->head); + GEM_BUG_ON(head == ce->ring->tail); /* * If this request hasn't started yet, e.g. it is waiting on a @@ -3574,10 +3578,9 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) out_replay: ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n", - ce->ring->head, ce->ring->tail); - intel_ring_update_space(ce->ring); + head, ce->ring->tail); __execlists_reset_reg_state(ce, engine); - __execlists_update_reg_state(ce, engine); + __execlists_update_reg_state(ce, engine, head); ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! 
*/ unwind: @@ -3997,26 +4000,6 @@ static int gen12_emit_flush_render(struct i915_request *request, *cs++ = preparser_disable(false); intel_ring_advance(request, cs); - - /* - * Wa_1604544889:tgl - */ - if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) { - flags = 0; - flags |= PIPE_CONTROL_CS_STALL; - flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH; - - flags |= PIPE_CONTROL_STORE_DATA_INDEX; - flags |= PIPE_CONTROL_QW_WRITE; - - cs = intel_ring_begin(request, 6); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - cs = gen8_emit_pipe_control(cs, flags, - LRC_PPHWSP_SCRATCH_ADDR); - intel_ring_advance(request, cs); - } } return 0; @@ -5220,10 +5203,7 @@ void intel_lr_context_reset(struct intel_engine_cs *engine, restore_default_state(ce, engine); /* Rerun the request; its payload has been neutered (if guilty). */ - ce->ring->head = head; - intel_ring_update_space(ce->ring); - - __execlists_update_reg_state(ce, engine); + __execlists_update_reg_state(ce, engine, head); } bool diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c index 374b28f13ca0..6ff803f397c4 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.c +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -145,6 +145,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size) kref_init(&ring->ref); ring->size = size; + ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size); /* * Workaround an erratum on the i830 which causes a hang if diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h index ea2839d9e044..5bdce24994aa 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.h +++ b/drivers/gpu/drm/i915/gt/intel_ring.h @@ -56,6 +56,14 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos) return pos & (ring->size - 1); } +static inline int intel_ring_direction(const struct intel_ring *ring, + u32 next, u32 prev) +{ + typecheck(typeof(ring->size), next); + typecheck(typeof(ring->size), prev); + return (next - prev) << ring->wrap; +} + static inline bool intel_ring_offset_valid(const struct intel_ring *ring, unsigned int pos) diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h index d9f17f38e0cc..1a189ea00fd8 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_types.h +++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h @@ -39,12 +39,13 @@ struct intel_ring { */ atomic_t pin_count; - u32 head; - u32 tail; - u32 emit; + u32 head; /* updated during retire, loosely tracks RING_HEAD */ + u32 tail; /* updated on submission, used for RING_TAIL */ + u32 emit; /* updated during request construction */ u32 space; u32 size; + u32 wrap; u32 effective_size; }; diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 87716529cd2f..d8d9f1179c2b 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -192,11 +192,15 @@ static void cacheline_release(struct intel_timeline_cacheline *cl) static void cacheline_free(struct intel_timeline_cacheline *cl) { + if (!i915_active_acquire_if_busy(&cl->active)) { + __idle_cacheline_free(cl); + return; + } + GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE)); cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE); - if (i915_active_is_idle(&cl->active)) - __idle_cacheline_free(cl); + i915_active_release(&cl->active); } int intel_timeline_init(struct intel_timeline *timeline, diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 
4e292d4bf7b9..6c2f8462e0f3 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -575,24 +575,19 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { - u32 val; - /* Wa_1409142259:tgl */ WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, GEN12_DISABLE_CPS_AWARE_COLOR_PIPE); - /* Wa_1604555607:tgl */ - val = intel_uncore_read(engine->uncore, FF_MODE2); - val &= ~FF_MODE2_TDS_TIMER_MASK; - val |= FF_MODE2_TDS_TIMER_128; /* - * FIXME: FF_MODE2 register is not readable till TGL B0. We can - * enable verification of WA from the later steppings, which enables - * the read of FF_MODE2. + * Wa_1604555607:gen12 and Wa_1608008084:gen12 + * FF_MODE2 register will return the wrong value when read. The default + * value for this register is zero for all fields and there are no bit + * masks. So instead of doing a RMW we should just write the TDS timer + * value for Wa_1604555607. */ - wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, val, - IS_TGL_REVID(engine->i915, TGL_REVID_A0, TGL_REVID_A0) ? 0 : - FF_MODE2_TDS_TIMER_MASK); + wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, + FF_MODE2_TDS_TIMER_128, 0); } static void @@ -1534,15 +1529,34 @@ err_obj: return ERR_PTR(err); } +static const struct { + u32 start; + u32 end; +} mcr_ranges_gen8[] = { + { .start = 0x5500, .end = 0x55ff }, + { .start = 0x7000, .end = 0x7fff }, + { .start = 0x9400, .end = 0x97ff }, + { .start = 0xb000, .end = 0xb3ff }, + { .start = 0xe000, .end = 0xe7ff }, + {}, +}; + static bool mcr_range(struct drm_i915_private *i915, u32 offset) { + int i; + + if (INTEL_GEN(i915) < 8) + return false; + /* - * Registers in this range are affected by the MCR selector + * Registers in these ranges are affected by the MCR selector * which only controls CPU initiated MMIO. Routing does not * work for CS access so we cannot verify them on this path. 
*/ - if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff)) - return true; + for (i = 0; mcr_ranges_gen8[i].start; i++) + if (offset >= mcr_ranges_gen8[i].start && + offset <= mcr_ranges_gen8[i].end) + return true; return false; } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 65718ca2326e..b292f8cbd0bf 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -186,7 +186,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio) } GEM_BUG_ON(!ce[1]->ring->size); intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2); - __execlists_update_reg_state(ce[1], engine); + __execlists_update_reg_state(ce[1], engine, ce[1]->ring->head); rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); if (IS_ERR(rq[0])) { diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index e1c313da6c00..a62bdf9be682 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -457,7 +457,8 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected) struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; /* TODO: add more platforms support */ - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || + IS_COFFEELAKE(dev_priv)) { if (connected) { vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 2477a1e5a166..ae139f0877ae 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -151,12 +151,12 @@ static void dmabuf_gem_object_free(struct kref *kref) dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj == obj) { + list_del(pos); intel_gvt_hypervisor_put_vfio_device(vgpu); idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); kfree(dmabuf_obj->info); kfree(dmabuf_obj); - list_del(pos); break; } } diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 867e7629025b..33569b910ed5 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -147,15 +147,14 @@ static void virt_vbt_generation(struct vbt *v) /* there's features depending on version! */ v->header.version = 155; v->header.header_size = sizeof(v->header); - v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header); + v->header.vbt_size = sizeof(struct vbt); v->header.bdb_offset = offsetof(struct vbt, bdb_header); strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK"); v->bdb_header.version = 186; /* child_dev_size = 33 */ v->bdb_header.header_size = sizeof(v->bdb_header); - v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header) - - sizeof(struct bdb_header); + v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header); /* general features */ v->general_features_header.id = BDB_GENERAL_FEATURES; diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 85bd9bf4f6ee..345c2aa3b491 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -272,10 +272,17 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; - mutex_lock(&vgpu->vgpu_lock); - WARN(vgpu->active, "vGPU is still active!\n"); + /* + * remove idr first so later clean can judge if need to stop + * service if no active vgpu. 
+ */ + mutex_lock(&gvt->lock); + idr_remove(&gvt->vgpu_idr, vgpu->id); + mutex_unlock(&gvt->lock); + + mutex_lock(&vgpu->vgpu_lock); intel_gvt_debugfs_remove_vgpu(vgpu); intel_vgpu_clean_sched_policy(vgpu); intel_vgpu_clean_submission(vgpu); @@ -290,7 +297,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) mutex_unlock(&vgpu->vgpu_lock); mutex_lock(&gvt->lock); - idr_remove(&gvt->vgpu_idr, vgpu->id); if (idr_is_empty(&gvt->vgpu_idr)) intel_gvt_clean_irq(gvt); intel_gvt_update_vgpu_types(gvt); @@ -560,9 +566,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, intel_vgpu_reset_mmio(vgpu, dmlr); populate_pvinfo_page(vgpu); - intel_vgpu_reset_display(vgpu); if (dmlr) { + intel_vgpu_reset_display(vgpu); intel_vgpu_reset_cfg_space(vgpu); /* only reset the failsafe mode when dmlr reset */ vgpu->failsafe = false; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f7385abdd74b..eec1d6a6aa22 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -56,6 +56,7 @@ #include "display/intel_hotplug.h" #include "display/intel_overlay.h" #include "display/intel_pipe_crc.h" +#include "display/intel_psr.h" #include "display/intel_sprite.h" #include "display/intel_vga.h" @@ -330,6 +331,8 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915) intel_init_ipc(i915); + intel_psr_set_force_mode_changed(i915->psr.dp); + return 0; cleanup_gem: @@ -502,8 +505,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->backlight_lock); mutex_init(&dev_priv->sb_lock); - pm_qos_add_request(&dev_priv->sb_qos, - PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); mutex_init(&dev_priv->av_mutex); mutex_init(&dev_priv->wm.wm_mutex); @@ -568,7 +570,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv) vlv_free_s0ix_state(dev_priv); i915_workqueues_cleanup(dev_priv); - pm_qos_remove_request(&dev_priv->sb_qos); + cpu_latency_qos_remove_request(&dev_priv->sb_qos); mutex_destroy(&dev_priv->sb_lock); } @@ -1226,8 +1228,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) } } - pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); intel_gt_init_workarounds(dev_priv); @@ -1273,7 +1274,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) err_msi: if (pdev->msi_enabled) pci_disable_msi(pdev); - pm_qos_remove_request(&dev_priv->pm_qos); + cpu_latency_qos_remove_request(&dev_priv->pm_qos); err_mem_regions: intel_memory_regions_driver_release(dev_priv); err_ggtt: @@ -1296,7 +1297,7 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) if (pdev->msi_enabled) pci_disable_msi(pdev); - pm_qos_remove_request(&dev_priv->pm_qos); + cpu_latency_qos_remove_request(&dev_priv->pm_qos); } /** diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 077af22b8340..810e3ccd56ec 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -505,7 +505,7 @@ struct i915_psr { bool dc3co_enabled; u32 dc3co_exit_delay; struct delayed_work idle_work; - bool initially_probed; + bool force_mode_changed; }; #define QUIRK_LVDS_SSC_DISABLE (1<<1) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c2de2f45b459..5f6e63952821 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ 
b/drivers/gpu/drm/i915/i915_gem.c @@ -180,7 +180,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file) { - void *vaddr = obj->phys_handle->vaddr + args->offset; + void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; char __user *user_data = u64_to_user_ptr(args->data_ptr); /* @@ -844,10 +844,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ret = i915_gem_gtt_pwrite_fast(obj, args); if (ret == -EFAULT || ret == -ENOSPC) { - if (obj->phys_handle) - ret = i915_gem_phys_pwrite(obj, args, file); - else + if (i915_gem_object_has_struct_page(obj)) ret = i915_gem_shmem_pwrite(obj, args); + else + ret = i915_gem_phys_pwrite(obj, args, file); } i915_gem_object_unpin_pages(obj); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 594341e27a47..9e401a5fcae8 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1852,7 +1852,8 @@ void i915_error_state_store(struct i915_gpu_coredump *error) if (!xchg(&warned, true) && ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) { pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); - pr_info("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); + pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n"); + pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n"); pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n"); pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n", diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 83f01401b8b5..f631f6d21127 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -437,7 +437,7 @@ static const struct intel_device_info snb_m_gt2_info = { .has_rc6 = 1, \ .has_rc6p = 1, \ .has_rps = true, \ - .ppgtt_type = INTEL_PPGTT_FULL, \ + .ppgtt_type = INTEL_PPGTT_ALIASING, \ .ppgtt_size = 31, \ IVB_PIPE_OFFSETS, \ IVB_CURSOR_OFFSETS, \ @@ -494,7 +494,7 @@ static const struct intel_device_info vlv_info = { .has_rps = true, .display.has_gmch = 1, .display.has_hotplug = 1, - .ppgtt_type = INTEL_PPGTT_FULL, + .ppgtt_type = INTEL_PPGTT_ALIASING, .ppgtt_size = 31, .has_snoop = true, .has_coherent_ggtt = false, diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 0f556d80ba36..3b6b913bd27a 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1954,9 +1954,10 @@ out: return i915_vma_get(oa_bo->vma); } -static int emit_oa_config(struct i915_perf_stream *stream, - struct i915_oa_config *oa_config, - struct intel_context *ce) +static struct i915_request * +emit_oa_config(struct i915_perf_stream *stream, + struct i915_oa_config *oa_config, + struct intel_context *ce) { struct i915_request *rq; struct i915_vma *vma; @@ -1964,7 +1965,7 @@ static int emit_oa_config(struct i915_perf_stream *stream, vma = get_oa_vma(stream, oa_config); if (IS_ERR(vma)) - return PTR_ERR(vma); + return ERR_CAST(vma); err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) @@ -1989,13 +1990,17 @@ static int emit_oa_config(struct i915_perf_stream *stream, err = rq->engine->emit_bb_start(rq, vma->node.start, 0, I915_DISPATCH_SECURE); + if 
(err) + goto err_add_request; + + i915_request_get(rq); err_add_request: i915_request_add(rq); err_vma_unpin: i915_vma_unpin(vma); err_vma_put: i915_vma_put(vma); - return err; + return err ? ERR_PTR(err) : rq; } static struct intel_context *oa_context(struct i915_perf_stream *stream) @@ -2003,7 +2008,8 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream) return stream->pinned_ctx ?: stream->engine->kernel_context; } -static int hsw_enable_metric_set(struct i915_perf_stream *stream) +static struct i915_request * +hsw_enable_metric_set(struct i915_perf_stream *stream) { struct intel_uncore *uncore = stream->uncore; @@ -2406,7 +2412,8 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream, return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs)); } -static int gen8_enable_metric_set(struct i915_perf_stream *stream) +static struct i915_request * +gen8_enable_metric_set(struct i915_perf_stream *stream) { struct intel_uncore *uncore = stream->uncore; struct i915_oa_config *oa_config = stream->oa_config; @@ -2448,7 +2455,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream) */ ret = lrc_configure_all_contexts(stream, oa_config); if (ret) - return ret; + return ERR_PTR(ret); return emit_oa_config(stream, oa_config, oa_context(stream)); } @@ -2460,7 +2467,8 @@ static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream) 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS); } -static int gen12_enable_metric_set(struct i915_perf_stream *stream) +static struct i915_request * +gen12_enable_metric_set(struct i915_perf_stream *stream) { struct intel_uncore *uncore = stream->uncore; struct i915_oa_config *oa_config = stream->oa_config; @@ -2491,7 +2499,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream) */ ret = gen12_configure_all_contexts(stream, oa_config); if (ret) - return ret; + return ERR_PTR(ret); /* * For Gen12, performance counters are context @@ -2501,7 +2509,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream) if (stream->ctx) { ret = gen12_configure_oar_context(stream, true); if (ret) - return ret; + return ERR_PTR(ret); } return emit_oa_config(stream, oa_config, oa_context(stream)); @@ -2696,6 +2704,20 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = { .read = i915_oa_read, }; +static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream) +{ + struct i915_request *rq; + + rq = stream->perf->ops.enable_metric_set(stream); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + + return 0; +} + /** * i915_oa_stream_init - validate combined props for OA stream and init * @stream: An i915 perf stream @@ -2829,7 +2851,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, stream->ops = &i915_oa_stream_ops; perf->exclusive_stream = stream; - ret = perf->ops.enable_metric_set(stream); + ret = i915_perf_stream_enable_sync(stream); if (ret) { DRM_DEBUG("Unable to enable metric set\n"); goto err_enable; @@ -3147,7 +3169,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream, return -EINVAL; if (config != stream->oa_config) { - int err; + struct i915_request *rq; /* * If OA is bound to a specific context, emit the @@ -3158,11 +3180,13 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream, * When set globally, we use a low priority kernel context, * so it will effectively take effect when idle. 
*/ - err = emit_oa_config(stream, config, oa_context(stream)); - if (err == 0) + rq = emit_oa_config(stream, config, oa_context(stream)); + if (!IS_ERR(rq)) { config = xchg(&stream->oa_config, config); - else - ret = err; + i915_request_put(rq); + } else { + ret = PTR_ERR(rq); + } } i915_oa_config_put(config); diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h index 45e581455f5d..a0e22f00f6cf 100644 --- a/drivers/gpu/drm/i915/i915_perf_types.h +++ b/drivers/gpu/drm/i915/i915_perf_types.h @@ -339,7 +339,8 @@ struct i915_oa_ops { * counter reports being sampled. May apply system constraints such as * disabling EU clock gating as required. */ - int (*enable_metric_set)(struct i915_perf_stream *stream); + struct i915_request * + (*enable_metric_set)(struct i915_perf_stream *stream); /** * @disable_metric_set: Remove system constraints associated with using diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index ec0299490dd4..aa729d04abe2 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -822,11 +822,6 @@ static ssize_t i915_pmu_event_show(struct device *dev, return sprintf(buf, "config=0x%lx\n", eattr->val); } -static struct attribute_group i915_pmu_events_attr_group = { - .name = "events", - /* Patch in attrs at runtime. */ -}; - static ssize_t i915_pmu_get_attr_cpumask(struct device *dev, struct device_attribute *attr, @@ -846,13 +841,6 @@ static const struct attribute_group i915_pmu_cpumask_attr_group = { .attrs = i915_cpumask_attrs, }; -static const struct attribute_group *i915_pmu_attr_groups[] = { - &i915_pmu_format_attr_group, - &i915_pmu_events_attr_group, - &i915_pmu_cpumask_attr_group, - NULL -}; - #define __event(__config, __name, __unit) \ { \ .config = (__config), \ @@ -1026,23 +1014,23 @@ err_alloc: static void free_event_attributes(struct i915_pmu *pmu) { - struct attribute **attr_iter = i915_pmu_events_attr_group.attrs; + struct attribute **attr_iter = pmu->events_attr_group.attrs; for (; *attr_iter; attr_iter++) kfree((*attr_iter)->name); - kfree(i915_pmu_events_attr_group.attrs); + kfree(pmu->events_attr_group.attrs); kfree(pmu->i915_attr); kfree(pmu->pmu_attr); - i915_pmu_events_attr_group.attrs = NULL; + pmu->events_attr_group.attrs = NULL; pmu->i915_attr = NULL; pmu->pmu_attr = NULL; } static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node) { - struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node); + struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node); GEM_BUG_ON(!pmu->base.event_init); @@ -1055,7 +1043,7 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node) static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node) { - struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node); + struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node); unsigned int target; GEM_BUG_ON(!pmu->base.event_init); @@ -1072,8 +1060,6 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node) return 0; } -static enum cpuhp_state cpuhp_slot = CPUHP_INVALID; - static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu) { enum cpuhp_state slot; @@ -1087,21 +1073,22 @@ static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu) return ret; slot = ret; - ret = cpuhp_state_add_instance(slot, &pmu->node); + ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node); if (ret) { cpuhp_remove_multi_state(slot); return ret; } - cpuhp_slot = slot; + pmu->cpuhp.slot = slot; 
return 0; } static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu) { - WARN_ON(cpuhp_slot == CPUHP_INVALID); - WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node)); - cpuhp_remove_multi_state(cpuhp_slot); + WARN_ON(pmu->cpuhp.slot == CPUHP_INVALID); + WARN_ON(cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node)); + cpuhp_remove_multi_state(pmu->cpuhp.slot); + pmu->cpuhp.slot = CPUHP_INVALID; } static bool is_igp(struct drm_i915_private *i915) @@ -1118,6 +1105,13 @@ static bool is_igp(struct drm_i915_private *i915) void i915_pmu_register(struct drm_i915_private *i915) { struct i915_pmu *pmu = &i915->pmu; + const struct attribute_group *attr_groups[] = { + &i915_pmu_format_attr_group, + &pmu->events_attr_group, + &i915_pmu_cpumask_attr_group, + NULL + }; + int ret = -ENOMEM; if (INTEL_GEN(i915) <= 2) { @@ -1128,6 +1122,7 @@ void i915_pmu_register(struct drm_i915_private *i915) spin_lock_init(&pmu->lock); hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pmu->timer.function = i915_sample; + pmu->cpuhp.slot = CPUHP_INVALID; if (!is_igp(i915)) { pmu->name = kasprintf(GFP_KERNEL, @@ -1143,11 +1138,16 @@ void i915_pmu_register(struct drm_i915_private *i915) if (!pmu->name) goto err; - i915_pmu_events_attr_group.attrs = create_event_attributes(pmu); - if (!i915_pmu_events_attr_group.attrs) + pmu->events_attr_group.name = "events"; + pmu->events_attr_group.attrs = create_event_attributes(pmu); + if (!pmu->events_attr_group.attrs) goto err_name; - pmu->base.attr_groups = i915_pmu_attr_groups; + pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups), + GFP_KERNEL); + if (!pmu->base.attr_groups) + goto err_attr; + pmu->base.task_ctx_nr = perf_invalid_context; pmu->base.event_init = i915_pmu_event_init; pmu->base.add = i915_pmu_event_add; @@ -1159,7 +1159,7 @@ void i915_pmu_register(struct drm_i915_private *i915) ret = perf_pmu_register(&pmu->base, pmu->name, -1); if (ret) - goto err_attr; + goto err_groups; ret = i915_pmu_register_cpuhp_state(pmu); if (ret) @@ -1169,6 +1169,8 @@ void i915_pmu_register(struct drm_i915_private *i915) err_unreg: perf_pmu_unregister(&pmu->base); +err_groups: + kfree(pmu->base.attr_groups); err_attr: pmu->base.event_init = NULL; free_event_attributes(pmu); @@ -1194,6 +1196,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915) perf_pmu_unregister(&pmu->base); pmu->base.event_init = NULL; + kfree(pmu->base.attr_groups); if (!is_igp(i915)) kfree(pmu->name); free_event_attributes(pmu); diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 6c1647c5daf2..f1d6cad0d7d5 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -39,9 +39,12 @@ struct i915_pmu_sample { struct i915_pmu { /** - * @node: List node for CPU hotplug handling. + * @cpuhp: Struct used for CPU hotplug handling. */ - struct hlist_node node; + struct { + struct hlist_node node; + enum cpuhp_state slot; + } cpuhp; /** * @base: PMU base. */ @@ -105,6 +108,10 @@ struct i915_pmu { */ ktime_t sleep_last; /** + * @events_attr_group: Device events attribute group. + */ + struct attribute_group events_attr_group; + /** * @i915_attr: Memory block holding device attributes. 
*/ void *i915_attr; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6cc55c103f67..3575fd30756b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7757,6 +7757,7 @@ enum { #define BW_BUDDY1_CTL _MMIO(0x45140) #define BW_BUDDY2_CTL _MMIO(0x45150) #define BW_BUDDY_DISABLE REG_BIT(31) +#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16) #define BW_BUDDY1_PAGE_MASK _MMIO(0x45144) #define BW_BUDDY2_PAGE_MASK _MMIO(0x45154) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 78a5f5d3c070..a18b2a244706 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -275,7 +275,7 @@ bool i915_request_retire(struct i915_request *rq) spin_unlock_irq(&rq->lock); remove_from_client(rq); - list_del(&rq->link); + list_del_rcu(&rq->link); intel_context_exit(rq->context); intel_context_unpin(rq->context); @@ -527,19 +527,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) return NOTIFY_DONE; } +static void irq_semaphore_cb(struct irq_work *wrk) +{ + struct i915_request *rq = + container_of(wrk, typeof(*rq), semaphore_work); + + i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE); + i915_request_put(rq); +} + static int __i915_sw_fence_call semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { - struct i915_request *request = - container_of(fence, typeof(*request), semaphore); + struct i915_request *rq = container_of(fence, typeof(*rq), semaphore); switch (state) { case FENCE_COMPLETE: - i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE); + if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) { + i915_request_get(rq); + init_irq_work(&rq->semaphore_work, irq_semaphore_cb); + irq_work_queue(&rq->semaphore_work); + } break; case FENCE_FREE: - i915_request_put(request); + i915_request_put(rq); break; } @@ -595,6 +607,8 @@ static void __i915_request_ctor(void *arg) i915_sw_fence_init(&rq->submit, submit_notify); i915_sw_fence_init(&rq->semaphore, semaphore_notify); + dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0); + rq->file_priv = NULL; rq->capture_list = NULL; @@ -653,25 +667,30 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) } } - ret = intel_timeline_get_seqno(tl, rq, &seqno); - if (ret) - goto err_free; - rq->i915 = ce->engine->i915; rq->context = ce; rq->engine = ce->engine; rq->ring = ce->ring; rq->execution_mask = ce->engine->mask; + kref_init(&rq->fence.refcount); + rq->fence.flags = 0; + rq->fence.error = 0; + INIT_LIST_HEAD(&rq->fence.cb_list); + + ret = intel_timeline_get_seqno(tl, rq, &seqno); + if (ret) + goto err_free; + + rq->fence.context = tl->fence_context; + rq->fence.seqno = seqno; + RCU_INIT_POINTER(rq->timeline, tl); RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline); rq->hwsp_seqno = tl->hwsp_seqno; rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */ - dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, - tl->fence_context, seqno); - /* We bump the ref for the fence chain */ i915_sw_fence_reinit(&i915_request_get(rq)->submit); i915_sw_fence_reinit(&i915_request_get(rq)->semaphore); @@ -714,6 +733,8 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) rq->infix = rq->ring->emit; /* end of header; start of user payload */ intel_context_mark_active(ce); + list_add_tail_rcu(&rq->link, &tl->requests); + return rq; err_unwind: @@ -767,16 +788,26 @@ i915_request_await_start(struct i915_request 
*rq, struct i915_request *signal) struct dma_fence *fence; int err; - GEM_BUG_ON(i915_request_timeline(rq) == - rcu_access_pointer(signal->timeline)); + if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline)) + return 0; + + if (i915_request_started(signal)) + return 0; fence = NULL; rcu_read_lock(); spin_lock_irq(&signal->lock); - if (!i915_request_started(signal) && - !list_is_first(&signal->link, - &rcu_dereference(signal->timeline)->requests)) { - struct i915_request *prev = list_prev_entry(signal, link); + do { + struct list_head *pos = READ_ONCE(signal->link.prev); + struct i915_request *prev; + + /* Confirm signal has not been retired, the link is valid */ + if (unlikely(i915_request_started(signal))) + break; + + /* Is signal the earliest request on its timeline? */ + if (pos == &rcu_dereference(signal->timeline)->requests) + break; /* * Peek at the request before us in the timeline. That @@ -784,20 +815,25 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal) * after acquiring a reference to it, confirm that it is * still part of the signaler's timeline. */ - if (i915_request_get_rcu(prev)) { - if (list_next_entry(prev, link) == signal) - fence = &prev->fence; - else - i915_request_put(prev); + prev = list_entry(pos, typeof(*prev), link); + if (!i915_request_get_rcu(prev)) + break; + + /* After the strong barrier, confirm prev is still attached */ + if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) { + i915_request_put(prev); + break; } - } + + fence = &prev->fence; + } while (0); spin_unlock_irq(&signal->lock); rcu_read_unlock(); if (!fence) return 0; err = 0; - if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence)) + if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence)) err = i915_sw_fence_await_dma_fence(&rq->submit, fence, 0, I915_FENCE_GFP); @@ -1235,8 +1271,6 @@ __i915_request_add_to_timeline(struct i915_request *rq) 0); } - list_add_tail(&rq->link, &timeline->requests); - /* * Make sure that no request gazumped us - if it was allocated after * our i915_request_alloc() and called __i915_request_add() before @@ -1296,9 +1330,9 @@ void __i915_request_queue(struct i915_request *rq, * decide whether to preempt the entire chain so that it is ready to * run at the earliest possible convenience. */ - i915_sw_fence_commit(&rq->semaphore); if (attr && rq->engine->schedule) rq->engine->schedule(rq, attr); + i915_sw_fence_commit(&rq->semaphore); i915_sw_fence_commit(&rq->submit); } diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index f57eadcf3583..fccc339949ec 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -26,6 +26,7 @@ #define I915_REQUEST_H #include <linux/dma-fence.h> +#include <linux/irq_work.h> #include <linux/lockdep.h> #include "gem/i915_gem_context_types.h" @@ -208,6 +209,7 @@ struct i915_request { }; struct list_head execute_cb; struct i915_sw_fence semaphore; + struct irq_work semaphore_work; /* * A list of everyone we wait upon, and everyone who waits upon us. 
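The i915_request.c and i915_request.h hunks above defer the semaphore priority bump out of the fence-completion callback into an irq_work: the callback takes an extra reference on the request, queues rq->semaphore_work, and the deferred irq_semaphore_cb() does the bump and drops that reference. Below is a minimal, self-contained sketch of that deferral pattern, written as a toy module under stated assumptions; the demo_request/demo_bump names and the printed message are illustrative only and are not part of the driver.

#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/irq_work.h>

struct demo_request {
	struct kref ref;
	struct irq_work bump_work;
};

static struct demo_request *demo_rq;

static void demo_request_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_request, ref));
}

/* Runs later, from irq_work context, instead of inside the signalling path. */
static void demo_bump(struct irq_work *wrk)
{
	struct demo_request *rq = container_of(wrk, struct demo_request, bump_work);

	pr_info("demo: deferred priority bump ran\n");
	kref_put(&rq->ref, demo_request_release);	/* drop the work's reference */
}

static int __init demo_init(void)
{
	demo_rq = kzalloc(sizeof(*demo_rq), GFP_KERNEL);
	if (!demo_rq)
		return -ENOMEM;

	kref_init(&demo_rq->ref);			/* reference held by demo_rq itself */
	init_irq_work(&demo_rq->bump_work, demo_bump);

	/* As in the hunk: take a reference for the deferred work, then queue it. */
	kref_get(&demo_rq->ref);
	irq_work_queue(&demo_rq->bump_work);

	return 0;
}

static void __exit demo_exit(void)
{
	irq_work_sync(&demo_rq->bump_work);		/* wait for the callback to finish */
	kref_put(&demo_rq->ref, demo_request_release);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The point of the pattern is that irq_work_queue() only schedules the callback to run later in its own interrupt context, so the original signalling path is not re-entered while it still holds its locks; the extra kref keeps the object alive until the deferred callback has run, mirroring the i915_request_get()/i915_request_put() pairing in the hunk.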
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 5d96cfba40f8..34b654b4e58a 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -423,8 +423,6 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, if (!node_signaled(signal)) { INIT_LIST_HEAD(&dep->dfs_link); - list_add(&dep->wait_link, &signal->waiters_list); - list_add(&dep->signal_link, &node->signalers_list); dep->signaler = signal; dep->waiter = node; dep->flags = flags; @@ -434,6 +432,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, !node_started(signal)) node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN; + /* All set, now publish. Beware the lockless walkers. */ + list_add(&dep->signal_link, &node->signalers_list); + list_add_rcu(&dep->wait_link, &signal->waiters_list); + /* * As we do not allow WAIT to preempt inflight requests, * once we have executed a request, along with triggering diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index c47261ae86ea..632d6953c78d 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -8,9 +8,8 @@ #include "i915_drv.h" #include "i915_utils.h" -#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI" -#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \ - "providing the dmesg log by booting with drm.debug=0xf" +#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs" +#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details." void __i915_printk(struct drm_i915_private *dev_priv, const char *level, diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index b0ade76bec90..d34141f7dcd8 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -234,6 +234,11 @@ static inline u64 ptr_to_u64(const void *ptr) __idx; \ }) +static inline bool is_power_of_2_u64(u64 n) +{ + return (n != 0 && ((n & (n - 1)) == 0)); +} + static inline void __list_del_many(struct list_head *head, struct list_head *first) { diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index cbfb7171d62d..0648eda309e4 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -60,7 +60,7 @@ static void __vlv_punit_get(struct drm_i915_private *i915) * to the Valleyview P-unit and not all sideband communications. 
*/ if (IS_VALLEYVIEW(i915)) { - pm_qos_update_request(&i915->sb_qos, 0); + cpu_latency_qos_update_request(&i915->sb_qos, 0); on_each_cpu(ping, NULL, 1); } } @@ -68,7 +68,8 @@ static void __vlv_punit_get(struct drm_i915_private *i915) static void __vlv_punit_put(struct drm_i915_private *i915) { if (IS_VALLEYVIEW(i915)) - pm_qos_update_request(&i915->sb_qos, PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_update_request(&i915->sb_qos, + PM_QOS_DEFAULT_VALUE); iosf_mbi_punit_release(); } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 0dfcd1787e65..fe85e487e477 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -486,6 +486,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc) } #if IS_REACHABLE(CONFIG_MTK_CMDQ) if (mtk_crtc->cmdq_client) { + mbox_flush(mtk_crtc->cmdq_client->chan, 2000); cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE); cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event); @@ -636,10 +637,18 @@ static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = { static int mtk_drm_crtc_init(struct drm_device *drm, struct mtk_drm_crtc *mtk_crtc, - struct drm_plane *primary, - struct drm_plane *cursor, unsigned int pipe) + unsigned int pipe) { - int ret; + struct drm_plane *primary = NULL; + struct drm_plane *cursor = NULL; + int i, ret; + + for (i = 0; i < mtk_crtc->layer_nr; i++) { + if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY) + primary = &mtk_crtc->planes[i]; + else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR) + cursor = &mtk_crtc->planes[i]; + } ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor, &mtk_crtc_funcs, NULL); @@ -689,11 +698,12 @@ static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc, } static inline -enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx) +enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx, + unsigned int num_planes) { if (plane_idx == 0) return DRM_PLANE_TYPE_PRIMARY; - else if (plane_idx == 1) + else if (plane_idx == (num_planes - 1)) return DRM_PLANE_TYPE_CURSOR; else return DRM_PLANE_TYPE_OVERLAY; @@ -712,7 +722,8 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev, ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[mtk_crtc->layer_nr], BIT(pipe), - mtk_drm_crtc_plane_type(mtk_crtc->layer_nr), + mtk_drm_crtc_plane_type(mtk_crtc->layer_nr, + num_planes), mtk_ddp_comp_supported_rotations(comp)); if (ret) return ret; @@ -807,9 +818,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, return ret; } - ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], - mtk_crtc->layer_nr > 1 ? 
&mtk_crtc->planes[1] : - NULL, pipe); + ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe); if (ret < 0) return ret; @@ -828,7 +837,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, drm_crtc_index(&mtk_crtc->base)); mtk_crtc->cmdq_client = NULL; } - ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events", + ret = of_property_read_u32_index(priv->mutex_node, + "mediatek,gce-events", drm_crtc_index(&mtk_crtc->base), &mtk_crtc->cmdq_event); if (ret) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 1f5a112bb034..57c88de9a329 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -471,6 +471,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node, /* Only DMA capable components need the LARB property */ comp->larb_dev = NULL; if (type != MTK_DISP_OVL && + type != MTK_DISP_OVL_2L && type != MTK_DISP_RDMA && type != MTK_DISP_WDMA) return 0; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index 914cc7619cd7..c2bd683a87c8 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -80,6 +80,7 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane, struct drm_plane_state *state) { struct drm_crtc_state *crtc_state; + int ret; if (plane != state->crtc->cursor) return -EINVAL; @@ -90,6 +91,11 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane, if (!plane->state->fb) return -EINVAL; + ret = mtk_drm_crtc_plane_check(state->crtc, plane, + to_mtk_plane_state(state)); + if (ret) + return ret; + if (state->state) crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc); @@ -115,6 +121,7 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane, plane->state->src_y = new_state->src_y; plane->state->src_h = new_state->src_h; plane->state->src_w = new_state->src_w; + swap(plane->state->fb, new_state->fb); state->pending.async_dirty = true; mtk_drm_crtc_async_update(new_state->crtc, plane, new_state); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 983afeaee737..748cd379065f 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -796,12 +796,41 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) return true; } +#define GBIF_CLIENT_HALT_MASK BIT(0) +#define GBIF_ARB_HALT_MASK BIT(1) + +static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) +{ + struct msm_gpu *gpu = &adreno_gpu->base; + + if (!a6xx_has_gbif(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); + spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & + 0xf) == 0xf); + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); + + return; + } + + /* Halt new client requests on GBIF */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & + (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); + + /* Halt all AXI requests on GBIF */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & + (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); + + /* The GBIF halt needs to be explicitly cleared */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); +} + /* Gracefully try to shut down the GMU and by extension the GPU */ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) { struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; - struct msm_gpu *gpu = &adreno_gpu->base; u32 val; /* @@ -819,11 +848,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) return; } - /* Clear the VBIF pipe before shutting down */ - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); - spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) - == 0xf); - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); + a6xx_bus_clear_pending_transactions(adreno_gpu); /* tell the GMU we want to slumber */ a6xx_gmu_notify_slumber(gmu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index daf07800cde0..68af24150de5 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -378,18 +378,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu) struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); int ret; - /* - * During a previous slumber, GBIF halt is asserted to ensure - * no further transaction can go through GPU before GPU - * headswitch is turned off. - * - * This halt is deasserted once headswitch goes off but - * incase headswitch doesn't goes off clear GBIF halt - * here to ensure GPU wake-up doesn't fail because of - * halted GPU transactions. - */ - gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); - /* Make sure the GMU keeps the GPU on while we set it up */ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); @@ -470,10 +458,12 @@ static int a6xx_hw_init(struct msm_gpu *gpu) /* Select CP0 to always count cycles */ gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT); - gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1); - gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1); - gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1); - gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21); + if (adreno_is_a630(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1); + gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1); + gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1); + gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21); + } /* Enable fault detection */ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, @@ -748,39 +738,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL), }; -#define GBIF_CLIENT_HALT_MASK BIT(0) -#define GBIF_ARB_HALT_MASK BIT(1) - -static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) -{ - struct msm_gpu *gpu = &adreno_gpu->base; - - if(!a6xx_has_gbif(adreno_gpu)){ - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); - spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & - 0xf) == 0xf); - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); - - return; - } - - /* Halt new client requests on GBIF */ - gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); - spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & - (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); - - /* Halt all AXI requests on GBIF */ - gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); - spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & - (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); - - /* - * GMU needs DDR access in slumber path. Deassert GBIF halt now - * to allow for GMU to access system memory. 
- */ - gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); -} - static int a6xx_pm_resume(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -805,16 +762,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) devfreq_suspend_device(gpu->devfreq.devfreq); - /* - * Make sure the GMU is idle before continuing (because some transitions - * may use VBIF - */ - a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu); - - /* Clear the VBIF pipe before shutting down */ - /* FIXME: This accesses the GPU - do we need to make sure it is on? */ - a6xx_bus_clear_pending_transactions(adreno_gpu); - return a6xx_gmu_stop(a6xx_gpu); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c index eda11abc5f01..e450e0b97211 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c @@ -7,6 +7,7 @@ #include "a6xx_gmu.h" #include "a6xx_gmu.xml.h" +#include "a6xx_gpu.h" #define HFI_MSG_ID(val) [val] = #val @@ -216,48 +217,82 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu) NULL, 0); } -static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) +static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) { - struct a6xx_hfi_msg_bw_table msg = { 0 }; + /* Send a single "off" entry since the 618 GMU doesn't do bus scaling */ + msg->bw_level_num = 1; + + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x01; + + msg->ddr_cmds_addrs[0] = 0x50000; + msg->ddr_cmds_addrs[1] = 0x5003c; + msg->ddr_cmds_addrs[2] = 0x5000c; + + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; /* - * The sdm845 GMU doesn't do bus frequency scaling on its own but it - * does need at least one entry in the list because it might be accessed - * when the GMU is shutting down. Send a single "off" entry. + * These are the CX (CNOC) votes - these are used by the GMU but the + * votes are known and fixed for the target */ + msg->cnoc_cmds_num = 1; + msg->cnoc_wait_bitmask = 0x01; + + msg->cnoc_cmds_addrs[0] = 0x5007c; + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[1][0] = 0x60000001; +} - msg.bw_level_num = 1; +static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) +{ + /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */ + msg->bw_level_num = 1; - msg.ddr_cmds_num = 3; - msg.ddr_wait_bitmask = 0x07; + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x07; - msg.ddr_cmds_addrs[0] = 0x50000; - msg.ddr_cmds_addrs[1] = 0x5005c; - msg.ddr_cmds_addrs[2] = 0x5000c; + msg->ddr_cmds_addrs[0] = 0x50000; + msg->ddr_cmds_addrs[1] = 0x5005c; + msg->ddr_cmds_addrs[2] = 0x5000c; - msg.ddr_cmds_data[0][0] = 0x40000000; - msg.ddr_cmds_data[0][1] = 0x40000000; - msg.ddr_cmds_data[0][2] = 0x40000000; + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; /* * These are the CX (CNOC) votes. This is used but the values for the * sdm845 GMU are known and fixed so we can hard code them. 
*/ - msg.cnoc_cmds_num = 3; - msg.cnoc_wait_bitmask = 0x05; + msg->cnoc_cmds_num = 3; + msg->cnoc_wait_bitmask = 0x05; - msg.cnoc_cmds_addrs[0] = 0x50034; - msg.cnoc_cmds_addrs[1] = 0x5007c; - msg.cnoc_cmds_addrs[2] = 0x5004c; + msg->cnoc_cmds_addrs[0] = 0x50034; + msg->cnoc_cmds_addrs[1] = 0x5007c; + msg->cnoc_cmds_addrs[2] = 0x5004c; - msg.cnoc_cmds_data[0][0] = 0x40000000; - msg.cnoc_cmds_data[0][1] = 0x00000000; - msg.cnoc_cmds_data[0][2] = 0x40000000; + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[0][1] = 0x00000000; + msg->cnoc_cmds_data[0][2] = 0x40000000; + + msg->cnoc_cmds_data[1][0] = 0x60000001; + msg->cnoc_cmds_data[1][1] = 0x20000001; + msg->cnoc_cmds_data[1][2] = 0x60000001; +} + + +static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_msg_bw_table msg = { 0 }; + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; - msg.cnoc_cmds_data[1][0] = 0x60000001; - msg.cnoc_cmds_data[1][1] = 0x20000001; - msg.cnoc_cmds_data[1][2] = 0x60000001; + if (adreno_is_a618(adreno_gpu)) + a618_build_bw_table(&msg); + else + a6xx_build_bw_table(&msg); return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg), NULL, 0); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c index 528632690f1e..a05282dede91 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c @@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = { INTERLEAVED_RGB_FMT(RGB565, 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, false, 2, 0, DPU_FETCH_LINEAR, 1), INTERLEAVED_RGB_FMT(BGR565, 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, false, 2, 0, DPU_FETCH_LINEAR, 1), diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c index 29705e773a4b..80d3cfc14007 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c @@ -12,6 +12,7 @@ #define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base) +#define HW_REV 0x0 #define HW_INTR_STATUS 0x0010 /* Max BW defined in KBps */ @@ -22,6 +23,17 @@ struct dpu_irq_controller { struct irq_domain *domain; }; +struct dpu_hw_cfg { + u32 val; + u32 offset; +}; + +struct dpu_mdss_hw_init_handler { + u32 hw_rev; + u32 hw_reg_count; + struct dpu_hw_cfg* hw_cfg; +}; + struct dpu_mdss { struct msm_mdss base; void __iomem *mmio; @@ -32,6 +44,44 @@ struct dpu_mdss { u32 num_paths; }; +static struct dpu_hw_cfg hw_cfg[] = { + { + /* UBWC global settings */ + .val = 0x1E, + .offset = 0x144, + } +}; + +static struct dpu_mdss_hw_init_handler cfg_handler[] = { + { .hw_rev = DPU_HW_VER_620, + .hw_reg_count = ARRAY_SIZE(hw_cfg), + .hw_cfg = hw_cfg + }, +}; + +static void dpu_mdss_hw_init(struct dpu_mdss *dpu_mdss, u32 hw_rev) +{ + int i; + u32 count = 0; + struct dpu_hw_cfg *hw_cfg = NULL; + + for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) { + if (cfg_handler[i].hw_rev == hw_rev) { + hw_cfg = cfg_handler[i].hw_cfg; + count = cfg_handler[i].hw_reg_count; + break; + } + } + + for (i = 0; i < count; i++ ) { + writel_relaxed(hw_cfg->val, + dpu_mdss->mmio + hw_cfg->offset); + hw_cfg++; + } + + return; +} + static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev, struct dpu_mdss *dpu_mdss) { @@ -174,12 +224,18 @@ static int dpu_mdss_enable(struct msm_mdss *mdss) struct dpu_mdss 
*dpu_mdss = to_dpu_mdss(mdss); struct dss_module_power *mp = &dpu_mdss->mp; int ret; + u32 mdss_rev; dpu_mdss_icc_request_bw(mdss); ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); - if (ret) + if (ret) { DPU_ERROR("clock enable failed, ret:%d\n", ret); + return ret; + } + + mdss_rev = readl_relaxed(dpu_mdss->mmio + HW_REV); + dpu_mdss_hw_init(dpu_mdss, mdss_rev); return ret; } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 05cc04f729d6..e1cc541e0ef2 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -1109,8 +1109,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc) ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion, msecs_to_jiffies(50)); if (ret == 0) - dev_warn(dev->dev, "pp done time out, lm=%d\n", - mdp5_cstate->pipeline.mixer->lm); + dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n", + mdp5_cstate->pipeline.mixer->lm); } static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 104115d112eb..4864b9558f65 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -336,7 +336,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector) return num; } -static int dsi_mgr_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { int id = dsi_mgr_connector_get_id(connector); @@ -506,6 +506,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct mipi_dsi_host *host = msm_dsi->host; struct drm_panel *panel = msm_dsi->panel; + struct msm_dsi_pll *src_pll; bool is_dual_dsi = IS_DUAL_DSI(); int ret; @@ -539,6 +540,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) id, ret); } + /* Save PLL status if it is a clock source */ + src_pll = msm_dsi_phy_get_pll(msm_dsi->phy); + msm_dsi_pll_save_state(src_pll); + ret = msm_dsi_host_power_off(host); if (ret) pr_err("%s: host %d power off failed,%d\n", __func__, id, ret); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index b0cfa67d2a57..f509ebd77500 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -724,10 +724,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy) if (!phy || !phy->cfg->ops.disable) return; - /* Save PLL status if it is a clock source */ - if (phy->usecase != MSM_DSI_PHY_SLAVE) - msm_dsi_pll_save_state(phy->pll); - phy->cfg->ops.disable(phy); dsi_phy_regulator_disable(phy); diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c index 1c894548dd72..6ac04fc303f5 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c @@ -411,6 +411,12 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) if (pll_10nm->slave) dsi_pll_enable_pll_bias(pll_10nm->slave); + rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0); + if (rc) { + pr_err("vco_set_rate failed, rc=%d\n", rc); + return rc; + } + /* Start PLL */ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x01); diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 890315291b01..bb737f9281e6 100644 --- 
a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -458,6 +458,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) asyw->clr.ntfy = armw->ntfy.handle != 0; asyw->clr.sema = armw->sema.handle != 0; asyw->clr.xlut = armw->xlut.handle != 0; + if (asyw->clr.xlut && asyw->visible) + asyw->set.xlut = asyw->xlut.handle != 0; asyw->clr.csc = armw->csc.valid; if (wndw->func->image_clr) asyw->clr.image = armw->image.handle[0] != 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index c7d700916eae..8ebbe1656008 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2579,6 +2579,7 @@ nv166_chipset = { static const struct nvkm_device_chip nv167_chipset = { .name = "TU117", + .acr = tu102_acr_new, .bar = tu102_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2607,6 +2608,7 @@ nv167_chipset = { .disp = tu102_disp_new, .dma = gv100_dma_new, .fifo = tu102_fifo_new, + .gr = tu102_gr_new, .nvdec[0] = gm107_nvdec_new, .nvenc[0] = gm107_nvenc_new, .sec2 = tu102_sec2_new, @@ -2615,6 +2617,7 @@ nv167_chipset = { static const struct nvkm_device_chip nv168_chipset = { .name = "TU116", + .acr = tu102_acr_new, .bar = tu102_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2643,6 +2646,7 @@ nv168_chipset = { .disp = tu102_disp_new, .dma = gv100_dma_new, .fifo = tu102_fifo_new, + .gr = tu102_gr_new, .nvdec[0] = gm107_nvdec_new, .nvenc[0] = gm107_nvenc_new, .sec2 = tu102_sec2_new, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c index 454668b1cf54..a9efa4d78be9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c @@ -164,6 +164,32 @@ MODULE_FIRMWARE("nvidia/tu106/gr/sw_nonctx.bin"); MODULE_FIRMWARE("nvidia/tu106/gr/sw_bundle_init.bin"); MODULE_FIRMWARE("nvidia/tu106/gr/sw_method_init.bin"); +MODULE_FIRMWARE("nvidia/tu117/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu117/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu117/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/tu117/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu117/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/tu117/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/tu117/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/tu117/gr/sw_method_init.bin"); + +MODULE_FIRMWARE("nvidia/tu116/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu116/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu116/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/tu116/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu116/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/tu116/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/tu116/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/tu116/gr/sw_method_init.bin"); + static const struct gf100_gr_fwif tu102_gr_fwif[] = { { 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr }, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c index 7f4b89d82d32..d28d8f36ae24 100644 
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c @@ -107,6 +107,12 @@ MODULE_FIRMWARE("nvidia/tu104/acr/ucode_unload.bin"); MODULE_FIRMWARE("nvidia/tu106/acr/unload_bl.bin"); MODULE_FIRMWARE("nvidia/tu106/acr/ucode_unload.bin"); +MODULE_FIRMWARE("nvidia/tu116/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/tu116/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/tu117/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/tu117/acr/ucode_unload.bin"); + static const struct nvkm_acr_hsf_fwif tu102_acr_unload_fwif[] = { { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 }, @@ -130,6 +136,8 @@ tu102_acr_asb_0 = { MODULE_FIRMWARE("nvidia/tu102/acr/ucode_asb.bin"); MODULE_FIRMWARE("nvidia/tu104/acr/ucode_asb.bin"); MODULE_FIRMWARE("nvidia/tu106/acr/ucode_asb.bin"); +MODULE_FIRMWARE("nvidia/tu116/acr/ucode_asb.bin"); +MODULE_FIRMWARE("nvidia/tu117/acr/ucode_asb.bin"); static const struct nvkm_acr_hsf_fwif tu102_acr_asb_fwif[] = { @@ -154,6 +162,12 @@ MODULE_FIRMWARE("nvidia/tu104/acr/ucode_ahesasc.bin"); MODULE_FIRMWARE("nvidia/tu106/acr/bl.bin"); MODULE_FIRMWARE("nvidia/tu106/acr/ucode_ahesasc.bin"); +MODULE_FIRMWARE("nvidia/tu116/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/tu116/acr/ucode_ahesasc.bin"); + +MODULE_FIRMWARE("nvidia/tu117/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/tu117/acr/ucode_ahesasc.bin"); + static const struct nvkm_acr_hsf_fwif tu102_acr_ahesasc_fwif[] = { { 0, nvkm_acr_hsfw_load, &tu102_acr_ahesasc_0 }, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c index 389bad312bf2..10ff5d053f7e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c @@ -51,3 +51,5 @@ MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin"); MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin"); MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin"); MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin"); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 7157dfd7dea3..9a1a72a748e7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -280,12 +280,8 @@ static void panfrost_job_cleanup(struct kref *ref) } if (job->bos) { - struct panfrost_gem_object *bo; - - for (i = 0; i < job->bo_count; i++) { - bo = to_panfrost_bo(job->bos[i]); + for (i = 0; i < job->bo_count; i++) drm_gem_object_put_unlocked(job->bos[i]); - } kvfree(job->bos); } diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 763cfca886a7..5d75f8cf6477 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -151,7 +151,12 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) as = mmu->as; if (as >= 0) { int en = atomic_inc_return(&mmu->as_count); - WARN_ON(en >= NUM_JOB_SLOTS); + + /* + * AS can be retained by active jobs or a perfcnt context, + * hence the '+ 1' here. 
+ */ + WARN_ON(en >= (NUM_JOB_SLOTS + 1)); list_move(&mmu->list, &pfdev->as_lru_list); goto out; @@ -596,33 +601,27 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) source_id = (fault_status >> 16); /* Page fault only */ - if ((status & mask) == BIT(i)) { - WARN_ON(exception_type < 0xC1 || exception_type > 0xC4); - + ret = -1; + if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0) ret = panfrost_mmu_map_fault_addr(pfdev, i, addr); - if (!ret) { - mmu_write(pfdev, MMU_INT_CLEAR, BIT(i)); - status &= ~mask; - continue; - } - } - /* terminal fault, print info about the fault */ - dev_err(pfdev->dev, - "Unhandled Page fault in AS%d at VA 0x%016llX\n" - "Reason: %s\n" - "raw fault status: 0x%X\n" - "decoded fault status: %s\n" - "exception type 0x%X: %s\n" - "access type 0x%X: %s\n" - "source id 0x%X\n", - i, addr, - "TODO", - fault_status, - (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"), - exception_type, panfrost_exception_name(pfdev, exception_type), - access_type, access_type_name(pfdev, fault_status), - source_id); + if (ret) + /* terminal fault, print info about the fault */ + dev_err(pfdev->dev, + "Unhandled Page fault in AS%d at VA 0x%016llX\n" + "Reason: %s\n" + "raw fault status: 0x%X\n" + "decoded fault status: %s\n" + "exception type 0x%X: %s\n" + "access type 0x%X: %s\n" + "source id 0x%X\n", + i, addr, + "TODO", + fault_status, + (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"), + exception_type, panfrost_exception_name(pfdev, exception_type), + access_type, access_type_name(pfdev, fault_status), + source_id); mmu_write(pfdev, MMU_INT_CLEAR, mask); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 684820448be3..6913578d5aa7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -73,7 +73,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; struct drm_gem_shmem_object *bo; - u32 cfg; + u32 cfg, as; int ret; if (user == perfcnt->user) @@ -126,12 +126,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, perfcnt->user = user; - /* - * Always use address space 0 for now. - * FIXME: this needs to be updated when we start using different - * address space. 
- */ - cfg = GPU_PERFCNT_CFG_AS(0) | + as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu); + cfg = GPU_PERFCNT_CFG_AS(as) | GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL); /* @@ -195,6 +191,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf); perfcnt->buf = NULL; panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); + panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); panfrost_gem_mapping_put(perfcnt->mapping); perfcnt->mapping = NULL; pm_runtime_mark_last_busy(pfdev->dev); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index fd74e2611185..8696af1ee14d 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -37,6 +37,7 @@ #include <linux/vga_switcheroo.h> #include <linux/mmu_notifier.h> +#include <drm/drm_agpsupport.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> @@ -325,6 +326,7 @@ static int radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned long flags = 0; + struct drm_device *dev; int ret; if (!ent) @@ -365,7 +367,44 @@ static int radeon_pci_probe(struct pci_dev *pdev, if (ret) return ret; - return drm_get_pci_dev(pdev, ent, &kms_driver); + dev = drm_dev_alloc(&kms_driver, &pdev->dev); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + ret = pci_enable_device(pdev); + if (ret) + goto err_free; + + dev->pdev = pdev; +#ifdef __alpha__ + dev->hose = pdev->sysdata; +#endif + + pci_set_drvdata(pdev, dev); + + if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) + dev->agp = drm_agp_init(dev); + if (dev->agp) { + dev->agp->agp_mtrr = arch_phys_wc_add( + dev->agp->agp_info.aper_base, + dev->agp->agp_info.aper_size * + 1024 * 1024); + } + + ret = drm_dev_register(dev, ent->driver_data); + if (ret) + goto err_agp; + + return 0; + +err_agp: + if (dev->agp) + arch_phys_wc_del(dev->agp->agp_mtrr); + kfree(dev->agp); + pci_disable_device(pdev); +err_free: + drm_dev_put(dev); + return ret; } static void @@ -575,7 +614,7 @@ radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe, static struct drm_driver kms_driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_GEM | DRIVER_RENDER, + DRIVER_GEM | DRIVER_RENDER, .load = radeon_driver_load_kms, .open = radeon_driver_open_kms, .postclose = radeon_driver_postclose_kms, diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index d24f23a81656..dd2f19b8022b 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -32,6 +32,7 @@ #include <linux/uaccess.h> #include <linux/vga_switcheroo.h> +#include <drm/drm_agpsupport.h> #include <drm/drm_fb_helper.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> @@ -77,6 +78,11 @@ void radeon_driver_unload_kms(struct drm_device *dev) radeon_modeset_fini(rdev); radeon_device_fini(rdev); + if (dev->agp) + arch_phys_wc_del(dev->agp->agp_mtrr); + kfree(dev->agp); + dev->agp = NULL; + done_free: kfree(rdev); dev->dev_private = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 3b92311d30b9..b3380ffab4c2 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -528,7 +528,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm) r = -ENOMEM; nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); - if (nents != ttm->sg->nents) + if (nents == 0) goto release_sg; 
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 71ce6215956f..60c4c6a1aac6 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -661,7 +661,9 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) trace_drm_sched_process_job(s_fence); + dma_fence_get(&s_fence->finished); drm_sched_fence_finished(s_fence); + dma_fence_put(&s_fence->finished); wake_up_interruptible(&sched->wake_up_worker); } diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h index ceac7af9a172..29e367db6118 100644 --- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h +++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h @@ -53,6 +53,7 @@ cmdline_test(drm_cmdline_test_rotate_0) cmdline_test(drm_cmdline_test_rotate_90) cmdline_test(drm_cmdline_test_rotate_180) cmdline_test(drm_cmdline_test_rotate_270) +cmdline_test(drm_cmdline_test_rotate_multiple) cmdline_test(drm_cmdline_test_rotate_invalid_val) cmdline_test(drm_cmdline_test_rotate_truncated) cmdline_test(drm_cmdline_test_hmirror) diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c index 520f3e66a384..d96cd890def6 100644 --- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c +++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c @@ -856,6 +856,17 @@ static int drm_cmdline_test_rotate_270(void *ignored) return 0; } +static int drm_cmdline_test_rotate_multiple(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=0,rotate=90", + &no_connector, + &mode)); + + return 0; +} + static int drm_cmdline_test_rotate_invalid_val(void *ignored) { struct drm_cmdline_mode mode = { }; @@ -888,7 +899,7 @@ static int drm_cmdline_test_hmirror(void *ignored) FAIL_ON(!mode.specified); FAIL_ON(mode.xres != 720); FAIL_ON(mode.yres != 480); - FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_X); + FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X)); FAIL_ON(mode.refresh_specified); @@ -913,7 +924,7 @@ static int drm_cmdline_test_vmirror(void *ignored) FAIL_ON(!mode.specified); FAIL_ON(mode.xres != 720); FAIL_ON(mode.yres != 480); - FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_Y); + FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y)); FAIL_ON(mode.refresh_specified); diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index 7c24f8f832a5..4a64f7ae437a 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -107,48 +107,128 @@ static const struct de2_fmt_info de2_formats[] = { .csc = SUN8I_CSC_MODE_OFF, }, { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_XRGB4444, + .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { .drm_fmt = DRM_FORMAT_ABGR4444, .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_XBGR4444, + .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { .drm_fmt = DRM_FORMAT_RGBA4444, .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_RGBX4444, + .de2_fmt = 
SUN8I_MIXER_FBFMT_RGBA4444, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { .drm_fmt = DRM_FORMAT_BGRA4444, .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_BGRX4444, + .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { .drm_fmt = DRM_FORMAT_ARGB1555, .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_XRGB1555, + .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { .drm_fmt = DRM_FORMAT_ABGR1555, .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_XBGR1555, + .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { .drm_fmt = DRM_FORMAT_RGBA5551, .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_RGBX5551, + .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { .drm_fmt = DRM_FORMAT_BGRA5551, .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551, .rgb = true, .csc = SUN8I_CSC_MODE_OFF, }, { + /* for DE2 VI layer which ignores alpha */ + .drm_fmt = DRM_FORMAT_BGRX5551, + .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { + .drm_fmt = DRM_FORMAT_ARGB2101010, + .de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { + .drm_fmt = DRM_FORMAT_ABGR2101010, + .de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { + .drm_fmt = DRM_FORMAT_RGBA1010102, + .de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { + .drm_fmt = DRM_FORMAT_BGRA1010102, + .de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102, + .rgb = true, + .csc = SUN8I_CSC_MODE_OFF, + }, + { .drm_fmt = DRM_FORMAT_UYVY, .de2_fmt = SUN8I_MIXER_FBFMT_UYVY, .rgb = false, @@ -197,12 +277,6 @@ static const struct de2_fmt_info de2_formats[] = { .csc = SUN8I_CSC_MODE_YUV2RGB, }, { - .drm_fmt = DRM_FORMAT_YUV444, - .de2_fmt = SUN8I_MIXER_FBFMT_RGB888, - .rgb = true, - .csc = SUN8I_CSC_MODE_YUV2RGB, - }, - { .drm_fmt = DRM_FORMAT_YUV422, .de2_fmt = SUN8I_MIXER_FBFMT_YUV422, .rgb = false, @@ -221,12 +295,6 @@ static const struct de2_fmt_info de2_formats[] = { .csc = SUN8I_CSC_MODE_YUV2RGB, }, { - .drm_fmt = DRM_FORMAT_YVU444, - .de2_fmt = SUN8I_MIXER_FBFMT_RGB888, - .rgb = true, - .csc = SUN8I_CSC_MODE_YVU2RGB, - }, - { .drm_fmt = DRM_FORMAT_YVU422, .de2_fmt = SUN8I_MIXER_FBFMT_YUV422, .rgb = false, @@ -244,6 +312,18 @@ static const struct de2_fmt_info de2_formats[] = { .rgb = false, .csc = SUN8I_CSC_MODE_YVU2RGB, }, + { + .drm_fmt = DRM_FORMAT_P010, + .de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV, + .rgb = false, + .csc = SUN8I_CSC_MODE_YUV2RGB, + }, + { + .drm_fmt = DRM_FORMAT_P210, + .de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV, + .rgb = false, + .csc = SUN8I_CSC_MODE_YUV2RGB, + }, }; const struct de2_fmt_info *sun8i_mixer_format_info(u32 format) diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index c6cc94057faf..345b28b0a80a 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -93,6 +93,10 @@ #define SUN8I_MIXER_FBFMT_ABGR1555 17 #define SUN8I_MIXER_FBFMT_RGBA5551 18 #define 
SUN8I_MIXER_FBFMT_BGRA5551 19 +#define SUN8I_MIXER_FBFMT_ARGB2101010 20 +#define SUN8I_MIXER_FBFMT_ABGR2101010 21 +#define SUN8I_MIXER_FBFMT_RGBA1010102 22 +#define SUN8I_MIXER_FBFMT_BGRA1010102 23 #define SUN8I_MIXER_FBFMT_YUYV 0 #define SUN8I_MIXER_FBFMT_UYVY 1 @@ -109,6 +113,13 @@ /* format 12 is semi-planar YUV411 UVUV */ /* format 13 is semi-planar YUV411 VUVU */ #define SUN8I_MIXER_FBFMT_YUV411 14 +/* format 15 doesn't exist */ +/* format 16 is P010 YVU */ +#define SUN8I_MIXER_FBFMT_P010_YUV 17 +/* format 18 is P210 YVU */ +#define SUN8I_MIXER_FBFMT_P210_YUV 19 +/* format 20 is packed YVU444 10-bit */ +/* format 21 is packed YUV444 10-bit */ /* * Sub-engines listed bellow are unused for now. The EN registers are here only diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c index 42d445d23773..b8398ca18b0f 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c @@ -398,24 +398,66 @@ static const struct drm_plane_funcs sun8i_vi_layer_funcs = { }; /* - * While all RGB formats are supported, VI planes don't support - * alpha blending, so there is no point having formats with alpha - * channel if their opaque analog exist. + * While DE2 VI layer supports same RGB formats as UI layer, alpha + * channel is ignored. This structure lists all unique variants + * where alpha channel is replaced with "don't care" (X) channel. */ static const u32 sun8i_vi_layer_formats[] = { + DRM_FORMAT_BGR565, + DRM_FORMAT_BGR888, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_RGB565, + DRM_FORMAT_RGB888, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XRGB8888, + + DRM_FORMAT_NV16, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, + DRM_FORMAT_NV61, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_YUV411, + DRM_FORMAT_YUV420, + DRM_FORMAT_YUV422, + DRM_FORMAT_YVU411, + DRM_FORMAT_YVU420, + DRM_FORMAT_YVU422, +}; + +static const u32 sun8i_vi_layer_de3_formats[] = { DRM_FORMAT_ABGR1555, + DRM_FORMAT_ABGR2101010, DRM_FORMAT_ABGR4444, + DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB1555, + DRM_FORMAT_ARGB2101010, DRM_FORMAT_ARGB4444, + DRM_FORMAT_ARGB8888, DRM_FORMAT_BGR565, DRM_FORMAT_BGR888, + DRM_FORMAT_BGRA1010102, DRM_FORMAT_BGRA5551, DRM_FORMAT_BGRA4444, + DRM_FORMAT_BGRA8888, DRM_FORMAT_BGRX8888, DRM_FORMAT_RGB565, DRM_FORMAT_RGB888, + DRM_FORMAT_RGBA1010102, DRM_FORMAT_RGBA4444, DRM_FORMAT_RGBA5551, + DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, @@ -424,6 +466,8 @@ static const u32 sun8i_vi_layer_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_NV21, DRM_FORMAT_NV61, + DRM_FORMAT_P010, + DRM_FORMAT_P210, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, @@ -431,11 +475,9 @@ static const u32 sun8i_vi_layer_formats[] = { DRM_FORMAT_YUV411, DRM_FORMAT_YUV420, DRM_FORMAT_YUV422, - DRM_FORMAT_YUV444, DRM_FORMAT_YVU411, DRM_FORMAT_YVU420, DRM_FORMAT_YVU422, - DRM_FORMAT_YVU444, }; struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm, @@ -443,19 +485,27 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm, int index) { u32 supported_encodings, supported_ranges; + unsigned int plane_cnt, format_count; struct sun8i_vi_layer *layer; - unsigned int plane_cnt; + const u32 *formats; int ret; layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL); if (!layer) return 
ERR_PTR(-ENOMEM); + if (mixer->cfg->is_de3) { + formats = sun8i_vi_layer_de3_formats; + format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats); + } else { + formats = sun8i_vi_layer_formats; + format_count = ARRAY_SIZE(sun8i_vi_layer_formats); + } + /* possible crtcs are set later */ ret = drm_universal_plane_init(drm, &layer->plane, 0, &sun8i_vi_layer_funcs, - sun8i_vi_layer_formats, - ARRAY_SIZE(sun8i_vi_layer_formats), + formats, format_count, NULL, DRM_PLANE_TYPE_OVERLAY, NULL); if (ret) { dev_err(drm->dev, "Couldn't initialize layer\n"); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 49ed55779128..953c82a4f573 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -515,6 +515,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, fbo->base.base.resv = &fbo->base.base._resv; dma_resv_init(&fbo->base.base._resv); + fbo->base.base.dev = NULL; ret = dma_resv_trylock(&fbo->base.base._resv); WARN_ON(!ret); diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 017a9e0fc3bb..3af7ec80c7da 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -42,8 +42,8 @@ static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, * "f91a9dd35715 Fix unlinking resources from hash * table." (Feb 2019) fixes the bug. */ - static int handle; - handle++; + static atomic_t seqno = ATOMIC_INIT(0); + int handle = atomic_inc_return(&seqno); *resid = handle + 1; } else { int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); @@ -99,6 +99,7 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, return NULL; bo->base.base.funcs = &virtio_gpu_gem_funcs; + bo->base.map_cached = true; return &bo->base.base; } diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index ae79a7c66737..fa704153cb00 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -730,7 +730,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) if (data->has_sp) { input2 = input_allocate_device(); if (!input2) { - input_free_device(input2); + ret = -ENOMEM; goto exit; } diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 6ac8becc2372..d732d1d10caf 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -340,7 +340,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi, unsigned long **bit, int *max) { if (usage->hid == (HID_UP_CUSTOM | 0x0003) || - usage->hid == (HID_UP_MSVENDOR | 0x0003)) { + usage->hid == (HID_UP_MSVENDOR | 0x0003) || + usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) { /* The fn key on Apple USB keyboards */ set_bit(EV_REP, hi->input->evbit); hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c index 3f6abd190df4..db6da21ade06 100644 --- a/drivers/hid/hid-bigbenff.c +++ b/drivers/hid/hid-bigbenff.c @@ -174,6 +174,7 @@ static __u8 pid0902_rdesc_fixed[] = { struct bigben_device { struct hid_device *hid; struct hid_report *report; + bool removed; u8 led_state; /* LED1 = 1 .. 
LED4 = 8 */ u8 right_motor_on; /* right motor off/on 0/1 */ u8 left_motor_force; /* left motor force 0-255 */ @@ -190,6 +191,9 @@ static void bigben_worker(struct work_struct *work) struct bigben_device, worker); struct hid_field *report_field = bigben->report->field[0]; + if (bigben->removed) + return; + if (bigben->work_led) { bigben->work_led = false; report_field->value[0] = 0x01; /* 1 = led message */ @@ -220,10 +224,16 @@ static void bigben_worker(struct work_struct *work) static int hid_bigben_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) { - struct bigben_device *bigben = data; + struct hid_device *hid = input_get_drvdata(dev); + struct bigben_device *bigben = hid_get_drvdata(hid); u8 right_motor_on; u8 left_motor_force; + if (!bigben) { + hid_err(hid, "no device data\n"); + return 0; + } + if (effect->type != FF_RUMBLE) return 0; @@ -298,8 +308,8 @@ static void bigben_remove(struct hid_device *hid) { struct bigben_device *bigben = hid_get_drvdata(hid); + bigben->removed = true; cancel_work_sync(&bigben->worker); - hid_hw_close(hid); hid_hw_stop(hid); } @@ -319,6 +329,7 @@ static int bigben_probe(struct hid_device *hid, return -ENOMEM; hid_set_drvdata(hid, bigben); bigben->hid = hid; + bigben->removed = false; error = hid_parse(hid); if (error) { @@ -341,10 +352,10 @@ static int bigben_probe(struct hid_device *hid, INIT_WORK(&bigben->worker, bigben_worker); - error = input_ff_create_memless(hidinput->input, bigben, + error = input_ff_create_memless(hidinput->input, NULL, hid_bigben_play_effect); if (error) - return error; + goto error_hw_stop; name_sz = strlen(dev_name(&hid->dev)) + strlen(":red:bigben#") + 1; @@ -354,8 +365,10 @@ static int bigben_probe(struct hid_device *hid, sizeof(struct led_classdev) + name_sz, GFP_KERNEL ); - if (!led) - return -ENOMEM; + if (!led) { + error = -ENOMEM; + goto error_hw_stop; + } name = (void *)(&led[1]); snprintf(name, name_sz, "%s:red:bigben%d", @@ -369,7 +382,7 @@ static int bigben_probe(struct hid_device *hid, bigben->leds[n] = led; error = devm_led_classdev_register(&hid->dev, led); if (error) - return error; + goto error_hw_stop; } /* initial state: LED1 is on, no rumble effect */ @@ -383,6 +396,10 @@ static int bigben_probe(struct hid_device *hid, hid_info(hid, "LED and force feedback support for BigBen gamepad\n"); return 0; + +error_hw_stop: + hid_hw_stop(hid); + return error; } static __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc, diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 851fe54ea59e..359616e3efbb 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1741,7 +1741,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, rsize = ((report->size - 1) >> 3) + 1; - if (rsize > HID_MAX_BUFFER_SIZE) + if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) + rsize = HID_MAX_BUFFER_SIZE - 1; + else if (rsize > HID_MAX_BUFFER_SIZE) rsize = HID_MAX_BUFFER_SIZE; if (csize < rsize) { diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index 2aa4ed157aec..85a054f1ce38 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -533,6 +533,8 @@ static const struct hid_device_id hammer_devices[] = { { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MOONBALL) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) 
}, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) }, diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index dddfca555df9..0b6ee1dee625 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c @@ -193,8 +193,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device, goto cleanup; /* The pointer is not NULL when we resume from hibernation */ - if (input_device->hid_desc != NULL) - kfree(input_device->hid_desc); + kfree(input_device->hid_desc); input_device->hid_desc = kmemdup(desc, desc->bLength, GFP_ATOMIC); if (!input_device->hid_desc) @@ -207,8 +206,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device, } /* The pointer is not NULL when we resume from hibernation */ - if (input_device->report_desc != NULL) - kfree(input_device->report_desc); + kfree(input_device->report_desc); input_device->report_desc = kzalloc(input_device->report_desc_size, GFP_ATOMIC); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 3a400ce603c4..9f2213426556 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -478,6 +478,7 @@ #define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030 #define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d +#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044 #define USB_VENDOR_ID_GOTOP 0x08f2 #define USB_DEVICE_ID_SUPER_Q2 0x007f @@ -726,6 +727,7 @@ #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5 +#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d #define USB_VENDOR_ID_LG 0x1fd2 #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c index c436e12feb23..6c55682c5974 100644 --- a/drivers/hid/hid-ite.c +++ b/drivers/hid/hid-ite.c @@ -41,8 +41,9 @@ static const struct hid_device_id ite_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) }, /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. 
*/ - { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, - USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_SYNAPTICS, + USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) }, { } }; MODULE_DEVICE_TABLE(hid, ite_devices); diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 70e1cb928bf0..094f4f1b6555 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -1256,36 +1256,35 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage, { int status; - long charge_sts = (long)data[2]; + long flags = (long) data[2]; - *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; - switch (data[2] & 0xe0) { - case 0x00: - status = POWER_SUPPLY_STATUS_CHARGING; - break; - case 0x20: - status = POWER_SUPPLY_STATUS_FULL; - *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; - break; - case 0x40: + if (flags & 0x80) + switch (flags & 0x07) { + case 0: + status = POWER_SUPPLY_STATUS_CHARGING; + break; + case 1: + status = POWER_SUPPLY_STATUS_FULL; + *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; + break; + case 2: + status = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + default: + status = POWER_SUPPLY_STATUS_UNKNOWN; + break; + } + else status = POWER_SUPPLY_STATUS_DISCHARGING; - break; - case 0xe0: - status = POWER_SUPPLY_STATUS_NOT_CHARGING; - break; - default: - status = POWER_SUPPLY_STATUS_UNKNOWN; - } *charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD; - if (test_bit(3, &charge_sts)) { + if (test_bit(3, &flags)) { *charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST; } - if (test_bit(4, &charge_sts)) { + if (test_bit(4, &flags)) { *charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; } - - if (test_bit(5, &charge_sts)) { + if (test_bit(5, &flags)) { *level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; } diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c index a549c42e8c90..33c102a60992 100644 --- a/drivers/hid/hid-picolcd_fb.c +++ b/drivers/hid/hid-picolcd_fb.c @@ -458,9 +458,9 @@ static ssize_t picolcd_fb_update_rate_show(struct device *dev, if (ret >= PAGE_SIZE) break; else if (i == fb_update_rate) - ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i); + ret += scnprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i); else - ret += snprintf(buf+ret, PAGE_SIZE-ret, "%u ", i); + ret += scnprintf(buf+ret, PAGE_SIZE-ret, "%u ", i); if (ret > 0) buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n'; return ret; diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 0e7b2d998395..3735546bb524 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -103,6 +103,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET }, diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c index fb827c295842..4d25577a8573 100644 --- a/drivers/hid/hid-sensor-custom.c +++ b/drivers/hid/hid-sensor-custom.c @@ 
-313,7 +313,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr, while (i < ret) { if (i + attribute->size > ret) { - len += snprintf(&buf[len], + len += scnprintf(&buf[len], PAGE_SIZE - len, "%d ", values[i]); break; @@ -336,10 +336,10 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr, ++i; break; } - len += snprintf(&buf[len], PAGE_SIZE - len, + len += scnprintf(&buf[len], PAGE_SIZE - len, "%lld ", value); } - len += snprintf(&buf[len], PAGE_SIZE - len, "\n"); + len += scnprintf(&buf[len], PAGE_SIZE - len, "\n"); return len; } else if (input) diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c index d31ea82b84c1..a66f08041a1a 100644 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c @@ -342,6 +342,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { .driver_data = (void *)&sipodev_desc }, { + .ident = "Trekstor SURFBOOK E11B", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SURFBOOK E11B"), + }, + .driver_data = (void *)&sipodev_desc + }, + { .ident = "Direkt-Tek DTLAPY116-2", .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"), diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index a970b809d778..4140dea693e9 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -932,9 +932,9 @@ void hiddev_disconnect(struct hid_device *hid) hiddev->exist = 0; if (hiddev->open) { - mutex_unlock(&hiddev->existancelock); hid_hw_close(hiddev->hid); wake_up_interruptible(&hiddev->wait); + mutex_unlock(&hiddev->existancelock); } else { mutex_unlock(&hiddev->existancelock); kfree(hiddev); diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c index 9eec970cdfa5..89869c66fb9d 100644 --- a/drivers/hsi/clients/cmt_speech.c +++ b/drivers/hsi/clients/cmt_speech.c @@ -965,14 +965,13 @@ static int cs_hsi_buf_config(struct cs_hsi_iface *hi, if (old_state != hi->iface_state) { if (hi->iface_state == CS_STATE_CONFIGURED) { - pm_qos_add_request(&hi->pm_qos_req, - PM_QOS_CPU_DMA_LATENCY, + cpu_latency_qos_add_request(&hi->pm_qos_req, CS_QOS_LATENCY_FOR_DATA_USEC); local_bh_disable(); cs_hsi_read_on_data(hi); local_bh_enable(); } else if (old_state == CS_STATE_CONFIGURED) { - pm_qos_remove_request(&hi->pm_qos_req); + cpu_latency_qos_remove_request(&hi->pm_qos_req); } } return r; @@ -1075,8 +1074,8 @@ static void cs_hsi_stop(struct cs_hsi_iface *hi) WARN_ON(!cs_state_idle(hi->control_state)); WARN_ON(!cs_state_idle(hi->data_state)); - if (pm_qos_request_active(&hi->pm_qos_req)) - pm_qos_remove_request(&hi->pm_qos_req); + if (cpu_latency_qos_request_active(&hi->pm_qos_req)) + cpu_latency_qos_remove_request(&hi->pm_qos_req); spin_lock_bh(&hi->lock); cs_hsi_free_data(hi); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 47ac20aee06f..05a30832c6ba 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -280,6 +280,15 @@ config SENSORS_ASC7621 This driver can also be built as a module. If so, the module will be called asc7621. +config SENSORS_AXI_FAN_CONTROL + tristate "Analog Devices FAN Control HDL Core driver" + help + If you say yes here you get support for the Analog Devices + AXI HDL FAN monitoring core. + + This driver can also be built as a module. 
If so, the module + will be called axi-fan-control + config SENSORS_K8TEMP tristate "AMD Athlon64/FX or Opteron temperature sensor" depends on X86 && PCI diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 613f50987965..b0b9c8e57176 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -52,6 +52,7 @@ obj-$(CONFIG_SENSORS_AS370) += as370-hwmon.o obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o obj-$(CONFIG_SENSORS_ASPEED) += aspeed-pwm-tacho.o obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o +obj-$(CONFIG_SENSORS_AXI_FAN_CONTROL) += axi-fan-control.o obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 4cf25458f0b9..0db8ef4fd6e1 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -355,7 +355,9 @@ static ssize_t show_str(struct device *dev, struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_power_meter_resource *resource = acpi_dev->driver_data; acpi_string val; + int ret; + mutex_lock(&resource->lock); switch (attr->index) { case 0: val = resource->model_number; @@ -372,8 +374,9 @@ static ssize_t show_str(struct device *dev, val = ""; break; } - - return sprintf(buf, "%s\n", val); + ret = sprintf(buf, "%s\n", val); + mutex_unlock(&resource->lock); + return ret; } static ssize_t show_val(struct device *dev, @@ -817,11 +820,12 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event) resource = acpi_driver_data(device); - mutex_lock(&resource->lock); switch (event) { case METER_NOTIFY_CONFIG: + mutex_lock(&resource->lock); free_capabilities(resource); res = read_capabilities(resource); + mutex_unlock(&resource->lock); if (res) break; @@ -830,15 +834,12 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event) break; case METER_NOTIFY_TRIP: sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME); - update_meter(resource); break; case METER_NOTIFY_CAP: sysfs_notify(&device->dev.kobj, NULL, POWER_CAP_NAME); - update_cap(resource); break; case METER_NOTIFY_INTERVAL: sysfs_notify(&device->dev.kobj, NULL, POWER_AVG_INTERVAL_NAME); - update_avg_interval(resource); break; case METER_NOTIFY_CAPPING: sysfs_notify(&device->dev.kobj, NULL, POWER_ALARM_NAME); @@ -848,7 +849,6 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event) WARN(1, "Unexpected event %d\n", event); break; } - mutex_unlock(&resource->lock); acpi_bus_generate_netlink_event(ACPI_POWER_METER_CLASS, dev_name(&device->dev), event, 0); @@ -912,8 +912,8 @@ static int acpi_power_meter_remove(struct acpi_device *device) resource = acpi_driver_data(device); hwmon_device_unregister(resource->hwmon_dev); - free_capabilities(resource); remove_attrs(resource); + free_capabilities(resource); kfree(resource); return 0; diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index 9632e2e3c4bb..319a0519ebdb 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c @@ -413,7 +413,7 @@ static int ADT7462_REG_VOLT(struct adt7462_data *data, int which) return 0x95; break; } - return -ENODEV; + return 0; } /* Provide labels for sysfs */ diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 01c2eeb02aa9..054080443b47 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -19,6 +19,7 @@ #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/jiffies.h> +#include <linux/of.h> #include <linux/util_macros.h> 
/* Indexes for the sysfs hooks */ @@ -193,6 +194,7 @@ struct adt7475_data { unsigned long measure_updated; bool valid; + u8 config2; u8 config4; u8 config5; u8 has_voltage; @@ -1458,6 +1460,85 @@ static int adt7475_update_limits(struct i2c_client *client) return 0; } +static int set_property_bit(const struct i2c_client *client, char *property, + u8 *config, u8 bit_index) +{ + u32 prop_value = 0; + int ret = of_property_read_u32(client->dev.of_node, property, + &prop_value); + + if (!ret) { + if (prop_value) + *config |= (1 << bit_index); + else + *config &= ~(1 << bit_index); + } + + return ret; +} + +static int load_attenuators(const struct i2c_client *client, int chip, + struct adt7475_data *data) +{ + int ret; + + if (chip == adt7476 || chip == adt7490) { + set_property_bit(client, "adi,bypass-attenuator-in0", + &data->config4, 4); + set_property_bit(client, "adi,bypass-attenuator-in1", + &data->config4, 5); + set_property_bit(client, "adi,bypass-attenuator-in3", + &data->config4, 6); + set_property_bit(client, "adi,bypass-attenuator-in4", + &data->config4, 7); + + ret = i2c_smbus_write_byte_data(client, REG_CONFIG4, + data->config4); + if (ret < 0) + return ret; + } else if (chip == adt7473 || chip == adt7475) { + set_property_bit(client, "adi,bypass-attenuator-in1", + &data->config2, 5); + + ret = i2c_smbus_write_byte_data(client, REG_CONFIG2, + data->config2); + if (ret < 0) + return ret; + } + + return 0; +} + +static int adt7475_set_pwm_polarity(struct i2c_client *client) +{ + u32 states[ADT7475_PWM_COUNT]; + int ret, i; + u8 val; + + ret = of_property_read_u32_array(client->dev.of_node, + "adi,pwm-active-state", states, + ARRAY_SIZE(states)); + if (ret) + return ret; + + for (i = 0; i < ADT7475_PWM_COUNT; i++) { + ret = adt7475_read(PWM_CONFIG_REG(i)); + if (ret < 0) + return ret; + val = ret; + if (states[i]) + val &= ~BIT(4); + else + val |= BIT(4); + + ret = i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(i), val); + if (ret) + return ret; + } + + return 0; +} + static int adt7475_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -1472,7 +1553,7 @@ static int adt7475_probe(struct i2c_client *client, struct adt7475_data *data; struct device *hwmon_dev; int i, ret = 0, revision, group_num = 0; - u8 config2, config3; + u8 config3; data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); if (data == NULL) @@ -1546,8 +1627,12 @@ static int adt7475_probe(struct i2c_client *client, } /* Voltage attenuators can be bypassed, globally or individually */ - config2 = adt7475_read(REG_CONFIG2); - if (config2 & CONFIG2_ATTN) { + data->config2 = adt7475_read(REG_CONFIG2); + ret = load_attenuators(client, chip, data); + if (ret) + dev_warn(&client->dev, "Error configuring attenuator bypass\n"); + + if (data->config2 & CONFIG2_ATTN) { data->bypass_attn = (0x3 << 3) | 0x3; } else { data->bypass_attn = ((data->config4 & CONFIG4_ATTN_IN10) >> 4) | @@ -1562,6 +1647,10 @@ static int adt7475_probe(struct i2c_client *client, for (i = 0; i < ADT7475_PWM_COUNT; i++) adt7475_read_pwm(client, i); + ret = adt7475_set_pwm_polarity(client); + if (ret && ret != -EINVAL) + dev_warn(&client->dev, "Error configuring pwm polarity\n"); + /* Start monitoring */ switch (chip) { case adt7475: diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c new file mode 100644 index 000000000000..38d9cdb3db1a --- /dev/null +++ b/drivers/hwmon/axi-fan-control.c @@ -0,0 +1,469 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Fan Control HDL CORE driver + * + * Copyright 2019 
Analog Devices Inc. + */ +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/fpga/adi-axi-common.h> +#include <linux/hwmon.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff) +#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff) +#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff) + +/* register map */ +#define ADI_REG_RSTN 0x0080 +#define ADI_REG_PWM_WIDTH 0x0084 +#define ADI_REG_TACH_PERIOD 0x0088 +#define ADI_REG_TACH_TOLERANCE 0x008c +#define ADI_REG_PWM_PERIOD 0x00c0 +#define ADI_REG_TACH_MEASUR 0x00c4 +#define ADI_REG_TEMPERATURE 0x00c8 + +#define ADI_REG_IRQ_MASK 0x0040 +#define ADI_REG_IRQ_PENDING 0x0044 +#define ADI_REG_IRQ_SRC 0x0048 + +/* IRQ sources */ +#define ADI_IRQ_SRC_PWM_CHANGED BIT(0) +#define ADI_IRQ_SRC_TACH_ERR BIT(1) +#define ADI_IRQ_SRC_TEMP_INCREASE BIT(2) +#define ADI_IRQ_SRC_NEW_MEASUR BIT(3) +#define ADI_IRQ_SRC_MASK GENMASK(3, 0) +#define ADI_IRQ_MASK_OUT_ALL 0xFFFFFFFFU + +#define SYSFS_PWM_MAX 255 + +struct axi_fan_control_data { + void __iomem *base; + struct device *hdev; + unsigned long clk_rate; + int irq; + /* pulses per revolution */ + u32 ppr; + bool hw_pwm_req; + bool update_tacho_params; + u8 fan_fault; +}; + +static inline void axi_iowrite(const u32 val, const u32 reg, + const struct axi_fan_control_data *ctl) +{ + iowrite32(val, ctl->base + reg); +} + +static inline u32 axi_ioread(const u32 reg, + const struct axi_fan_control_data *ctl) +{ + return ioread32(ctl->base + reg); +} + +static long axi_fan_control_get_pwm_duty(const struct axi_fan_control_data *ctl) +{ + u32 pwm_width = axi_ioread(ADI_REG_PWM_WIDTH, ctl); + u32 pwm_period = axi_ioread(ADI_REG_PWM_PERIOD, ctl); + /* + * PWM_PERIOD is a RO register set by the core. It should never be 0. + * For now we are trusting the HW... + */ + return DIV_ROUND_CLOSEST(pwm_width * SYSFS_PWM_MAX, pwm_period); +} + +static int axi_fan_control_set_pwm_duty(const long val, + struct axi_fan_control_data *ctl) +{ + u32 pwm_period = axi_ioread(ADI_REG_PWM_PERIOD, ctl); + u32 new_width; + long __val = clamp_val(val, 0, SYSFS_PWM_MAX); + + new_width = DIV_ROUND_CLOSEST(__val * pwm_period, SYSFS_PWM_MAX); + + axi_iowrite(new_width, ADI_REG_PWM_WIDTH, ctl); + + return 0; +} + +static long axi_fan_control_get_fan_rpm(const struct axi_fan_control_data *ctl) +{ + const u32 tach = axi_ioread(ADI_REG_TACH_MEASUR, ctl); + + if (tach == 0) + /* should we return error, EAGAIN maybe? */ + return 0; + /* + * The tacho period should be: + * TACH = 60/(ppr * rpm), where rpm is revolutions per second + * and ppr is pulses per revolution. + * Given the tacho period, we can multiply it by the input clock + * so that we know how many clocks we need to have this period. + * From this, we can derive the RPM value. + */ + return DIV_ROUND_CLOSEST(60 * ctl->clk_rate, ctl->ppr * tach); +} + +static int axi_fan_control_read_temp(struct device *dev, u32 attr, long *val) +{ + struct axi_fan_control_data *ctl = dev_get_drvdata(dev); + long raw_temp; + + switch (attr) { + case hwmon_temp_input: + raw_temp = axi_ioread(ADI_REG_TEMPERATURE, ctl); + /* + * The formula for the temperature is: + * T = (ADC * 501.3743 / 2^bits) - 273.6777 + * It's multiplied by 1000 to have millidegrees as + * specified by the hwmon sysfs interface. 
+ */ + *val = ((raw_temp * 501374) >> 16) - 273677; + return 0; + default: + return -ENOTSUPP; + } +} + +static int axi_fan_control_read_fan(struct device *dev, u32 attr, long *val) +{ + struct axi_fan_control_data *ctl = dev_get_drvdata(dev); + + switch (attr) { + case hwmon_fan_fault: + *val = ctl->fan_fault; + /* clear it now */ + ctl->fan_fault = 0; + return 0; + case hwmon_fan_input: + *val = axi_fan_control_get_fan_rpm(ctl); + return 0; + default: + return -ENOTSUPP; + } +} + +static int axi_fan_control_read_pwm(struct device *dev, u32 attr, long *val) +{ + struct axi_fan_control_data *ctl = dev_get_drvdata(dev); + + switch (attr) { + case hwmon_pwm_input: + *val = axi_fan_control_get_pwm_duty(ctl); + return 0; + default: + return -ENOTSUPP; + } +} + +static int axi_fan_control_write_pwm(struct device *dev, u32 attr, long val) +{ + struct axi_fan_control_data *ctl = dev_get_drvdata(dev); + + switch (attr) { + case hwmon_pwm_input: + return axi_fan_control_set_pwm_duty(val, ctl); + default: + return -ENOTSUPP; + } +} + +static int axi_fan_control_read_labels(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + switch (type) { + case hwmon_fan: + *str = "FAN"; + return 0; + case hwmon_temp: + *str = "SYSMON4"; + return 0; + default: + return -ENOTSUPP; + } +} + +static int axi_fan_control_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + switch (type) { + case hwmon_fan: + return axi_fan_control_read_fan(dev, attr, val); + case hwmon_pwm: + return axi_fan_control_read_pwm(dev, attr, val); + case hwmon_temp: + return axi_fan_control_read_temp(dev, attr, val); + default: + return -ENOTSUPP; + } +} + +static int axi_fan_control_write(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + switch (type) { + case hwmon_pwm: + return axi_fan_control_write_pwm(dev, attr, val); + default: + return -ENOTSUPP; + } +} + +static umode_t axi_fan_control_fan_is_visible(const u32 attr) +{ + switch (attr) { + case hwmon_fan_input: + case hwmon_fan_fault: + case hwmon_fan_label: + return 0444; + default: + return 0; + } +} + +static umode_t axi_fan_control_pwm_is_visible(const u32 attr) +{ + switch (attr) { + case hwmon_pwm_input: + return 0644; + default: + return 0; + } +} + +static umode_t axi_fan_control_temp_is_visible(const u32 attr) +{ + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_label: + return 0444; + default: + return 0; + } +} + +static umode_t axi_fan_control_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_fan: + return axi_fan_control_fan_is_visible(attr); + case hwmon_pwm: + return axi_fan_control_pwm_is_visible(attr); + case hwmon_temp: + return axi_fan_control_temp_is_visible(attr); + default: + return 0; + } +} + +/* + * This core has two main ways of changing the PWM duty cycle. It is done + * either by a request from userspace (writing on pwm1_input) or by the + * core itself. When the change is done by the core, it will use predefined + * parameters to evaluate the tach signal and, in that case, we cannot set them. + * On the other hand, when the request is done by the user, with some arbitrary + * value that the core does not know about, we have to provide the tach + * parameters so that the core can evaluate the signal. In the IRQ handler we + * distinguish this by using the ADI_IRQ_SRC_TEMP_INCREASE interrupt.
This tells + * us that the core requested a new duty cycle. After this, there is a 5s delay + * during which the core waits for the fan rotation speed to stabilize. After that + * we get the ADI_IRQ_SRC_PWM_CHANGED irq, where we decide whether we need to set + * the tach parameters on the next tach measurement cycle (corresponding + * already to the new duty cycle) based on the %ctl->hw_pwm_req flag. + */ +static irqreturn_t axi_fan_control_irq_handler(int irq, void *data) +{ + struct axi_fan_control_data *ctl = (struct axi_fan_control_data *)data; + u32 irq_pending = axi_ioread(ADI_REG_IRQ_PENDING, ctl); + u32 clear_mask; + + if (irq_pending & ADI_IRQ_SRC_NEW_MEASUR) { + if (ctl->update_tacho_params) { + u32 new_tach = axi_ioread(ADI_REG_TACH_MEASUR, ctl); + + /* get 25% tolerance */ + u32 tach_tol = DIV_ROUND_CLOSEST(new_tach * 25, 100); + /* set new tacho parameters */ + axi_iowrite(new_tach, ADI_REG_TACH_PERIOD, ctl); + axi_iowrite(tach_tol, ADI_REG_TACH_TOLERANCE, ctl); + ctl->update_tacho_params = false; + } + } + + if (irq_pending & ADI_IRQ_SRC_PWM_CHANGED) { + /* + * if the pwm changes on behalf of software, + * we need to provide new tacho parameters to the core. + * Wait for the next measurement for that... + */ + if (!ctl->hw_pwm_req) { + ctl->update_tacho_params = true; + } else { + ctl->hw_pwm_req = false; + sysfs_notify(&ctl->hdev->kobj, NULL, "pwm1"); + } + } + + if (irq_pending & ADI_IRQ_SRC_TEMP_INCREASE) + /* hardware requested a new pwm */ + ctl->hw_pwm_req = true; + + if (irq_pending & ADI_IRQ_SRC_TACH_ERR) + ctl->fan_fault = 1; + + /* clear all interrupts */ + clear_mask = irq_pending & ADI_IRQ_SRC_MASK; + axi_iowrite(clear_mask, ADI_REG_IRQ_PENDING, ctl); + + return IRQ_HANDLED; +} + +static int axi_fan_control_init(struct axi_fan_control_data *ctl, + const struct device_node *np) +{ + int ret; + + /* get fan pulses per revolution */ + ret = of_property_read_u32(np, "pulses-per-revolution", &ctl->ppr); + if (ret) + return ret; + + /* 1, 2 and 4 are the typical and accepted values */ + if (ctl->ppr != 1 && ctl->ppr != 2 && ctl->ppr != 4) + return -EINVAL; + /* + * Enable all IRQs + */ + axi_iowrite(ADI_IRQ_MASK_OUT_ALL & + ~(ADI_IRQ_SRC_NEW_MEASUR | ADI_IRQ_SRC_TACH_ERR | + ADI_IRQ_SRC_PWM_CHANGED | ADI_IRQ_SRC_TEMP_INCREASE), + ADI_REG_IRQ_MASK, ctl); + + /* bring the device out of reset */ + axi_iowrite(0x01, ADI_REG_RSTN, ctl); + + return ret; +} + +static const struct hwmon_channel_info *axi_fan_control_info[] = { + HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT), + HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_FAULT | HWMON_F_LABEL), + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL), + NULL +}; + +static const struct hwmon_ops axi_fan_control_hwmon_ops = { + .is_visible = axi_fan_control_is_visible, + .read = axi_fan_control_read, + .write = axi_fan_control_write, + .read_string = axi_fan_control_read_labels, +}; + +static const struct hwmon_chip_info axi_chip_info = { + .ops = &axi_fan_control_hwmon_ops, + .info = axi_fan_control_info, +}; + +static const u32 version_1_0_0 = ADI_AXI_PCORE_VER(1, 0, 'a'); + +static const struct of_device_id axi_fan_control_of_match[] = { + { .compatible = "adi,axi-fan-control-1.00.a", + .data = (void *)&version_1_0_0}, + {}, +}; +MODULE_DEVICE_TABLE(of, axi_fan_control_of_match); + +static int axi_fan_control_probe(struct platform_device *pdev) +{ + struct axi_fan_control_data *ctl; + struct clk *clk; + const struct of_device_id *id; + const char *name = "axi_fan_control"; + u32 version; + int ret; + + id = 
of_match_node(axi_fan_control_of_match, pdev->dev.of_node); + if (!id) + return -EINVAL; + + ctl = devm_kzalloc(&pdev->dev, sizeof(*ctl), GFP_KERNEL); + if (!ctl) + return -ENOMEM; + + ctl->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ctl->base)) + return PTR_ERR(ctl->base); + + clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "clk_get failed with %ld\n", PTR_ERR(clk)); + return PTR_ERR(clk); + } + + ctl->clk_rate = clk_get_rate(clk); + if (!ctl->clk_rate) + return -EINVAL; + + version = axi_ioread(ADI_AXI_REG_VERSION, ctl); + if (ADI_AXI_PCORE_VER_MAJOR(version) != + ADI_AXI_PCORE_VER_MAJOR((*(u32 *)id->data))) { + dev_err(&pdev->dev, "Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n", + ADI_AXI_PCORE_VER_MAJOR((*(u32 *)id->data)), + ADI_AXI_PCORE_VER_MINOR((*(u32 *)id->data)), + ADI_AXI_PCORE_VER_PATCH((*(u32 *)id->data)), + ADI_AXI_PCORE_VER_MAJOR(version), + ADI_AXI_PCORE_VER_MINOR(version), + ADI_AXI_PCORE_VER_PATCH(version)); + return -ENODEV; + } + + ctl->irq = platform_get_irq(pdev, 0); + if (ctl->irq < 0) + return ctl->irq; + + ret = devm_request_threaded_irq(&pdev->dev, ctl->irq, NULL, + axi_fan_control_irq_handler, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, + pdev->driver_override, ctl); + if (ret) { + dev_err(&pdev->dev, "failed to request an irq, %d", ret); + return ret; + } + + ret = axi_fan_control_init(ctl, pdev->dev.of_node); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize device\n"); + return ret; + } + + ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev, + name, + ctl, + &axi_chip_info, + NULL); + + return PTR_ERR_OR_ZERO(ctl->hdev); +} + +static struct platform_driver axi_fan_control_driver = { + .driver = { + .name = "axi_fan_control_driver", + .of_match_table = axi_fan_control_of_match, + }, + .probe = axi_fan_control_probe, +}; +module_platform_driver(axi_fan_control_driver); + +MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>"); +MODULE_DESCRIPTION("Analog Devices Fan Control HDL CORE driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index d855c78fb8be..bb9211215a68 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -709,7 +709,7 @@ static int coretemp_cpu_offline(unsigned int cpu) return 0; } static const struct x86_cpu_id __initconst coretemp_ids[] = { - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM }, + X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_DTHERM, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, coretemp_ids); diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c index d05ab713566d..a4ec85207782 100644 --- a/drivers/hwmon/ibmaem.c +++ b/drivers/hwmon/ibmaem.c @@ -219,7 +219,7 @@ struct aem_read_sensor_req { struct aem_read_sensor_resp { struct aem_iana_id id; - u8 bytes[0]; + u8 bytes[]; } __packed; /* Data structures to talk to the IPMI layer */ diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c index 0e525cfbdfc5..a750647e66a4 100644 --- a/drivers/hwmon/ibmpowernv.c +++ b/drivers/hwmon/ibmpowernv.c @@ -186,7 +186,7 @@ static void make_sensor_label(struct device_node *np, u32 id; size_t n; - n = snprintf(sdata->label, sizeof(sdata->label), "%s", label); + n = scnprintf(sdata->label, sizeof(sdata->label), "%s", label); /* * Core temp pretty print @@ -199,11 +199,11 @@ static void make_sensor_label(struct device_node *np, * The digital thermal sensors are associated * with a core. 
*/ - n += snprintf(sdata->label + n, + n += scnprintf(sdata->label + n, sizeof(sdata->label) - n, " %d", cpuid); else - n += snprintf(sdata->label + n, + n += scnprintf(sdata->label + n, sizeof(sdata->label) - n, " phy%d", id); } @@ -211,7 +211,7 @@ static void make_sensor_label(struct device_node *np, * Membuffer pretty print */ if (!of_property_read_u32(np, "ibm,chip-id", &id)) - n += snprintf(sdata->label + n, sizeof(sdata->label) - n, + n += scnprintf(sdata->label + n, sizeof(sdata->label) - n, " %d", id & 0xffff); } diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index e39354ffe973..3f37d5d81fe4 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -96,13 +96,20 @@ struct k10temp_data { void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); int temp_offset; u32 temp_adjust_mask; - bool show_tdie; - u32 show_tccd; + u32 show_temp; u32 svi_addr[2]; + bool is_zen; bool show_current; int cfactor[2]; }; +#define TCTL_BIT 0 +#define TDIE_BIT 1 +#define TCCD_BIT(x) ((x) + 2) + +#define HAVE_TEMP(d, channel) ((d)->show_temp & BIT(channel)) +#define HAVE_TDIE(d) HAVE_TEMP(d, TDIE_BIT) + struct tctl_offset { u8 model; char const *id; @@ -180,8 +187,8 @@ static long get_raw_temp(struct k10temp_data *data) } const char *k10temp_temp_label[] = { - "Tdie", "Tctl", + "Tdie", "Tccd1", "Tccd2", "Tccd3", @@ -269,13 +276,13 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, switch (attr) { case hwmon_temp_input: switch (channel) { - case 0: /* Tdie */ - *val = get_raw_temp(data) - data->temp_offset; + case 0: /* Tctl */ + *val = get_raw_temp(data); if (*val < 0) *val = 0; break; - case 1: /* Tctl */ - *val = get_raw_temp(data); + case 1: /* Tdie */ + *val = get_raw_temp(data) - data->temp_offset; if (*val < 0) *val = 0; break; @@ -333,23 +340,11 @@ static umode_t k10temp_is_visible(const void *_data, case hwmon_temp: switch (attr) { case hwmon_temp_input: - switch (channel) { - case 0: /* Tdie, or Tctl if we don't show it */ - break; - case 1: /* Tctl */ - if (!data->show_tdie) - return 0; - break; - case 2 ... 9: /* Tccd{1-8} */ - if (!(data->show_tccd & BIT(channel - 2))) - return 0; - break; - default: + if (!HAVE_TEMP(data, channel)) return 0; - } break; case hwmon_temp_max: - if (channel || data->show_tdie) + if (channel || data->is_zen) return 0; break; case hwmon_temp_crit: @@ -368,20 +363,9 @@ static umode_t k10temp_is_visible(const void *_data, return 0; break; case hwmon_temp_label: - /* No labels if we don't show the die temperature */ - if (!data->show_tdie) - return 0; - switch (channel) { - case 0: /* Tdie */ - case 1: /* Tctl */ - break; - case 2 ... 
9: /* Tccd{1-8} */ - if (!(data->show_tccd & BIT(channel - 2))) - return 0; - break; - default: + /* Show temperature labels only on Zen CPUs */ + if (!data->is_zen || !HAVE_TEMP(data, channel)) return 0; - } break; default: return 0; @@ -480,7 +464,7 @@ static void k10temp_init_debugfs(struct k10temp_data *data) char name[32]; /* Only show debugfs data for Family 17h/18h CPUs */ - if (!data->show_tdie) + if (!data->is_zen) return; scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev)); @@ -546,7 +530,7 @@ static void k10temp_get_ccd_support(struct pci_dev *pdev, amd_smn_read(amd_pci_dev_to_node_id(pdev), F17H_M70H_CCD_TEMP(i), ®val); if (regval & F17H_M70H_CCD_TEMP_VALID) - data->show_tccd |= BIT(i); + data->show_temp |= BIT(TCCD_BIT(i)); } } @@ -573,6 +557,7 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; data->pdev = pdev; + data->show_temp |= BIT(TCTL_BIT); /* Always show Tctl */ if (boot_cpu_data.x86 == 0x15 && ((boot_cpu_data.x86_model & 0xf0) == 0x60 || @@ -582,7 +567,8 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_f17; - data->show_tdie = true; + data->show_temp |= BIT(TDIE_BIT); /* show Tdie */ + data->is_zen = true; switch (boot_cpu_data.x86_model) { case 0x1: /* Zen */ diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c index 1eeb9d7de2a0..733c48bf6c98 100644 --- a/drivers/hwmon/lm73.c +++ b/drivers/hwmon/lm73.c @@ -262,10 +262,20 @@ static int lm73_detect(struct i2c_client *new_client, return 0; } +static const struct of_device_id lm73_of_match[] = { + { + .compatible = "ti,lm73", + }, + { }, +}; + +MODULE_DEVICE_TABLE(of, lm73_of_match); + static struct i2c_driver lm73_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "lm73", + .of_match_table = lm73_of_match, }, .probe = lm73_probe, .id_table = lm73_ids, diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c index 281c81edabc6..1f5743d68984 100644 --- a/drivers/hwmon/nct7904.c +++ b/drivers/hwmon/nct7904.c @@ -7,6 +7,11 @@ * * Copyright (c) 2019 Advantech * Author: Amy.Shih <amy.shih@advantech.com.tw> + * + * Supports the following chips: + * + * Chip #vin #fan #pwm #temp #dts chip ID + * nct7904d 20 12 4 5 8 0xc5 */ #include <linux/module.h> @@ -820,6 +825,10 @@ static const struct hwmon_channel_info *nct7904_info[] = { HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM, HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM, HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM, + HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM, + HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM, + HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM, + HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM, HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM), HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT | HWMON_PWM_ENABLE, @@ -853,6 +862,18 @@ static const struct hwmon_channel_info *nct7904_info[] = { HWMON_T_CRIT_HYST, HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX | HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT | + HWMON_T_CRIT_HYST, + HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT | + HWMON_T_CRIT_HYST, + HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT | + HWMON_T_CRIT_HYST, + HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT | + HWMON_T_CRIT_HYST, + HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX | + 
HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT | HWMON_T_CRIT_HYST), NULL }; diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig index a9ea06204767..de12a565006d 100644 --- a/drivers/hwmon/pmbus/Kconfig +++ b/drivers/hwmon/pmbus/Kconfig @@ -92,10 +92,10 @@ config SENSORS_IRPS5401 be called irps5401. config SENSORS_ISL68137 - tristate "Intersil ISL68137" + tristate "Renesas Digital Multiphase Voltage Regulators" help - If you say yes here you get hardware monitoring support for Intersil - ISL68137. + If you say yes here you get hardware monitoring support for Renesas + digital multiphase voltage regulators. This driver can also be built as a module. If so, the module will be called isl68137. @@ -113,8 +113,8 @@ config SENSORS_LTC2978 tristate "Linear Technologies LTC2978 and compatibles" help If you say yes here you get hardware monitoring support for Linear - Technology LTC2974, LTC2975, LTC2977, LTC2978, LTC2980, LTC3880, - LTC3883, LTC3886, LTC3887, LTCM2987, LTM4675, and LTM4676. + Technology LTC2972, LTC2974, LTC2975, LTC2977, LTC2978, LTC2979, + LTC2980, and LTM2987. This driver can also be built as a module. If so, the module will be called ltc2978. @@ -123,9 +123,10 @@ config SENSORS_LTC2978_REGULATOR bool "Regulator support for LTC2978 and compatibles" depends on SENSORS_LTC2978 && REGULATOR help - If you say yes here you get regulator support for Linear - Technology LTC2974, LTC2977, LTC2978, LTC3880, LTC3883, LTM4676 - and LTM4686. + If you say yes here you get regulator support for Linear Technology + LTC3880, LTC3883, LTC3884, LTC3886, LTC3887, LTC3889, LTC7880, + LTM4644, LTM4675, LTM4676, LTM4677, LTM4678, LTM4680, LTM4686, + and LTM4700. config SENSORS_LTC3815 tristate "Linear Technologies LTC3815" @@ -209,10 +210,10 @@ config SENSORS_TPS40422 be called tps40422. config SENSORS_TPS53679 - tristate "TI TPS53679, TPS53688" + tristate "TI TPS53647, TPS53667, TPS53679, TPS53681, TPS53688" help If you say yes here you get hardware monitoring support for TI - TPS53679, TPS53688 + TPS53647, TPS53667, TPS53679, TPS53681, and TPS53688. This driver can also be built as a module. If so, the module will be called tps53679. 
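The PMBus client-driver hunks that follow (adm1275, ibm-cffps, ir35221, isl68137, lm25066, ltc2978, ltc3815, max16064, max20730, max31785, max34440, max8688) are largely mechanical fallout of one core interface change: the read_word_data callback, together with the pmbus_read_word_data() and pmbus_set_page() helpers, now takes an extra phase argument, and these hunks pass 0xff wherever no specific phase is being addressed. The sketch below shows the shape of an adapted callback; the "mychip" name and the MYCHIP_MFR_VIN_PEAK register are hypothetical placeholders, and only the signatures, the 0xff convention, and the -ENODATA fall-through mirror the hunks in this series.

/* Sketch only: a hypothetical client driver adapted to the new callback shape. */
#include <linux/i2c.h>
#include "pmbus.h"

#define MYCHIP_MFR_VIN_PEAK	0xd1	/* hypothetical vendor register */

static int mychip_read_word_data(struct i2c_client *client, int page,
				 int phase, int reg)
{
	int ret;

	switch (reg) {
	case PMBUS_VIRT_READ_VIN_MAX:
		/* Vendor-specific peak register; not tied to one phase. */
		ret = pmbus_read_word_data(client, page, 0xff,
					   MYCHIP_MFR_VIN_PEAK);
		break;
	default:
		/* Anything else is handled by the PMBus core. */
		ret = -ENODATA;
		break;
	}

	return ret;
}

Drivers that only forward standard registers simply pass phase straight through, as the ir35221 and lm25066 conversions below do.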
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c index 5caa37fbfc18..e25f541227da 100644 --- a/drivers/hwmon/pmbus/adm1275.c +++ b/drivers/hwmon/pmbus/adm1275.c @@ -226,7 +226,8 @@ static int adm1275_write_pmon_config(const struct adm1275_data *data, return ret; } -static int adm1275_read_word_data(struct i2c_client *client, int page, int reg) +static int adm1275_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { const struct pmbus_driver_info *info = pmbus_get_driver_info(client); const struct adm1275_data *data = to_adm1275_data(info); @@ -239,58 +240,68 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg) case PMBUS_IOUT_UC_FAULT_LIMIT: if (!data->have_uc_fault) return -ENXIO; - ret = pmbus_read_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT); + ret = pmbus_read_word_data(client, 0, 0xff, + ADM1275_IOUT_WARN2_LIMIT); break; case PMBUS_IOUT_OC_FAULT_LIMIT: if (!data->have_oc_fault) return -ENXIO; - ret = pmbus_read_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT); + ret = pmbus_read_word_data(client, 0, 0xff, + ADM1275_IOUT_WARN2_LIMIT); break; case PMBUS_VOUT_OV_WARN_LIMIT: if (data->have_vout) return -ENODATA; - ret = pmbus_read_word_data(client, 0, + ret = pmbus_read_word_data(client, 0, 0xff, ADM1075_VAUX_OV_WARN_LIMIT); break; case PMBUS_VOUT_UV_WARN_LIMIT: if (data->have_vout) return -ENODATA; - ret = pmbus_read_word_data(client, 0, + ret = pmbus_read_word_data(client, 0, 0xff, ADM1075_VAUX_UV_WARN_LIMIT); break; case PMBUS_READ_VOUT: if (data->have_vout) return -ENODATA; - ret = pmbus_read_word_data(client, 0, ADM1075_READ_VAUX); + ret = pmbus_read_word_data(client, 0, 0xff, + ADM1075_READ_VAUX); break; case PMBUS_VIRT_READ_IOUT_MIN: if (!data->have_iout_min) return -ENXIO; - ret = pmbus_read_word_data(client, 0, ADM1293_IOUT_MIN); + ret = pmbus_read_word_data(client, 0, 0xff, + ADM1293_IOUT_MIN); break; case PMBUS_VIRT_READ_IOUT_MAX: - ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_IOUT); + ret = pmbus_read_word_data(client, 0, 0xff, + ADM1275_PEAK_IOUT); break; case PMBUS_VIRT_READ_VOUT_MAX: - ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VOUT); + ret = pmbus_read_word_data(client, 0, 0xff, + ADM1275_PEAK_VOUT); break; case PMBUS_VIRT_READ_VIN_MAX: - ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VIN); + ret = pmbus_read_word_data(client, 0, 0xff, + ADM1275_PEAK_VIN); break; case PMBUS_VIRT_READ_PIN_MIN: if (!data->have_pin_min) return -ENXIO; - ret = pmbus_read_word_data(client, 0, ADM1293_PIN_MIN); + ret = pmbus_read_word_data(client, 0, 0xff, + ADM1293_PIN_MIN); break; case PMBUS_VIRT_READ_PIN_MAX: if (!data->have_pin_max) return -ENXIO; - ret = pmbus_read_word_data(client, 0, ADM1276_PEAK_PIN); + ret = pmbus_read_word_data(client, 0, 0xff, + ADM1276_PEAK_PIN); break; case PMBUS_VIRT_READ_TEMP_MAX: if (!data->have_temp_max) return -ENXIO; - ret = pmbus_read_word_data(client, 0, ADM1278_PEAK_TEMP); + ret = pmbus_read_word_data(client, 0, 0xff, + ADM1278_PEAK_TEMP); break; case PMBUS_VIRT_RESET_IOUT_HISTORY: case PMBUS_VIRT_RESET_VOUT_HISTORY: diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c index 3795fe55b84f..7d300f2f338d 100644 --- a/drivers/hwmon/pmbus/ibm-cffps.c +++ b/drivers/hwmon/pmbus/ibm-cffps.c @@ -33,9 +33,12 @@ #define CFFPS_INPUT_HISTORY_CMD 0xD6 #define CFFPS_INPUT_HISTORY_SIZE 100 +#define CFFPS_CCIN_REVISION GENMASK(7, 0) +#define CFFPS_CCIN_REVISION_LEGACY 0xde #define CFFPS_CCIN_VERSION GENMASK(15, 8) #define CFFPS_CCIN_VERSION_1 0x2b 
#define CFFPS_CCIN_VERSION_2 0x2e +#define CFFPS_CCIN_VERSION_3 0x51 /* STATUS_MFR_SPECIFIC bits */ #define CFFPS_MFR_FAN_FAULT BIT(0) @@ -148,7 +151,7 @@ static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf, struct ibm_cffps *psu = to_psu(idxp, idx); char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 }; - pmbus_set_page(psu->client, 0); + pmbus_set_page(psu->client, 0, 0xff); switch (idx) { case CFFPS_DEBUGFS_INPUT_HISTORY: @@ -247,7 +250,7 @@ static ssize_t ibm_cffps_debugfs_write(struct file *file, switch (idx) { case CFFPS_DEBUGFS_ON_OFF_CONFIG: - pmbus_set_page(psu->client, 0); + pmbus_set_page(psu->client, 0, 0xff); rc = simple_write_to_buffer(&data, 1, ppos, buf, count); if (rc <= 0) @@ -325,13 +328,13 @@ static int ibm_cffps_read_byte_data(struct i2c_client *client, int page, } static int ibm_cffps_read_word_data(struct i2c_client *client, int page, - int reg) + int phase, int reg) { int rc, mfr; switch (reg) { case PMBUS_STATUS_WORD: - rc = pmbus_read_word_data(client, page, reg); + rc = pmbus_read_word_data(client, page, phase, reg); if (rc < 0) return rc; @@ -348,7 +351,8 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page, rc |= PB_STATUS_OFF; break; case PMBUS_VIRT_READ_VMON: - rc = pmbus_read_word_data(client, page, CFFPS_12VCS_VOUT_CMD); + rc = pmbus_read_word_data(client, page, phase, + CFFPS_12VCS_VOUT_CMD); break; default: rc = -ENODATA; @@ -379,7 +383,7 @@ static int ibm_cffps_led_brightness_set(struct led_classdev *led_cdev, dev_dbg(&psu->client->dev, "LED brightness set: %d. Command: %d.\n", brightness, next_led_state); - pmbus_set_page(psu->client, 0); + pmbus_set_page(psu->client, 0, 0xff); rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD, next_led_state); @@ -401,7 +405,7 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev, dev_dbg(&psu->client->dev, "LED blink set.\n"); - pmbus_set_page(psu->client, 0); + pmbus_set_page(psu->client, 0, 0xff); rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD, CFFPS_LED_BLINK); @@ -485,11 +489,14 @@ static int ibm_cffps_probe(struct i2c_client *client, vs = (enum versions)id->driver_data; if (vs == cffps_unknown) { + u16 ccin_revision = 0; u16 ccin_version = CFFPS_CCIN_VERSION_1; int ccin = i2c_smbus_read_word_swapped(client, CFFPS_CCIN_CMD); - if (ccin > 0) + if (ccin > 0) { + ccin_revision = FIELD_GET(CFFPS_CCIN_REVISION, ccin); ccin_version = FIELD_GET(CFFPS_CCIN_VERSION, ccin); + } switch (ccin_version) { default: @@ -499,6 +506,12 @@ static int ibm_cffps_probe(struct i2c_client *client, case CFFPS_CCIN_VERSION_2: vs = cffps2; break; + case CFFPS_CCIN_VERSION_3: + if (ccin_revision == CFFPS_CCIN_REVISION_LEGACY) + vs = cffps1; + else + vs = cffps2; + break; } /* Set the client name to include the version number. 
*/ diff --git a/drivers/hwmon/pmbus/ir35221.c b/drivers/hwmon/pmbus/ir35221.c index 0d878bcd6d26..3eea3e006a96 100644 --- a/drivers/hwmon/pmbus/ir35221.c +++ b/drivers/hwmon/pmbus/ir35221.c @@ -21,37 +21,42 @@ #define IR35221_MFR_IOUT_VALLEY 0xcb #define IR35221_MFR_TEMP_VALLEY 0xcc -static int ir35221_read_word_data(struct i2c_client *client, int page, int reg) +static int ir35221_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { int ret; switch (reg) { case PMBUS_VIRT_READ_VIN_MAX: - ret = pmbus_read_word_data(client, page, IR35221_MFR_VIN_PEAK); + ret = pmbus_read_word_data(client, page, phase, + IR35221_MFR_VIN_PEAK); break; case PMBUS_VIRT_READ_VOUT_MAX: - ret = pmbus_read_word_data(client, page, IR35221_MFR_VOUT_PEAK); + ret = pmbus_read_word_data(client, page, phase, + IR35221_MFR_VOUT_PEAK); break; case PMBUS_VIRT_READ_IOUT_MAX: - ret = pmbus_read_word_data(client, page, IR35221_MFR_IOUT_PEAK); + ret = pmbus_read_word_data(client, page, phase, + IR35221_MFR_IOUT_PEAK); break; case PMBUS_VIRT_READ_TEMP_MAX: - ret = pmbus_read_word_data(client, page, IR35221_MFR_TEMP_PEAK); + ret = pmbus_read_word_data(client, page, phase, + IR35221_MFR_TEMP_PEAK); break; case PMBUS_VIRT_READ_VIN_MIN: - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, IR35221_MFR_VIN_VALLEY); break; case PMBUS_VIRT_READ_VOUT_MIN: - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, IR35221_MFR_VOUT_VALLEY); break; case PMBUS_VIRT_READ_IOUT_MIN: - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, IR35221_MFR_IOUT_VALLEY); break; case PMBUS_VIRT_READ_TEMP_MIN: - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, IR35221_MFR_TEMP_VALLEY); break; default: diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c index 515596c92fe1..4d2315208bb5 100644 --- a/drivers/hwmon/pmbus/isl68137.c +++ b/drivers/hwmon/pmbus/isl68137.c @@ -1,8 +1,9 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Hardware monitoring driver for Intersil ISL68137 + * Hardware monitoring driver for Renesas Digital Multiphase Voltage Regulators * * Copyright (c) 2017 Google Inc + * Copyright (c) 2020 Renesas Electronics America * */ @@ -14,9 +15,19 @@ #include <linux/module.h> #include <linux/string.h> #include <linux/sysfs.h> + #include "pmbus.h" #define ISL68137_VOUT_AVS 0x30 +#define RAA_DMPVR2_READ_VMON 0xc8 + +enum versions { + isl68137, + raa_dmpvr2_1rail, + raa_dmpvr2_2rail, + raa_dmpvr2_3rail, + raa_dmpvr2_hv, +}; static ssize_t isl68137_avs_enable_show_page(struct i2c_client *client, int page, @@ -49,7 +60,8 @@ static ssize_t isl68137_avs_enable_store_page(struct i2c_client *client, * enabling AVS control is the workaround. 
*/ if (op_val == ISL68137_VOUT_AVS) { - rc = pmbus_read_word_data(client, page, PMBUS_VOUT_COMMAND); + rc = pmbus_read_word_data(client, page, 0xff, + PMBUS_VOUT_COMMAND); if (rc < 0) return rc; @@ -98,13 +110,31 @@ static const struct attribute_group enable_group = { .attrs = enable_attrs, }; -static const struct attribute_group *attribute_groups[] = { +static const struct attribute_group *isl68137_attribute_groups[] = { &enable_group, NULL, }; -static struct pmbus_driver_info isl68137_info = { - .pages = 2, +static int raa_dmpvr2_read_word_data(struct i2c_client *client, int page, + int phase, int reg) +{ + int ret; + + switch (reg) { + case PMBUS_VIRT_READ_VMON: + ret = pmbus_read_word_data(client, page, phase, + RAA_DMPVR2_READ_VMON); + break; + default: + ret = -ENODATA; + break; + } + + return ret; +} + +static struct pmbus_driver_info raa_dmpvr_info = { + .pages = 3, .format[PSC_VOLTAGE_IN] = direct, .format[PSC_VOLTAGE_OUT] = direct, .format[PSC_CURRENT_IN] = direct, @@ -113,7 +143,7 @@ static struct pmbus_driver_info isl68137_info = { .format[PSC_TEMPERATURE] = direct, .m[PSC_VOLTAGE_IN] = 1, .b[PSC_VOLTAGE_IN] = 0, - .R[PSC_VOLTAGE_IN] = 3, + .R[PSC_VOLTAGE_IN] = 2, .m[PSC_VOLTAGE_OUT] = 1, .b[PSC_VOLTAGE_OUT] = 0, .R[PSC_VOLTAGE_OUT] = 3, @@ -133,24 +163,76 @@ static struct pmbus_driver_info isl68137_info = { | PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT - | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT, - .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT - | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT, - .groups = attribute_groups, + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT + | PMBUS_HAVE_VMON, + .func[1] = PMBUS_HAVE_IIN | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT + | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_TEMP + | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_IOUT + | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT, + .func[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT + | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_TEMP + | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_IOUT + | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT, }; static int isl68137_probe(struct i2c_client *client, const struct i2c_device_id *id) { - return pmbus_do_probe(client, id, &isl68137_info); + struct pmbus_driver_info *info; + + info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + memcpy(info, &raa_dmpvr_info, sizeof(*info)); + + switch (id->driver_data) { + case isl68137: + info->pages = 2; + info->R[PSC_VOLTAGE_IN] = 3; + info->func[0] &= ~PMBUS_HAVE_VMON; + info->func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT + | PMBUS_HAVE_POUT; + info->groups = isl68137_attribute_groups; + break; + case raa_dmpvr2_1rail: + info->pages = 1; + info->read_word_data = raa_dmpvr2_read_word_data; + break; + case raa_dmpvr2_2rail: + info->pages = 2; + info->read_word_data = raa_dmpvr2_read_word_data; + break; + case raa_dmpvr2_3rail: + info->read_word_data = raa_dmpvr2_read_word_data; + break; + case raa_dmpvr2_hv: + info->pages = 1; + info->R[PSC_VOLTAGE_IN] = 1; + info->m[PSC_VOLTAGE_OUT] = 2; + info->R[PSC_VOLTAGE_OUT] = 2; + info->m[PSC_CURRENT_IN] = 2; + info->m[PSC_POWER] = 2; + info->R[PSC_POWER] = -1; + info->read_word_data = raa_dmpvr2_read_word_data; + break; + default: + return -ENODEV; + } + + return pmbus_do_probe(client, 
id, info); } -static const struct i2c_device_id isl68137_id[] = { - {"isl68137", 0}, +static const struct i2c_device_id raa_dmpvr_id[] = { + {"isl68137", isl68137}, + {"raa_dmpvr2_1rail", raa_dmpvr2_1rail}, + {"raa_dmpvr2_2rail", raa_dmpvr2_2rail}, + {"raa_dmpvr2_3rail", raa_dmpvr2_3rail}, + {"raa_dmpvr2_hv", raa_dmpvr2_hv}, {} }; -MODULE_DEVICE_TABLE(i2c, isl68137_id); +MODULE_DEVICE_TABLE(i2c, raa_dmpvr_id); /* This is the driver that will be inserted */ static struct i2c_driver isl68137_driver = { @@ -159,11 +241,11 @@ static struct i2c_driver isl68137_driver = { }, .probe = isl68137_probe, .remove = pmbus_do_remove, - .id_table = isl68137_id, + .id_table = raa_dmpvr_id, }; module_i2c_driver(isl68137_driver); MODULE_AUTHOR("Maxim Sloyko <maxims@google.com>"); -MODULE_DESCRIPTION("PMBus driver for Intersil ISL68137"); +MODULE_DESCRIPTION("PMBus driver for Renesas digital multiphase voltage regulators"); MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c index 05fce86f1f81..9e4cf0800186 100644 --- a/drivers/hwmon/pmbus/lm25066.c +++ b/drivers/hwmon/pmbus/lm25066.c @@ -211,7 +211,8 @@ struct lm25066_data { #define to_lm25066_data(x) container_of(x, struct lm25066_data, info) -static int lm25066_read_word_data(struct i2c_client *client, int page, int reg) +static int lm25066_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { const struct pmbus_driver_info *info = pmbus_get_driver_info(client); const struct lm25066_data *data = to_lm25066_data(info); @@ -219,7 +220,7 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg) switch (reg) { case PMBUS_VIRT_READ_VMON: - ret = pmbus_read_word_data(client, 0, LM25066_READ_VAUX); + ret = pmbus_read_word_data(client, 0, 0xff, LM25066_READ_VAUX); if (ret < 0) break; /* Adjust returned value to match VIN coefficients */ @@ -244,33 +245,40 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg) } break; case PMBUS_READ_IIN: - ret = pmbus_read_word_data(client, 0, LM25066_MFR_READ_IIN); + ret = pmbus_read_word_data(client, 0, 0xff, + LM25066_MFR_READ_IIN); break; case PMBUS_READ_PIN: - ret = pmbus_read_word_data(client, 0, LM25066_MFR_READ_PIN); + ret = pmbus_read_word_data(client, 0, 0xff, + LM25066_MFR_READ_PIN); break; case PMBUS_IIN_OC_WARN_LIMIT: - ret = pmbus_read_word_data(client, 0, + ret = pmbus_read_word_data(client, 0, 0xff, LM25066_MFR_IIN_OC_WARN_LIMIT); break; case PMBUS_PIN_OP_WARN_LIMIT: - ret = pmbus_read_word_data(client, 0, + ret = pmbus_read_word_data(client, 0, 0xff, LM25066_MFR_PIN_OP_WARN_LIMIT); break; case PMBUS_VIRT_READ_VIN_AVG: - ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_VIN); + ret = pmbus_read_word_data(client, 0, 0xff, + LM25066_READ_AVG_VIN); break; case PMBUS_VIRT_READ_VOUT_AVG: - ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_VOUT); + ret = pmbus_read_word_data(client, 0, 0xff, + LM25066_READ_AVG_VOUT); break; case PMBUS_VIRT_READ_IIN_AVG: - ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_IIN); + ret = pmbus_read_word_data(client, 0, 0xff, + LM25066_READ_AVG_IIN); break; case PMBUS_VIRT_READ_PIN_AVG: - ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_PIN); + ret = pmbus_read_word_data(client, 0, 0xff, + LM25066_READ_AVG_PIN); break; case PMBUS_VIRT_READ_PIN_MAX: - ret = pmbus_read_word_data(client, 0, LM25066_READ_PIN_PEAK); + ret = pmbus_read_word_data(client, 0, 0xff, + LM25066_READ_PIN_PEAK); break; case PMBUS_VIRT_RESET_PIN_HISTORY: ret = 0; @@ -288,13 +296,14 
@@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg) return ret; } -static int lm25056_read_word_data(struct i2c_client *client, int page, int reg) +static int lm25056_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { int ret; switch (reg) { case PMBUS_VIRT_VMON_UV_WARN_LIMIT: - ret = pmbus_read_word_data(client, 0, + ret = pmbus_read_word_data(client, 0, 0xff, LM25056_VAUX_UV_WARN_LIMIT); if (ret < 0) break; @@ -302,7 +311,7 @@ static int lm25056_read_word_data(struct i2c_client *client, int page, int reg) ret = DIV_ROUND_CLOSEST(ret * 293, 6140); break; case PMBUS_VIRT_VMON_OV_WARN_LIMIT: - ret = pmbus_read_word_data(client, 0, + ret = pmbus_read_word_data(client, 0, 0xff, LM25056_VAUX_OV_WARN_LIMIT); if (ret < 0) break; @@ -310,7 +319,7 @@ static int lm25056_read_word_data(struct i2c_client *client, int page, int reg) ret = DIV_ROUND_CLOSEST(ret * 293, 6140); break; default: - ret = lm25066_read_word_data(client, page, reg); + ret = lm25066_read_word_data(client, page, phase, reg); break; } return ret; diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c index a91ed01abb68..7b0e6b37e247 100644 --- a/drivers/hwmon/pmbus/ltc2978.c +++ b/drivers/hwmon/pmbus/ltc2978.c @@ -19,8 +19,15 @@ #include <linux/regulator/driver.h> #include "pmbus.h" -enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882, - ltc3883, ltc3886, ltc3887, ltm2987, ltm4675, ltm4676, ltm4686 }; +enum chips { + /* Managers */ + ltc2972, ltc2974, ltc2975, ltc2977, ltc2978, ltc2979, ltc2980, + /* Controllers */ + ltc3880, ltc3882, ltc3883, ltc3884, ltc3886, ltc3887, ltc3889, ltc7880, + /* Modules */ + ltm2987, ltm4664, ltm4675, ltm4676, ltm4677, ltm4678, ltm4680, ltm4686, + ltm4700, +}; /* Common for all chips */ #define LTC2978_MFR_VOUT_PEAK 0xdd @@ -43,9 +50,10 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882, #define LTC3880_MFR_CLEAR_PEAKS 0xe3 #define LTC3880_MFR_TEMPERATURE2_PEAK 0xf4 -/* LTC3883 and LTC3886 only */ +/* LTC3883, LTC3884, LTC3886, LTC3889 and LTC7880 only */ #define LTC3883_MFR_IIN_PEAK 0xe1 + /* LTC2975 only */ #define LTC2975_MFR_IIN_PEAK 0xc4 #define LTC2975_MFR_IIN_MIN 0xc5 @@ -54,27 +62,41 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882, #define LTC2978_ID_MASK 0xfff0 +#define LTC2972_ID 0x0310 #define LTC2974_ID 0x0210 #define LTC2975_ID 0x0220 #define LTC2977_ID 0x0130 #define LTC2978_ID_REV1 0x0110 /* Early revision */ #define LTC2978_ID_REV2 0x0120 +#define LTC2979_ID_A 0x8060 +#define LTC2979_ID_B 0x8070 #define LTC2980_ID_A 0x8030 /* A/B for two die IDs */ #define LTC2980_ID_B 0x8040 #define LTC3880_ID 0x4020 #define LTC3882_ID 0x4200 #define LTC3882_ID_D1 0x4240 /* Dash 1 */ #define LTC3883_ID 0x4300 +#define LTC3884_ID 0x4C00 #define LTC3886_ID 0x4600 #define LTC3887_ID 0x4700 #define LTM2987_ID_A 0x8010 /* A/B for two die IDs */ #define LTM2987_ID_B 0x8020 +#define LTC3889_ID 0x4900 +#define LTC7880_ID 0x49E0 +#define LTM4664_ID 0x4120 #define LTM4675_ID 0x47a0 #define LTM4676_ID_REV1 0x4400 #define LTM4676_ID_REV2 0x4480 #define LTM4676A_ID 0x47e0 +#define LTM4677_ID_REV1 0x47B0 +#define LTM4677_ID_REV2 0x47D0 +#define LTM4678_ID_REV1 0x4100 +#define LTM4678_ID_REV2 0x4110 +#define LTM4680_ID 0x4140 #define LTM4686_ID 0x4770 +#define LTM4700_ID 0x4130 +#define LTC2972_NUM_PAGES 2 #define LTC2974_NUM_PAGES 4 #define LTC2978_NUM_PAGES 8 #define LTC3880_NUM_PAGES 2 @@ -151,7 +173,8 @@ static int ltc_wait_ready(struct i2c_client *client) 
return -ETIMEDOUT; } -static int ltc_read_word_data(struct i2c_client *client, int page, int reg) +static int ltc_read_word_data(struct i2c_client *client, int page, int phase, + int reg) { int ret; @@ -159,7 +182,7 @@ static int ltc_read_word_data(struct i2c_client *client, int page, int reg) if (ret < 0) return ret; - return pmbus_read_word_data(client, page, reg); + return pmbus_read_word_data(client, page, 0xff, reg); } static int ltc_read_byte_data(struct i2c_client *client, int page, int reg) @@ -202,7 +225,7 @@ static int ltc_get_max(struct ltc2978_data *data, struct i2c_client *client, { int ret; - ret = ltc_read_word_data(client, page, reg); + ret = ltc_read_word_data(client, page, 0xff, reg); if (ret >= 0) { if (lin11_to_val(ret) > lin11_to_val(*pmax)) *pmax = ret; @@ -216,7 +239,7 @@ static int ltc_get_min(struct ltc2978_data *data, struct i2c_client *client, { int ret; - ret = ltc_read_word_data(client, page, reg); + ret = ltc_read_word_data(client, page, 0xff, reg); if (ret >= 0) { if (lin11_to_val(ret) < lin11_to_val(*pmin)) *pmin = ret; @@ -238,7 +261,8 @@ static int ltc2978_read_word_data_common(struct i2c_client *client, int page, &data->vin_max); break; case PMBUS_VIRT_READ_VOUT_MAX: - ret = ltc_read_word_data(client, page, LTC2978_MFR_VOUT_PEAK); + ret = ltc_read_word_data(client, page, 0xff, + LTC2978_MFR_VOUT_PEAK); if (ret >= 0) { /* * VOUT is 16 bit unsigned with fixed exponent, @@ -269,7 +293,8 @@ static int ltc2978_read_word_data_common(struct i2c_client *client, int page, return ret; } -static int ltc2978_read_word_data(struct i2c_client *client, int page, int reg) +static int ltc2978_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { const struct pmbus_driver_info *info = pmbus_get_driver_info(client); struct ltc2978_data *data = to_ltc2978_data(info); @@ -281,7 +306,8 @@ static int ltc2978_read_word_data(struct i2c_client *client, int page, int reg) &data->vin_min); break; case PMBUS_VIRT_READ_VOUT_MIN: - ret = ltc_read_word_data(client, page, LTC2978_MFR_VOUT_MIN); + ret = ltc_read_word_data(client, page, phase, + LTC2978_MFR_VOUT_MIN); if (ret >= 0) { /* * VOUT_MIN is known to not be supported on some lots @@ -314,7 +340,8 @@ static int ltc2978_read_word_data(struct i2c_client *client, int page, int reg) return ret; } -static int ltc2974_read_word_data(struct i2c_client *client, int page, int reg) +static int ltc2974_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { const struct pmbus_driver_info *info = pmbus_get_driver_info(client); struct ltc2978_data *data = to_ltc2978_data(info); @@ -333,13 +360,14 @@ static int ltc2974_read_word_data(struct i2c_client *client, int page, int reg) ret = 0; break; default: - ret = ltc2978_read_word_data(client, page, reg); + ret = ltc2978_read_word_data(client, page, phase, reg); break; } return ret; } -static int ltc2975_read_word_data(struct i2c_client *client, int page, int reg) +static int ltc2975_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { const struct pmbus_driver_info *info = pmbus_get_driver_info(client); struct ltc2978_data *data = to_ltc2978_data(info); @@ -367,13 +395,14 @@ static int ltc2975_read_word_data(struct i2c_client *client, int page, int reg) ret = 0; break; default: - ret = ltc2978_read_word_data(client, page, reg); + ret = ltc2978_read_word_data(client, page, phase, reg); break; } return ret; } -static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg) +static int ltc3880_read_word_data(struct 
i2c_client *client, int page, + int phase, int reg) { const struct pmbus_driver_info *info = pmbus_get_driver_info(client); struct ltc2978_data *data = to_ltc2978_data(info); @@ -405,7 +434,8 @@ static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg) return ret; } -static int ltc3883_read_word_data(struct i2c_client *client, int page, int reg) +static int ltc3883_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { const struct pmbus_driver_info *info = pmbus_get_driver_info(client); struct ltc2978_data *data = to_ltc2978_data(info); @@ -420,7 +450,7 @@ static int ltc3883_read_word_data(struct i2c_client *client, int page, int reg) ret = 0; break; default: - ret = ltc3880_read_word_data(client, page, reg); + ret = ltc3880_read_word_data(client, page, phase, reg); break; } return ret; @@ -492,20 +522,30 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page, } static const struct i2c_device_id ltc2978_id[] = { + {"ltc2972", ltc2972}, {"ltc2974", ltc2974}, {"ltc2975", ltc2975}, {"ltc2977", ltc2977}, {"ltc2978", ltc2978}, + {"ltc2979", ltc2979}, {"ltc2980", ltc2980}, {"ltc3880", ltc3880}, {"ltc3882", ltc3882}, {"ltc3883", ltc3883}, + {"ltc3884", ltc3884}, {"ltc3886", ltc3886}, {"ltc3887", ltc3887}, + {"ltc3889", ltc3889}, + {"ltc7880", ltc7880}, {"ltm2987", ltm2987}, + {"ltm4664", ltm4664}, {"ltm4675", ltm4675}, {"ltm4676", ltm4676}, + {"ltm4677", ltm4677}, + {"ltm4678", ltm4678}, + {"ltm4680", ltm4680}, {"ltm4686", ltm4686}, + {"ltm4700", ltm4700}, {} }; MODULE_DEVICE_TABLE(i2c, ltc2978_id); @@ -555,7 +595,9 @@ static int ltc2978_get_id(struct i2c_client *client) chip_id &= LTC2978_ID_MASK; - if (chip_id == LTC2974_ID) + if (chip_id == LTC2972_ID) + return ltc2972; + else if (chip_id == LTC2974_ID) return ltc2974; else if (chip_id == LTC2975_ID) return ltc2975; @@ -563,6 +605,8 @@ static int ltc2978_get_id(struct i2c_client *client) return ltc2977; else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2) return ltc2978; + else if (chip_id == LTC2979_ID_A || chip_id == LTC2979_ID_B) + return ltc2979; else if (chip_id == LTC2980_ID_A || chip_id == LTC2980_ID_B) return ltc2980; else if (chip_id == LTC3880_ID) @@ -571,19 +615,35 @@ static int ltc2978_get_id(struct i2c_client *client) return ltc3882; else if (chip_id == LTC3883_ID) return ltc3883; + else if (chip_id == LTC3884_ID) + return ltc3884; else if (chip_id == LTC3886_ID) return ltc3886; else if (chip_id == LTC3887_ID) return ltc3887; + else if (chip_id == LTC3889_ID) + return ltc3889; + else if (chip_id == LTC7880_ID) + return ltc7880; else if (chip_id == LTM2987_ID_A || chip_id == LTM2987_ID_B) return ltm2987; + else if (chip_id == LTM4664_ID) + return ltm4664; else if (chip_id == LTM4675_ID) return ltm4675; else if (chip_id == LTM4676_ID_REV1 || chip_id == LTM4676_ID_REV2 || chip_id == LTM4676A_ID) return ltm4676; + else if (chip_id == LTM4677_ID_REV1 || chip_id == LTM4677_ID_REV2) + return ltm4677; + else if (chip_id == LTM4678_ID_REV1 || chip_id == LTM4678_ID_REV2) + return ltm4678; + else if (chip_id == LTM4680_ID) + return ltm4680; else if (chip_id == LTM4686_ID) return ltm4686; + else if (chip_id == LTM4700_ID) + return ltm4700; dev_err(&client->dev, "Unsupported chip ID 0x%x\n", chip_id); return -ENODEV; @@ -637,6 +697,19 @@ static int ltc2978_probe(struct i2c_client *client, data->temp2_max = 0x7c00; switch (data->id) { + case ltc2972: + info->read_word_data = ltc2975_read_word_data; + info->pages = LTC2972_NUM_PAGES; + info->func[0] = PMBUS_HAVE_IIN | 
PMBUS_HAVE_PIN + | PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT + | PMBUS_HAVE_TEMP2; + for (i = 0; i < info->pages; i++) { + info->func[i] |= PMBUS_HAVE_VOUT + | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_POUT + | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT; + } + break; case ltc2974: info->read_word_data = ltc2974_read_word_data; info->pages = LTC2974_NUM_PAGES; @@ -662,8 +735,10 @@ static int ltc2978_probe(struct i2c_client *client, | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT; } break; + case ltc2977: case ltc2978: + case ltc2979: case ltc2980: case ltm2987: info->read_word_data = ltc2978_read_word_data; @@ -680,6 +755,7 @@ static int ltc2978_probe(struct i2c_client *client, case ltc3887: case ltm4675: case ltm4676: + case ltm4677: case ltm4686: data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING; info->read_word_data = ltc3880_read_word_data; @@ -721,7 +797,14 @@ static int ltc2978_probe(struct i2c_client *client, | PMBUS_HAVE_PIN | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP; break; + case ltc3884: case ltc3886: + case ltc3889: + case ltc7880: + case ltm4664: + case ltm4678: + case ltm4680: + case ltm4700: data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING; info->read_word_data = ltc3883_read_word_data; info->pages = LTC3880_NUM_PAGES; @@ -752,22 +835,33 @@ static int ltc2978_probe(struct i2c_client *client, return pmbus_do_probe(client, id, info); } + #ifdef CONFIG_OF static const struct of_device_id ltc2978_of_match[] = { + { .compatible = "lltc,ltc2972" }, { .compatible = "lltc,ltc2974" }, { .compatible = "lltc,ltc2975" }, { .compatible = "lltc,ltc2977" }, { .compatible = "lltc,ltc2978" }, + { .compatible = "lltc,ltc2979" }, { .compatible = "lltc,ltc2980" }, { .compatible = "lltc,ltc3880" }, { .compatible = "lltc,ltc3882" }, { .compatible = "lltc,ltc3883" }, + { .compatible = "lltc,ltc3884" }, { .compatible = "lltc,ltc3886" }, { .compatible = "lltc,ltc3887" }, + { .compatible = "lltc,ltc3889" }, + { .compatible = "lltc,ltc7880" }, { .compatible = "lltc,ltm2987" }, + { .compatible = "lltc,ltm4664" }, { .compatible = "lltc,ltm4675" }, { .compatible = "lltc,ltm4676" }, + { .compatible = "lltc,ltm4677" }, + { .compatible = "lltc,ltm4678" }, + { .compatible = "lltc,ltm4680" }, { .compatible = "lltc,ltm4686" }, + { .compatible = "lltc,ltm4700" }, { } }; MODULE_DEVICE_TABLE(of, ltc2978_of_match); diff --git a/drivers/hwmon/pmbus/ltc3815.c b/drivers/hwmon/pmbus/ltc3815.c index b83a18a58364..3036263e0a66 100644 --- a/drivers/hwmon/pmbus/ltc3815.c +++ b/drivers/hwmon/pmbus/ltc3815.c @@ -55,7 +55,7 @@ static int ltc3815_write_byte(struct i2c_client *client, int page, u8 reg) * LTC3815 does not support the CLEAR_FAULTS command. * Emulate it by clearing the status register. 
*/ - ret = pmbus_read_word_data(client, 0, PMBUS_STATUS_WORD); + ret = pmbus_read_word_data(client, 0, 0xff, PMBUS_STATUS_WORD); if (ret > 0) { pmbus_write_word_data(client, 0, PMBUS_STATUS_WORD, ret); @@ -69,25 +69,31 @@ static int ltc3815_write_byte(struct i2c_client *client, int page, u8 reg) return ret; } -static int ltc3815_read_word_data(struct i2c_client *client, int page, int reg) +static int ltc3815_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { int ret; switch (reg) { case PMBUS_VIRT_READ_VIN_MAX: - ret = pmbus_read_word_data(client, page, LTC3815_MFR_VIN_PEAK); + ret = pmbus_read_word_data(client, page, phase, + LTC3815_MFR_VIN_PEAK); break; case PMBUS_VIRT_READ_VOUT_MAX: - ret = pmbus_read_word_data(client, page, LTC3815_MFR_VOUT_PEAK); + ret = pmbus_read_word_data(client, page, phase, + LTC3815_MFR_VOUT_PEAK); break; case PMBUS_VIRT_READ_TEMP_MAX: - ret = pmbus_read_word_data(client, page, LTC3815_MFR_TEMP_PEAK); + ret = pmbus_read_word_data(client, page, phase, + LTC3815_MFR_TEMP_PEAK); break; case PMBUS_VIRT_READ_IOUT_MAX: - ret = pmbus_read_word_data(client, page, LTC3815_MFR_IOUT_PEAK); + ret = pmbus_read_word_data(client, page, phase, + LTC3815_MFR_IOUT_PEAK); break; case PMBUS_VIRT_READ_IIN_MAX: - ret = pmbus_read_word_data(client, page, LTC3815_MFR_IIN_PEAK); + ret = pmbus_read_word_data(client, page, phase, + LTC3815_MFR_IIN_PEAK); break; case PMBUS_VIRT_RESET_VOUT_HISTORY: case PMBUS_VIRT_RESET_VIN_HISTORY: diff --git a/drivers/hwmon/pmbus/max16064.c b/drivers/hwmon/pmbus/max16064.c index b3e7b8d2e69d..288e93f74c28 100644 --- a/drivers/hwmon/pmbus/max16064.c +++ b/drivers/hwmon/pmbus/max16064.c @@ -15,17 +15,18 @@ #define MAX16064_MFR_VOUT_PEAK 0xd4 #define MAX16064_MFR_TEMPERATURE_PEAK 0xd6 -static int max16064_read_word_data(struct i2c_client *client, int page, int reg) +static int max16064_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { int ret; switch (reg) { case PMBUS_VIRT_READ_VOUT_MAX: - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, MAX16064_MFR_VOUT_PEAK); break; case PMBUS_VIRT_READ_TEMP_MAX: - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, MAX16064_MFR_TEMPERATURE_PEAK); break; case PMBUS_VIRT_RESET_VOUT_HISTORY: diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c index 294e2212f61e..c0bb05487e0e 100644 --- a/drivers/hwmon/pmbus/max20730.c +++ b/drivers/hwmon/pmbus/max20730.c @@ -85,7 +85,8 @@ static u32 max_current[][5] = { [max20743] = { 18900, 24100, 29200, 34100 }, }; -static int max20730_read_word_data(struct i2c_client *client, int page, int reg) +static int max20730_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { const struct pmbus_driver_info *info = pmbus_get_driver_info(client); const struct max20730_data *data = to_max20730_data(info); diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c index 254b0f98c755..d9aa5c873d21 100644 --- a/drivers/hwmon/pmbus/max31785.c +++ b/drivers/hwmon/pmbus/max31785.c @@ -72,7 +72,7 @@ static int max31785_read_long_data(struct i2c_client *client, int page, cmdbuf[0] = reg; - rc = pmbus_set_page(client, page); + rc = pmbus_set_page(client, page, 0xff); if (rc < 0) return rc; @@ -110,7 +110,7 @@ static int max31785_get_pwm_mode(struct i2c_client *client, int page) if (config < 0) return config; - command = pmbus_read_word_data(client, page, PMBUS_FAN_COMMAND_1); + command = 
pmbus_read_word_data(client, page, 0xff, PMBUS_FAN_COMMAND_1); if (command < 0) return command; @@ -126,7 +126,7 @@ static int max31785_get_pwm_mode(struct i2c_client *client, int page) } static int max31785_read_word_data(struct i2c_client *client, int page, - int reg) + int phase, int reg) { u32 val; int rv; diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c index 5c63a6600729..18b4e071067f 100644 --- a/drivers/hwmon/pmbus/max34440.c +++ b/drivers/hwmon/pmbus/max34440.c @@ -41,7 +41,8 @@ struct max34440_data { #define to_max34440_data(x) container_of(x, struct max34440_data, info) -static int max34440_read_word_data(struct i2c_client *client, int page, int reg) +static int max34440_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { int ret; const struct pmbus_driver_info *info = pmbus_get_driver_info(client); @@ -49,44 +50,44 @@ static int max34440_read_word_data(struct i2c_client *client, int page, int reg) switch (reg) { case PMBUS_VIRT_READ_VOUT_MIN: - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, MAX34440_MFR_VOUT_MIN); break; case PMBUS_VIRT_READ_VOUT_MAX: - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, MAX34440_MFR_VOUT_PEAK); break; case PMBUS_VIRT_READ_IOUT_AVG: if (data->id != max34446 && data->id != max34451) return -ENXIO; - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, MAX34446_MFR_IOUT_AVG); break; case PMBUS_VIRT_READ_IOUT_MAX: - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, MAX34440_MFR_IOUT_PEAK); break; case PMBUS_VIRT_READ_POUT_AVG: if (data->id != max34446) return -ENXIO; - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, MAX34446_MFR_POUT_AVG); break; case PMBUS_VIRT_READ_POUT_MAX: if (data->id != max34446) return -ENXIO; - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, MAX34446_MFR_POUT_PEAK); break; case PMBUS_VIRT_READ_TEMP_AVG: if (data->id != max34446 && data->id != max34460 && data->id != max34461) return -ENXIO; - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, MAX34446_MFR_TEMPERATURE_AVG); break; case PMBUS_VIRT_READ_TEMP_MAX: - ret = pmbus_read_word_data(client, page, + ret = pmbus_read_word_data(client, page, phase, MAX34440_MFR_TEMPERATURE_PEAK); break; case PMBUS_VIRT_RESET_POUT_HISTORY: @@ -159,14 +160,14 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg) int mfg_status; if (page >= 0) { - ret = pmbus_set_page(client, page); + ret = pmbus_set_page(client, page, 0xff); if (ret < 0) return ret; } switch (reg) { case PMBUS_STATUS_IOUT: - mfg_status = pmbus_read_word_data(client, 0, + mfg_status = pmbus_read_word_data(client, 0, 0xff, PMBUS_STATUS_MFR_SPECIFIC); if (mfg_status < 0) return mfg_status; @@ -176,7 +177,7 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg) ret |= PB_IOUT_OC_FAULT; break; case PMBUS_STATUS_TEMPERATURE: - mfg_status = pmbus_read_word_data(client, 0, + mfg_status = pmbus_read_word_data(client, 0, 0xff, PMBUS_STATUS_MFR_SPECIFIC); if (mfg_status < 0) return mfg_status; diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c index bc5f4cb6450e..643ccfc05106 100644 --- a/drivers/hwmon/pmbus/max8688.c +++ b/drivers/hwmon/pmbus/max8688.c @@ -28,7 +28,8 @@ #define MAX8688_STATUS_OT_FAULT BIT(13) #define 
MAX8688_STATUS_OT_WARNING BIT(14) -static int max8688_read_word_data(struct i2c_client *client, int page, int reg) +static int max8688_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { int ret; @@ -37,13 +38,15 @@ static int max8688_read_word_data(struct i2c_client *client, int page, int reg) switch (reg) { case PMBUS_VIRT_READ_VOUT_MAX: - ret = pmbus_read_word_data(client, 0, MAX8688_MFR_VOUT_PEAK); + ret = pmbus_read_word_data(client, 0, 0xff, + MAX8688_MFR_VOUT_PEAK); break; case PMBUS_VIRT_READ_IOUT_MAX: - ret = pmbus_read_word_data(client, 0, MAX8688_MFR_IOUT_PEAK); + ret = pmbus_read_word_data(client, 0, 0xff, + MAX8688_MFR_IOUT_PEAK); break; case PMBUS_VIRT_READ_TEMP_MAX: - ret = pmbus_read_word_data(client, 0, + ret = pmbus_read_word_data(client, 0, 0xff, MAX8688_MFR_TEMPERATURE_PEAK); break; case PMBUS_VIRT_RESET_VOUT_HISTORY: @@ -94,7 +97,7 @@ static int max8688_read_byte_data(struct i2c_client *client, int page, int reg) switch (reg) { case PMBUS_STATUS_VOUT: - mfg_status = pmbus_read_word_data(client, 0, + mfg_status = pmbus_read_word_data(client, 0, 0xff, MAX8688_MFG_STATUS); if (mfg_status < 0) return mfg_status; @@ -108,7 +111,7 @@ static int max8688_read_byte_data(struct i2c_client *client, int page, int reg) ret |= PB_VOLTAGE_OV_FAULT; break; case PMBUS_STATUS_IOUT: - mfg_status = pmbus_read_word_data(client, 0, + mfg_status = pmbus_read_word_data(client, 0, 0xff, MAX8688_MFG_STATUS); if (mfg_status < 0) return mfg_status; @@ -120,7 +123,7 @@ static int max8688_read_byte_data(struct i2c_client *client, int page, int reg) ret |= PB_IOUT_OC_FAULT; break; case PMBUS_STATUS_TEMPERATURE: - mfg_status = pmbus_read_word_data(client, 0, + mfg_status = pmbus_read_word_data(client, 0, 0xff, MAX8688_MFG_STATUS); if (mfg_status < 0) return mfg_status; diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c index 51e8312b6c2d..6d384e8ee1db 100644 --- a/drivers/hwmon/pmbus/pmbus.c +++ b/drivers/hwmon/pmbus/pmbus.c @@ -102,10 +102,10 @@ static int pmbus_identify(struct i2c_client *client, int page; for (page = 1; page < PMBUS_PAGES; page++) { - if (pmbus_set_page(client, page) < 0) + if (pmbus_set_page(client, page, 0xff) < 0) break; } - pmbus_set_page(client, 0); + pmbus_set_page(client, 0, 0xff); info->pages = page; } else { info->pages = 1; diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index 13b34bd67f23..18e06fc6c53f 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -119,6 +119,9 @@ enum pmbus_regs { PMBUS_MFR_DATE = 0x9D, PMBUS_MFR_SERIAL = 0x9E, + PMBUS_IC_DEVICE_ID = 0xAD, + PMBUS_IC_DEVICE_REV = 0xAE, + /* * Virtual registers. 
* Useful to support attributes which are not supported by standard PMBus @@ -359,6 +362,7 @@ enum pmbus_sensor_classes { }; #define PMBUS_PAGES 32 /* Per PMBus specification */ +#define PMBUS_PHASES 8 /* Maximum number of phases per page */ /* Functionality bit mask */ #define PMBUS_HAVE_VIN BIT(0) @@ -385,13 +389,15 @@ enum pmbus_sensor_classes { #define PMBUS_HAVE_PWM34 BIT(21) #define PMBUS_HAVE_SAMPLES BIT(22) -#define PMBUS_PAGE_VIRTUAL BIT(31) +#define PMBUS_PHASE_VIRTUAL BIT(30) /* Phases on this page are virtual */ +#define PMBUS_PAGE_VIRTUAL BIT(31) /* Page is virtual */ enum pmbus_data_format { linear = 0, direct, vid }; enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv }; struct pmbus_driver_info { int pages; /* Total number of pages */ + u8 phases[PMBUS_PAGES]; /* Number of phases per page */ enum pmbus_data_format format[PSC_NUM_CLASSES]; enum vrm_version vrm_version[PMBUS_PAGES]; /* vrm version per page */ /* @@ -403,6 +409,7 @@ struct pmbus_driver_info { int R[PSC_NUM_CLASSES]; /* exponent */ u32 func[PMBUS_PAGES]; /* Functionality, per page */ + u32 pfunc[PMBUS_PHASES];/* Functionality, per phase */ /* * The following functions map manufacturing specific register values * to PMBus standard register values. Specify only if mapping is @@ -415,7 +422,8 @@ struct pmbus_driver_info { * the standard register. */ int (*read_byte_data)(struct i2c_client *client, int page, int reg); - int (*read_word_data)(struct i2c_client *client, int page, int reg); + int (*read_word_data)(struct i2c_client *client, int page, int phase, + int reg); int (*write_word_data)(struct i2c_client *client, int page, int reg, u16 word); int (*write_byte)(struct i2c_client *client, int page, u8 value); @@ -454,9 +462,11 @@ extern const struct regulator_ops pmbus_regulator_ops; /* Function declarations */ void pmbus_clear_cache(struct i2c_client *client); -int pmbus_set_page(struct i2c_client *client, int page); -int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg); -int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, u16 word); +int pmbus_set_page(struct i2c_client *client, int page, int phase); +int pmbus_read_word_data(struct i2c_client *client, int page, int phase, + u8 reg); +int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, + u16 word); int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg); int pmbus_write_byte(struct i2c_client *client, int page, u8 value); int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index d9c17feb7b4a..8d321bf7d15b 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -49,6 +49,7 @@ struct pmbus_sensor { char name[PMBUS_NAME_SIZE]; /* sysfs sensor name */ struct device_attribute attribute; u8 page; /* page number */ + u8 phase; /* phase number, 0xff for all phases */ u16 reg; /* register */ enum pmbus_sensor_classes class; /* sensor class */ bool update; /* runtime sensor update needed */ @@ -109,6 +110,7 @@ struct pmbus_data { int (*read_status)(struct i2c_client *client, int page); u8 currpage; + u8 currphase; /* current phase, 0xff for all */ }; struct pmbus_debugfs_entry { @@ -146,15 +148,16 @@ void pmbus_clear_cache(struct i2c_client *client) } EXPORT_SYMBOL_GPL(pmbus_clear_cache); -int pmbus_set_page(struct i2c_client *client, int page) +int pmbus_set_page(struct i2c_client *client, int page, int phase) { struct pmbus_data *data = i2c_get_clientdata(client); 
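The pmbus.h hunk above adds a per-page phase count (phases[]) and a per-phase functionality mask (pfunc[]) to struct pmbus_driver_info, and gives read_word_data() and pmbus_set_page() a phase argument, with 0xff selecting all phases of a page; the pmbus_core.c changes that continue below thread that argument through the sensor code. As a rough illustrative sketch only, not part of the patch, a client driver for a hypothetical two-phase regulator could advertise per-phase output-current readings like this, reusing the PMBUS_* symbols declared above (the chip and structure name are made up):

/*
 * Hypothetical single-page, two-phase regulator.  With the core changes
 * below, the page-level current sensor is created first, then one extra
 * sensor per phase listed in pfunc[], without limit/alarm attributes
 * for the phase-specific ones.
 */
static struct pmbus_driver_info myreg_info = {
        .pages = 1,
        .phases[0] = 2,                         /* two phases behind page 0 */
        .format[PSC_VOLTAGE_OUT] = linear,
        .format[PSC_CURRENT_OUT] = linear,
        .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
                   PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
        .pfunc[0] = PMBUS_HAVE_IOUT,            /* phase 0 current */
        .pfunc[1] = PMBUS_HAVE_IOUT,            /* phase 1 current */
};

This mirrors what the tps53679.c hunk further down does for the TPS53681, which declares up to six phases on page 0 and exposes only per-phase output current for them.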
int rv; - if (page < 0 || page == data->currpage) + if (page < 0) return 0; - if (!(data->info->func[page] & PMBUS_PAGE_VIRTUAL)) { + if (!(data->info->func[page] & PMBUS_PAGE_VIRTUAL) && + data->info->pages > 1 && page != data->currpage) { rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); if (rv < 0) return rv; @@ -166,9 +169,17 @@ int pmbus_set_page(struct i2c_client *client, int page) if (rv != page) return -EIO; } - data->currpage = page; + if (data->info->phases[page] && data->currphase != phase && + !(data->info->func[page] & PMBUS_PHASE_VIRTUAL)) { + rv = i2c_smbus_write_byte_data(client, PMBUS_PHASE, + phase); + if (rv) + return rv; + } + data->currphase = phase; + return 0; } EXPORT_SYMBOL_GPL(pmbus_set_page); @@ -177,7 +188,7 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value) { int rv; - rv = pmbus_set_page(client, page); + rv = pmbus_set_page(client, page, 0xff); if (rv < 0) return rv; @@ -208,7 +219,7 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, { int rv; - rv = pmbus_set_page(client, page); + rv = pmbus_set_page(client, page, 0xff); if (rv < 0) return rv; @@ -286,11 +297,11 @@ int pmbus_update_fan(struct i2c_client *client, int page, int id, } EXPORT_SYMBOL_GPL(pmbus_update_fan); -int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg) +int pmbus_read_word_data(struct i2c_client *client, int page, int phase, u8 reg) { int rv; - rv = pmbus_set_page(client, page); + rv = pmbus_set_page(client, page, phase); if (rv < 0) return rv; @@ -320,14 +331,15 @@ static int pmbus_read_virt_reg(struct i2c_client *client, int page, int reg) * _pmbus_read_word_data() is similar to pmbus_read_word_data(), but checks if * a device specific mapping function exists and calls it if necessary. 
*/ -static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg) +static int _pmbus_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { struct pmbus_data *data = i2c_get_clientdata(client); const struct pmbus_driver_info *info = data->info; int status; if (info->read_word_data) { - status = info->read_word_data(client, page, reg); + status = info->read_word_data(client, page, phase, reg); if (status != -ENODATA) return status; } @@ -335,14 +347,20 @@ static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg) if (reg >= PMBUS_VIRT_BASE) return pmbus_read_virt_reg(client, page, reg); - return pmbus_read_word_data(client, page, reg); + return pmbus_read_word_data(client, page, phase, reg); +} + +/* Same as above, but without phase parameter, for use in check functions */ +static int __pmbus_read_word_data(struct i2c_client *client, int page, int reg) +{ + return _pmbus_read_word_data(client, page, 0xff, reg); } int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg) { int rv; - rv = pmbus_set_page(client, page); + rv = pmbus_set_page(client, page, 0xff); if (rv < 0) return rv; @@ -354,7 +372,7 @@ int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, u8 value) { int rv; - rv = pmbus_set_page(client, page); + rv = pmbus_set_page(client, page, 0xff); if (rv < 0) return rv; @@ -440,7 +458,7 @@ static int pmbus_get_fan_rate(struct i2c_client *client, int page, int id, have_rpm = !!(config & pmbus_fan_rpm_mask[id]); if (want_rpm == have_rpm) - return pmbus_read_word_data(client, page, + return pmbus_read_word_data(client, page, 0xff, pmbus_fan_command_registers[id]); /* Can't sensibly map between RPM and PWM, just return zero */ @@ -530,7 +548,7 @@ EXPORT_SYMBOL_GPL(pmbus_check_byte_register); bool pmbus_check_word_register(struct i2c_client *client, int page, int reg) { - return pmbus_check_register(client, _pmbus_read_word_data, page, reg); + return pmbus_check_register(client, __pmbus_read_word_data, page, reg); } EXPORT_SYMBOL_GPL(pmbus_check_word_register); @@ -595,6 +613,7 @@ static struct pmbus_data *pmbus_update_device(struct device *dev) sensor->data = _pmbus_read_word_data(client, sensor->page, + sensor->phase, sensor->reg); } pmbus_clear_faults(client); @@ -1076,7 +1095,8 @@ static int pmbus_add_boolean(struct pmbus_data *data, static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data, const char *name, const char *type, - int seq, int page, int reg, + int seq, int page, int phase, + int reg, enum pmbus_sensor_classes class, bool update, bool readonly, bool convert) @@ -1100,6 +1120,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data, readonly = true; sensor->page = page; + sensor->phase = phase; sensor->reg = reg; sensor->class = class; sensor->update = update; @@ -1119,7 +1140,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data, static int pmbus_add_label(struct pmbus_data *data, const char *name, int seq, - const char *lstring, int index) + const char *lstring, int index, int phase) { struct pmbus_label *label; struct device_attribute *a; @@ -1131,11 +1152,21 @@ static int pmbus_add_label(struct pmbus_data *data, a = &label->attribute; snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq); - if (!index) - strncpy(label->label, lstring, sizeof(label->label) - 1); - else - snprintf(label->label, sizeof(label->label), "%s%d", lstring, - index); + if (!index) { + if (phase == 0xff) + strncpy(label->label, lstring, + 
sizeof(label->label) - 1); + else + snprintf(label->label, sizeof(label->label), "%s.%d", + lstring, phase); + } else { + if (phase == 0xff) + snprintf(label->label, sizeof(label->label), "%s%d", + lstring, index); + else + snprintf(label->label, sizeof(label->label), "%s%d.%d", + lstring, index, phase); + } pmbus_dev_attr_init(a, label->name, 0444, pmbus_show_label, NULL); return pmbus_add_attribute(data, &a->attr); @@ -1200,7 +1231,7 @@ static int pmbus_add_limit_attrs(struct i2c_client *client, for (i = 0; i < nlimit; i++) { if (pmbus_check_word_register(client, page, l->reg)) { curr = pmbus_add_sensor(data, name, l->attr, index, - page, l->reg, attr->class, + page, 0xff, l->reg, attr->class, attr->update || l->update, false, true); if (!curr) @@ -1227,7 +1258,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client, struct pmbus_data *data, const struct pmbus_driver_info *info, const char *name, - int index, int page, + int index, int page, int phase, const struct pmbus_sensor_attr *attr, bool paged) { @@ -1237,15 +1268,16 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client, if (attr->label) { ret = pmbus_add_label(data, name, index, attr->label, - paged ? page + 1 : 0); + paged ? page + 1 : 0, phase); if (ret) return ret; } - base = pmbus_add_sensor(data, name, "input", index, page, attr->reg, - attr->class, true, true, true); + base = pmbus_add_sensor(data, name, "input", index, page, phase, + attr->reg, attr->class, true, true, true); if (!base) return -ENOMEM; - if (attr->sfunc) { + /* No limit and alarm attributes for phase specific sensors */ + if (attr->sfunc && phase == 0xff) { ret = pmbus_add_limit_attrs(client, data, info, name, index, page, base, attr); if (ret < 0) @@ -1315,10 +1347,25 @@ static int pmbus_add_sensor_attrs(struct i2c_client *client, continue; ret = pmbus_add_sensor_attrs_one(client, data, info, name, index, page, - attrs, paged); + 0xff, attrs, paged); if (ret) return ret; index++; + if (info->phases[page]) { + int phase; + + for (phase = 0; phase < info->phases[page]; + phase++) { + if (!(info->pfunc[phase] & attrs->func)) + continue; + ret = pmbus_add_sensor_attrs_one(client, + data, info, name, index, page, + phase, attrs, paged); + if (ret) + return ret; + index++; + } + } } attrs++; } @@ -1822,7 +1869,7 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client, struct pmbus_sensor *sensor; sensor = pmbus_add_sensor(data, "fan", "target", index, page, - PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN, + PMBUS_VIRT_FAN_TARGET_1 + id, 0xff, PSC_FAN, false, false, true); if (!sensor) @@ -1833,14 +1880,14 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client, return 0; sensor = pmbus_add_sensor(data, "pwm", NULL, index, page, - PMBUS_VIRT_PWM_1 + id, PSC_PWM, + PMBUS_VIRT_PWM_1 + id, 0xff, PSC_PWM, false, false, true); if (!sensor) return -ENOMEM; sensor = pmbus_add_sensor(data, "pwm", "enable", index, page, - PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM, + PMBUS_VIRT_PWM_ENABLE_1 + id, 0xff, PSC_PWM, true, false, false); if (!sensor) @@ -1882,7 +1929,7 @@ static int pmbus_add_fan_attributes(struct i2c_client *client, continue; if (pmbus_add_sensor(data, "fan", "input", index, - page, pmbus_fan_registers[f], + page, pmbus_fan_registers[f], 0xff, PSC_FAN, true, true, true) == NULL) return -ENOMEM; @@ -1964,7 +2011,7 @@ static ssize_t pmbus_show_samples(struct device *dev, struct i2c_client *client = to_i2c_client(dev->parent); struct pmbus_samples_reg *reg = to_samples_reg(devattr); - val = _pmbus_read_word_data(client, reg->page, 
reg->attr->reg); + val = _pmbus_read_word_data(client, reg->page, 0xff, reg->attr->reg); if (val < 0) return val; @@ -2120,7 +2167,7 @@ static int pmbus_read_status_byte(struct i2c_client *client, int page) static int pmbus_read_status_word(struct i2c_client *client, int page) { - return _pmbus_read_word_data(client, page, PMBUS_STATUS_WORD); + return _pmbus_read_word_data(client, page, 0xff, PMBUS_STATUS_WORD); } static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, @@ -2482,6 +2529,8 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id, if (pdata) data->flags = pdata->flags; data->info = info; + data->currpage = 0xff; + data->currphase = 0xfe; ret = pmbus_init_common(client, data, info); if (ret < 0) diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c index 9c22e9013dd7..157c99ffb52b 100644 --- a/drivers/hwmon/pmbus/tps53679.c +++ b/drivers/hwmon/pmbus/tps53679.c @@ -6,13 +6,21 @@ * Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com> */ +#include <linux/bits.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of_device.h> #include "pmbus.h" +enum chips { + tps53647, tps53667, tps53679, tps53681, tps53688 +}; + +#define TPS53647_PAGE_NUM 1 + #define TPS53679_PROT_VR12_5MV 0x01 /* VR12.0 mode, 5-mV DAC */ #define TPS53679_PROT_VR12_5_10MV 0x02 /* VR12.5 mode, 10-mV DAC */ #define TPS53679_PROT_VR13_10MV 0x04 /* VR13.0 mode, 10-mV DAC */ @@ -20,13 +28,19 @@ #define TPS53679_PROT_VR13_5MV 0x07 /* VR13.0 mode, 5-mV DAC */ #define TPS53679_PAGE_NUM 2 -static int tps53679_identify(struct i2c_client *client, - struct pmbus_driver_info *info) +#define TPS53681_DEVICE_ID 0x81 + +#define TPS53681_PMBUS_REVISION 0x33 + +#define TPS53681_MFR_SPECIFIC_20 0xe4 /* Number of phases, per page */ + +static int tps53679_identify_mode(struct i2c_client *client, + struct pmbus_driver_info *info) { u8 vout_params; int i, ret; - for (i = 0; i < TPS53679_PAGE_NUM; i++) { + for (i = 0; i < info->pages; i++) { /* Read the register with VOUT scaling value.*/ ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE); if (ret < 0) @@ -52,48 +66,180 @@ static int tps53679_identify(struct i2c_client *client, return 0; } +static int tps53679_identify_phases(struct i2c_client *client, + struct pmbus_driver_info *info) +{ + int ret; + + /* On TPS53681, only channel A provides per-phase output current */ + ret = pmbus_read_byte_data(client, 0, TPS53681_MFR_SPECIFIC_20); + if (ret < 0) + return ret; + info->phases[0] = (ret & 0x07) + 1; + + return 0; +} + +static int tps53679_identify_chip(struct i2c_client *client, + u8 revision, u16 id) +{ + u8 buf[I2C_SMBUS_BLOCK_MAX]; + int ret; + + ret = pmbus_read_byte_data(client, 0, PMBUS_REVISION); + if (ret < 0) + return ret; + if (ret != revision) { + dev_err(&client->dev, "Unexpected PMBus revision 0x%x\n", ret); + return -ENODEV; + } + + ret = i2c_smbus_read_block_data(client, PMBUS_IC_DEVICE_ID, buf); + if (ret < 0) + return ret; + if (ret != 1 || buf[0] != id) { + dev_err(&client->dev, "Unexpected device ID 0x%x\n", buf[0]); + return -ENODEV; + } + return 0; +} + +/* + * Common identification function for chips with multi-phase support. + * Since those chips have special configuration registers, we want to have + * some level of reassurance that we are really talking with the chip + * being probed. Check PMBus revision and chip ID. 
+ */ +static int tps53679_identify_multiphase(struct i2c_client *client, + struct pmbus_driver_info *info, + int pmbus_rev, int device_id) +{ + int ret; + + ret = tps53679_identify_chip(client, pmbus_rev, device_id); + if (ret < 0) + return ret; + + ret = tps53679_identify_mode(client, info); + if (ret < 0) + return ret; + + return tps53679_identify_phases(client, info); +} + +static int tps53679_identify(struct i2c_client *client, + struct pmbus_driver_info *info) +{ + return tps53679_identify_mode(client, info); +} + +static int tps53681_identify(struct i2c_client *client, + struct pmbus_driver_info *info) +{ + return tps53679_identify_multiphase(client, info, + TPS53681_PMBUS_REVISION, + TPS53681_DEVICE_ID); +} + +static int tps53681_read_word_data(struct i2c_client *client, int page, + int phase, int reg) +{ + /* + * For reading the total output current (READ_IOUT) for all phases, + * the chip datasheet is a bit vague. It says "PHASE must be set to + * FFh to access all phases simultaneously. PHASE may also be set to + * 80h readack (!) the total phase current". + * Experiments show that the command does _not_ report the total + * current for all phases if the phase is set to 0xff. Instead, it + * appears to report the current of one of the phases. Override phase + * parameter with 0x80 when reading the total output current on page 0. + */ + if (reg == PMBUS_READ_IOUT && page == 0 && phase == 0xff) + return pmbus_read_word_data(client, page, 0x80, reg); + return -ENODATA; +} + static struct pmbus_driver_info tps53679_info = { - .pages = TPS53679_PAGE_NUM, .format[PSC_VOLTAGE_IN] = linear, .format[PSC_VOLTAGE_OUT] = vid, .format[PSC_TEMPERATURE] = linear, .format[PSC_CURRENT_OUT] = linear, .format[PSC_POWER] = linear, - .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_PIN | + PMBUS_HAVE_STATUS_INPUT | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_POUT, - .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_POUT, - .identify = tps53679_identify, + .pfunc[0] = PMBUS_HAVE_IOUT, + .pfunc[1] = PMBUS_HAVE_IOUT, + .pfunc[2] = PMBUS_HAVE_IOUT, + .pfunc[3] = PMBUS_HAVE_IOUT, + .pfunc[4] = PMBUS_HAVE_IOUT, + .pfunc[5] = PMBUS_HAVE_IOUT, }; static int tps53679_probe(struct i2c_client *client, const struct i2c_device_id *id) { + struct device *dev = &client->dev; struct pmbus_driver_info *info; + enum chips chip_id; + + if (dev->of_node) + chip_id = (enum chips)of_device_get_match_data(dev); + else + chip_id = id->driver_data; - info = devm_kmemdup(&client->dev, &tps53679_info, sizeof(*info), - GFP_KERNEL); + info = devm_kmemdup(dev, &tps53679_info, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; + switch (chip_id) { + case tps53647: + case tps53667: + info->pages = TPS53647_PAGE_NUM; + info->identify = tps53679_identify; + break; + case tps53679: + case tps53688: + info->pages = TPS53679_PAGE_NUM; + info->identify = tps53679_identify; + break; + case tps53681: + info->pages = TPS53679_PAGE_NUM; + info->phases[0] = 6; + info->identify = tps53681_identify; + info->read_word_data = tps53681_read_word_data; + break; + default: + return -ENODEV; + } + return pmbus_do_probe(client, id, info); } static const struct i2c_device_id tps53679_id[] = { - 
{"tps53679", 0}, - {"tps53688", 0}, + {"tps53647", tps53647}, + {"tps53667", tps53667}, + {"tps53679", tps53679}, + {"tps53681", tps53681}, + {"tps53688", tps53688}, {} }; MODULE_DEVICE_TABLE(i2c, tps53679_id); static const struct of_device_id __maybe_unused tps53679_of_match[] = { - {.compatible = "ti,tps53679"}, - {.compatible = "ti,tps53688"}, + {.compatible = "ti,tps53647", .data = (void *)tps53647}, + {.compatible = "ti,tps53667", .data = (void *)tps53667}, + {.compatible = "ti,tps53679", .data = (void *)tps53679}, + {.compatible = "ti,tps53681", .data = (void *)tps53681}, + {.compatible = "ti,tps53688", .data = (void *)tps53688}, {} }; MODULE_DEVICE_TABLE(of, tps53679_of_match); diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c index 23ea3415f166..81f4c4f166cd 100644 --- a/drivers/hwmon/pmbus/ucd9000.c +++ b/drivers/hwmon/pmbus/ucd9000.c @@ -370,7 +370,7 @@ static void ucd9000_probe_gpio(struct i2c_client *client, #ifdef CONFIG_DEBUG_FS static int ucd9000_get_mfr_status(struct i2c_client *client, u8 *buffer) { - int ret = pmbus_set_page(client, 0); + int ret = pmbus_set_page(client, 0, 0xff); if (ret < 0) return ret; diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c index ecd9b65627ec..d5103fc9e269 100644 --- a/drivers/hwmon/pmbus/xdpe12284.c +++ b/drivers/hwmon/pmbus/xdpe12284.c @@ -18,6 +18,60 @@ #define XDPE122_AMD_625MV 0x10 /* AMD mode 6.25mV */ #define XDPE122_PAGE_NUM 2 +static int xdpe122_read_word_data(struct i2c_client *client, int page, + int phase, int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + long val; + s16 exponent; + s32 mantissa; + int ret; + + switch (reg) { + case PMBUS_VOUT_OV_FAULT_LIMIT: + case PMBUS_VOUT_UV_FAULT_LIMIT: + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + /* Convert register value to LINEAR11 data. */ + exponent = ((s16)ret) >> 11; + mantissa = ((s16)((ret & GENMASK(10, 0)) << 5)) >> 5; + val = mantissa * 1000L; + if (exponent >= 0) + val <<= exponent; + else + val >>= -exponent; + + /* Convert data to VID register. 
*/ + switch (info->vrm_version[page]) { + case vr13: + if (val >= 500) + return 1 + DIV_ROUND_CLOSEST(val - 500, 10); + return 0; + case vr12: + if (val >= 250) + return 1 + DIV_ROUND_CLOSEST(val - 250, 5); + return 0; + case imvp9: + if (val >= 200) + return 1 + DIV_ROUND_CLOSEST(val - 200, 10); + return 0; + case amd625mv: + if (val >= 200 && val <= 1550) + return DIV_ROUND_CLOSEST((1550 - val) * 100, + 625); + return 0; + default: + return -EINVAL; + } + default: + return -ENODATA; + } + + return 0; +} + static int xdpe122_identify(struct i2c_client *client, struct pmbus_driver_info *info) { @@ -70,6 +124,7 @@ static struct pmbus_driver_info xdpe122_info = { PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT, .identify = xdpe122_identify, + .read_word_data = xdpe122_read_word_data, }; static int xdpe122_probe(struct i2c_client *client, diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c index 190b898e404a..3a827d0a881d 100644 --- a/drivers/hwmon/pmbus/zl6100.c +++ b/drivers/hwmon/pmbus/zl6100.c @@ -125,7 +125,8 @@ static inline void zl6100_wait(const struct zl6100_data *data) } } -static int zl6100_read_word_data(struct i2c_client *client, int page, int reg) +static int zl6100_read_word_data(struct i2c_client *client, int page, + int phase, int reg) { const struct pmbus_driver_info *info = pmbus_get_driver_info(client); struct zl6100_data *data = to_zl6100_data(info); @@ -167,7 +168,7 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, int reg) } zl6100_wait(data); - ret = pmbus_read_word_data(client, page, vreg); + ret = pmbus_read_word_data(client, page, phase, vreg); data->access = ktime_get(); if (ret < 0) return ret; diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c index 8264e849e588..e5d18dac8ee7 100644 --- a/drivers/hwmon/via-cputemp.c +++ b/drivers/hwmon/via-cputemp.c @@ -270,10 +270,10 @@ static int via_cputemp_down_prep(unsigned int cpu) } static const struct x86_cpu_id __initconst cputemp_ids[] = { - { X86_VENDOR_CENTAUR, 6, 0xa, }, /* C7 A */ - { X86_VENDOR_CENTAUR, 6, 0xd, }, /* C7 D */ - { X86_VENDOR_CENTAUR, 6, 0xf, }, /* Nano */ - { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, }, + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_A, NULL), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_D, NULL), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_NANO, NULL), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, X86_MODEL_ANY, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index 7ffadc2da57b..5a5120121e50 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -1346,8 +1346,13 @@ w83627ehf_is_visible(const void *drvdata, enum hwmon_sensor_types type, /* channel 0.., name 1.. 
*/ if (!(data->have_temp & (1 << channel))) return 0; - if (attr == hwmon_temp_input || attr == hwmon_temp_label) + if (attr == hwmon_temp_input) return 0444; + if (attr == hwmon_temp_label) { + if (data->temp_label) + return 0444; + return 0; + } if (channel == 2 && data->temp3_val_only) return 0; if (attr == hwmon_temp_max) { diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index 8e48c7458aa3..255f8f41c8ff 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -718,9 +718,6 @@ static int msc_win_set_lockout(struct msc_window *win, if (old != expect) { ret = -EINVAL; - dev_warn_ratelimited(msc_dev(win->msc), - "expected lockout state %d, got %d\n", - expect, old); goto unlock; } @@ -741,6 +738,10 @@ unlock: /* from intel_th_msc_window_unlock(), don't warn if not locked */ if (expect == WIN_LOCKED && old == new) return 0; + + dev_warn_ratelimited(msc_dev(win->msc), + "expected lockout state %d, got %d\n", + expect, old); } return ret; @@ -760,7 +761,7 @@ static int msc_configure(struct msc *msc) lockdep_assert_held(&msc->buf_mutex); if (msc->mode > MSC_MODE_MULTI) - return -ENOTSUPP; + return -EINVAL; if (msc->mode == MSC_MODE_MULTI) { if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE)) @@ -1294,7 +1295,7 @@ static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages, } else if (msc->mode == MSC_MODE_MULTI) { ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins); } else { - ret = -ENOTSUPP; + ret = -EINVAL; } if (!ret) { @@ -1530,7 +1531,7 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf, if (ret >= 0) *ppos = iter->offset; } else { - ret = -ENOTSUPP; + ret = -EINVAL; } put_count: diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index e9d90b53bbc4..86aa6a46bcba 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -235,6 +235,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { .driver_data = (kernel_ulong_t)&intel_th_2x, }, { + /* Elkhart Lake CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { /* Elkhart Lake */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26), .driver_data = (kernel_ulong_t)&intel_th_2x, diff --git a/drivers/hwtracing/stm/p_sys-t.c b/drivers/hwtracing/stm/p_sys-t.c index b178a5495b67..360b5c03df95 100644 --- a/drivers/hwtracing/stm/p_sys-t.c +++ b/drivers/hwtracing/stm/p_sys-t.c @@ -238,7 +238,7 @@ static struct configfs_attribute *sys_t_policy_attrs[] = { static inline bool sys_t_need_ts(struct sys_t_output *op) { if (op->node.ts_interval && - time_after(op->ts_jiffies + op->node.ts_interval, jiffies)) { + time_after(jiffies, op->ts_jiffies + op->node.ts_interval)) { op->ts_jiffies = jiffies; return true; @@ -250,8 +250,8 @@ static inline bool sys_t_need_ts(struct sys_t_output *op) static bool sys_t_need_clock_sync(struct sys_t_output *op) { if (op->node.clocksync_interval && - time_after(op->clocksync_jiffies + op->node.clocksync_interval, - jiffies)) { + time_after(jiffies, + op->clocksync_jiffies + op->node.clocksync_interval)) { op->clocksync_jiffies = jiffies; return true; diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c index 5255d3755411..1de23b4f3809 100644 --- a/drivers/i2c/busses/i2c-altera.c +++ b/drivers/i2c/busses/i2c-altera.c @@ -171,7 +171,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev) /* SCL Low Time */ writel(t_low, idev->base + ALTR_I2C_SCL_LOW); /* SDA Hold Time, 300ns */ - 
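The p_sys-t.c hunk above swaps the operands of time_after(): the macro is true when its first argument is chronologically after the second, so an "interval has elapsed" test must pass jiffies first; with the operands the other way around the check was inverted. A small user-space sketch of the same wrap-safe comparison, with a local after() helper standing in for the kernel macro and made-up numbers:

#include <stdbool.h>
#include <stdio.h>

/* True when a is chronologically after b, tolerating counter wrap,
 * mirroring the kernel's time_after(a, b). */
static bool after(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;
}

int main(void)
{
        unsigned long last = 1000, interval = 50;

        printf("%d\n", after(1020, last + interval));   /* 0: too early */
        printf("%d\n", after(1100, last + interval));   /* 1: interval elapsed */
        return 0;
}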
writel(div_u64(300 * clk_mhz, 1000), idev->base + ALTR_I2C_SDA_HOLD); + writel(3 * clk_mhz / 10, idev->base + ALTR_I2C_SDA_HOLD); /* Mask all master interrupt bits */ altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false); diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 050adda7c1bd..05b35ac33ce3 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -313,6 +313,7 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev) pm_runtime_get_noresume(&pdev->dev); i2c_del_adapter(&dev->adapter); + devm_free_irq(&pdev->dev, dev->irq, dev); pci_free_irq_vectors(pdev); } diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index 3a9e840a3546..a4a6825c8758 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c @@ -348,7 +348,7 @@ static struct gpio_desc *i2c_gpio_get_desc(struct device *dev, if (ret == -ENOENT) retdesc = ERR_PTR(-EPROBE_DEFER); - if (ret != -EPROBE_DEFER) + if (PTR_ERR(retdesc) != -EPROBE_DEFER) dev_err(dev, "error trying to get descriptor: %d\n", ret); return retdesc; diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c index 8497c7a95dd4..224f830f77f9 100644 --- a/drivers/i2c/busses/i2c-hix5hd2.c +++ b/drivers/i2c/busses/i2c-hix5hd2.c @@ -477,6 +477,7 @@ static int hix5hd2_i2c_remove(struct platform_device *pdev) i2c_del_adapter(&priv->adap); pm_runtime_disable(priv->dev); pm_runtime_set_suspended(priv->dev); + clk_disable_unprepare(priv->clk); return 0; } diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index ca4f096fef74..a9c03f5c3482 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -132,11 +132,6 @@ #define TCOBASE 0x050 #define TCOCTL 0x054 -#define ACPIBASE 0x040 -#define ACPIBASE_SMI_OFF 0x030 -#define ACPICTRL 0x044 -#define ACPICTRL_EN 0x080 - #define SBREG_BAR 0x10 #define SBREG_SMBCTRL 0xc6000c #define SBREG_SMBCTRL_DNV 0xcf000c @@ -1553,7 +1548,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev, pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, hidden); spin_unlock(&p2sb_spinlock); - res = &tco_res[ICH_RES_MEM_OFF]; + res = &tco_res[1]; if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS) res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV; else @@ -1563,7 +1558,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev, res->flags = IORESOURCE_MEM; return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1, - tco_res, 3, &spt_tco_platform_data, + tco_res, 2, &spt_tco_platform_data, sizeof(spt_tco_platform_data)); } @@ -1576,17 +1571,16 @@ static struct platform_device * i801_add_tco_cnl(struct i801_priv *priv, struct pci_dev *pci_dev, struct resource *tco_res) { - return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1, - tco_res, 2, &cnl_tco_platform_data, - sizeof(cnl_tco_platform_data)); + return platform_device_register_resndata(&pci_dev->dev, + "iTCO_wdt", -1, tco_res, 1, &cnl_tco_platform_data, + sizeof(cnl_tco_platform_data)); } static void i801_add_tco(struct i801_priv *priv) { - u32 base_addr, tco_base, tco_ctl, ctrl_val; struct pci_dev *pci_dev = priv->pci_dev; - struct resource tco_res[3], *res; - unsigned int devfn; + struct resource tco_res[2], *res; + u32 tco_base, tco_ctl; /* If we have ACPI based watchdog use that instead */ if (acpi_has_watchdog()) @@ -1601,30 +1595,15 @@ static void i801_add_tco(struct i801_priv *priv) return; memset(tco_res, 0, 
sizeof(tco_res)); - - res = &tco_res[ICH_RES_IO_TCO]; - res->start = tco_base & ~1; - res->end = res->start + 32 - 1; - res->flags = IORESOURCE_IO; - /* - * Power Management registers. + * Always populate the main iTCO IO resource here. The second entry + * for NO_REBOOT MMIO is filled by the SPT specific function. */ - devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 2); - pci_bus_read_config_dword(pci_dev->bus, devfn, ACPIBASE, &base_addr); - - res = &tco_res[ICH_RES_IO_SMI]; - res->start = (base_addr & ~1) + ACPIBASE_SMI_OFF; - res->end = res->start + 3; + res = &tco_res[0]; + res->start = tco_base & ~1; + res->end = res->start + 32 - 1; res->flags = IORESOURCE_IO; - /* - * Enable the ACPI I/O space. - */ - pci_bus_read_config_dword(pci_dev->bus, devfn, ACPICTRL, &ctrl_val); - ctrl_val |= ACPICTRL_EN; - pci_bus_write_config_dword(pci_dev->bus, devfn, ACPICTRL, ctrl_val); - if (priv->features & FEATURE_TCO_CNL) priv->tco_pdev = i801_add_tco_cnl(priv, pci_dev, tco_res); else diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index 16a67a64284a..b426fc956938 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -78,25 +78,6 @@ #define X1000_I2C_DC_STOP BIT(9) -static const char * const jz4780_i2c_abrt_src[] = { - "ABRT_7B_ADDR_NOACK", - "ABRT_10ADDR1_NOACK", - "ABRT_10ADDR2_NOACK", - "ABRT_XDATA_NOACK", - "ABRT_GCALL_NOACK", - "ABRT_GCALL_READ", - "ABRT_HS_ACKD", - "SBYTE_ACKDET", - "ABRT_HS_NORSTRT", - "SBYTE_NORSTRT", - "ABRT_10B_RD_NORSTRT", - "ABRT_MASTER_DIS", - "ARB_LOST", - "SLVFLUSH_TXFIFO", - "SLV_ARBLOST", - "SLVRD_INTX", -}; - #define JZ4780_I2C_INTST_IGC BIT(11) #define JZ4780_I2C_INTST_ISTT BIT(10) #define JZ4780_I2C_INTST_ISTP BIT(9) @@ -576,21 +557,8 @@ done: static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src) { - int i; - - dev_err(&i2c->adap.dev, "txabrt: 0x%08x\n", src); - dev_err(&i2c->adap.dev, "device addr=%x\n", - jz4780_i2c_readw(i2c, JZ4780_I2C_TAR)); - dev_err(&i2c->adap.dev, "send cmd count:%d %d\n", - i2c->cmd, i2c->cmd_buf[i2c->cmd]); - dev_err(&i2c->adap.dev, "receive data count:%d %d\n", - i2c->cmd, i2c->data_buf[i2c->cmd]); - - for (i = 0; i < 16; i++) { - if (src & BIT(i)) - dev_dbg(&i2c->adap.dev, "I2C TXABRT[%d]=%s\n", - i, jz4780_i2c_abrt_src[i]); - } + dev_dbg(&i2c->adap.dev, "txabrt: 0x%08x, cmd: %d, send: %d, recv: %d\n", + src, i2c->cmd, i2c->cmd_buf[i2c->cmd], i2c->data_buf[i2c->cmd]); } static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c, diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c index 62e18b4db0ed..f5d25ce00f03 100644 --- a/drivers/i2c/busses/i2c-nvidia-gpu.c +++ b/drivers/i2c/busses/i2c-nvidia-gpu.c @@ -8,6 +8,7 @@ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/interrupt.h> +#include <linux/iopoll.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/platform_device.h> @@ -75,20 +76,15 @@ static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd) static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd) { - unsigned long target = jiffies + msecs_to_jiffies(1000); u32 val; + int ret; - do { - val = readl(i2cd->regs + I2C_MST_CNTL); - if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER)) - break; - if ((val & I2C_MST_CNTL_STATUS) != - I2C_MST_CNTL_STATUS_BUS_BUSY) - break; - usleep_range(500, 600); - } while (time_is_after_jiffies(target)); - - if (time_is_before_jiffies(target)) { + ret = readl_poll_timeout(i2cd->regs + I2C_MST_CNTL, val, + !(val & I2C_MST_CNTL_CYCLE_TRIGGER) || + (val & I2C_MST_CNTL_STATUS) != 
I2C_MST_CNTL_STATUS_BUS_BUSY, + 500, 1000 * USEC_PER_MSEC); + + if (ret) { dev_err(i2cd->dev, "i2c timeout error %x\n", val); return -ETIMEDOUT; } diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c index a7a81846d5b1..635dd697ac0b 100644 --- a/drivers/i2c/busses/i2c-pca-platform.c +++ b/drivers/i2c/busses/i2c-pca-platform.c @@ -140,7 +140,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev) int ret = 0; int irq; - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); /* If irq is 0, we do polling. */ if (irq < 0) irq = 0; diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c index 54e1fc8a495e..f7f7b5b64720 100644 --- a/drivers/i2c/busses/i2c-st.c +++ b/drivers/i2c/busses/i2c-st.c @@ -434,6 +434,7 @@ static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev) /** * st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode * @i2c_dev: Controller's private data + * @max: Maximum amount of data to fill into the Tx FIFO * * This functions fills the Tx FIFO with fixed pattern when * in read mode to trigger clock. diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index 8f3dbc97a057..8b0ff780919b 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -394,9 +394,17 @@ EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle); static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev) { struct device *dev; + struct i2c_client *client; dev = bus_find_device_by_acpi_dev(&i2c_bus_type, adev); - return dev ? i2c_verify_client(dev) : NULL; + if (!dev) + return NULL; + + client = i2c_verify_client(dev); + if (!client) + put_device(dev); + + return client; } static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value, diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c index 9e2e1406f85e..bb8e60dff988 100644 --- a/drivers/i3c/device.c +++ b/drivers/i3c/device.c @@ -213,40 +213,34 @@ i3c_device_match_id(struct i3c_device *i3cdev, { struct i3c_device_info devinfo; const struct i3c_device_id *id; + u16 manuf, part, ext_info; + bool rndpid; i3c_device_get_info(i3cdev, &devinfo); - /* - * The lower 32bits of the provisional ID is just filled with a random - * value, try to match using DCR info. - */ - if (!I3C_PID_RND_LOWER_32BITS(devinfo.pid)) { - u16 manuf = I3C_PID_MANUF_ID(devinfo.pid); - u16 part = I3C_PID_PART_ID(devinfo.pid); - u16 ext_info = I3C_PID_EXTRA_INFO(devinfo.pid); - - /* First try to match by manufacturer/part ID. */ - for (id = id_table; id->match_flags != 0; id++) { - if ((id->match_flags & I3C_MATCH_MANUF_AND_PART) != - I3C_MATCH_MANUF_AND_PART) - continue; - - if (manuf != id->manuf_id || part != id->part_id) - continue; - - if ((id->match_flags & I3C_MATCH_EXTRA_INFO) && - ext_info != id->extra_info) - continue; - - return id; - } - } + manuf = I3C_PID_MANUF_ID(devinfo.pid); + part = I3C_PID_PART_ID(devinfo.pid); + ext_info = I3C_PID_EXTRA_INFO(devinfo.pid); + rndpid = I3C_PID_RND_LOWER_32BITS(devinfo.pid); - /* Fallback to DCR match. 
*/ for (id = id_table; id->match_flags != 0; id++) { if ((id->match_flags & I3C_MATCH_DCR) && - id->dcr == devinfo.dcr) - return id; + id->dcr != devinfo.dcr) + continue; + + if ((id->match_flags & I3C_MATCH_MANUF) && + id->manuf_id != manuf) + continue; + + if ((id->match_flags & I3C_MATCH_PART) && + (rndpid || id->part_id != part)) + continue; + + if ((id->match_flags & I3C_MATCH_EXTRA_INFO) && + (rndpid || id->extra_info != ext_info)) + continue; + + return id; } return NULL; diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 7f8f896fa0c3..d79cd6d54b3a 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -241,12 +241,34 @@ out: } static DEVICE_ATTR_RO(hdrcap); +static ssize_t modalias_show(struct device *dev, + struct device_attribute *da, char *buf) +{ + struct i3c_device *i3c = dev_to_i3cdev(dev); + struct i3c_device_info devinfo; + u16 manuf, part, ext; + + i3c_device_get_info(i3c, &devinfo); + manuf = I3C_PID_MANUF_ID(devinfo.pid); + part = I3C_PID_PART_ID(devinfo.pid); + ext = I3C_PID_EXTRA_INFO(devinfo.pid); + + if (I3C_PID_RND_LOWER_32BITS(devinfo.pid)) + return sprintf(buf, "i3c:dcr%02Xmanuf%04X", devinfo.dcr, + manuf); + + return sprintf(buf, "i3c:dcr%02Xmanuf%04Xpart%04Xext%04X", + devinfo.dcr, manuf, part, ext); +} +static DEVICE_ATTR_RO(modalias); + static struct attribute *i3c_device_attrs[] = { &dev_attr_bcr.attr, &dev_attr_dcr.attr, &dev_attr_pid.attr, &dev_attr_dynamic_address.attr, &dev_attr_hdrcap.attr, + &dev_attr_modalias.attr, NULL, }; ATTRIBUTE_GROUPS(i3c_device); @@ -267,7 +289,7 @@ static int i3c_device_uevent(struct device *dev, struct kobj_uevent_env *env) devinfo.dcr, manuf); return add_uevent_var(env, - "MODALIAS=i3c:dcr%02Xmanuf%04Xpart%04xext%04x", + "MODALIAS=i3c:dcr%02Xmanuf%04Xpart%04Xext%04X", devinfo.dcr, manuf, part, ext); } @@ -1953,7 +1975,7 @@ of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master, * DEFSLVS command. */ if (boardinfo->base.flags & I2C_CLIENT_TEN) { - dev_err(&master->dev, "I2C device with 10 bit address not supported."); + dev_err(dev, "I2C device with 10 bit address not supported."); return -ENOTSUPP; } @@ -2138,7 +2160,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master) * correctly even if one or more i2c devices are not registered. 
*/ i3c_bus_for_each_i2cdev(&master->bus, i2cdev) - i2cdev->dev = i2c_new_device(adap, &i2cdev->boardinfo->base); + i2cdev->dev = i2c_new_client_device(adap, &i2cdev->boardinfo->base); return 0; } diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index bd26c3b9634e..5c5306cd50ec 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -221,7 +221,7 @@ struct dw_i3c_xfer { struct completion comp; int ret; unsigned int ncmds; - struct dw_i3c_cmd cmds[0]; + struct dw_i3c_cmd cmds[]; }; struct dw_i3c_master { diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index 54712793709e..3fee8bd7fe20 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c @@ -388,7 +388,7 @@ struct cdns_i3c_xfer { struct completion comp; int ret; unsigned int ncmds; - struct cdns_i3c_cmd cmds[0]; + struct cdns_i3c_cmd cmds[]; }; struct cdns_i3c_data { diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c index 1bb99b556393..05c26986637b 100644 --- a/drivers/ide/ide-gd.c +++ b/drivers/ide/ide-gd.c @@ -361,7 +361,7 @@ static const struct block_device_operations ide_gd_ops = { .release = ide_gd_release, .ioctl = ide_gd_ioctl, #ifdef CONFIG_COMPAT - .ioctl = ide_gd_compat_ioctl, + .compat_ioctl = ide_gd_compat_ioctl, #endif .getgeo = ide_gd_getgeo, .check_events = ide_gd_check_events, diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index d55606608ac8..f4495841bf68 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -2,8 +2,9 @@ /* * intel_idle.c - native hardware idle loop for modern Intel processors * - * Copyright (c) 2013, Intel Corporation. + * Copyright (c) 2013 - 2020, Intel Corporation. * Len Brown <len.brown@intel.com> + * Rafael J. Wysocki <rafael.j.wysocki@intel.com> */ /* @@ -25,11 +26,6 @@ /* * Known limitations * - * The driver currently initializes for_each_online_cpu() upon modprobe. - * It it unaware of subsequent processors hot-added to the system. - * This means that if you boot with maxcpus=n and later online - * processors above n, those processors will use C1 only. - * * ACPI has a .suspend hack to turn off deep c-statees during suspend * to avoid complications with the lapic timer workaround. * Have not seen issues with suspend, but may need same workaround here. @@ -55,7 +51,7 @@ #include <asm/mwait.h> #include <asm/msr.h> -#define INTEL_IDLE_VERSION "0.4.1" +#define INTEL_IDLE_VERSION "0.5.1" static struct cpuidle_driver intel_idle_driver = { .name = "intel_idle", @@ -65,11 +61,12 @@ static struct cpuidle_driver intel_idle_driver = { static int max_cstate = CPUIDLE_STATE_MAX - 1; static unsigned int disabled_states_mask; -static unsigned int mwait_substates; +static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; + +static unsigned long auto_demotion_disable_flags; +static bool disable_promotion_to_c1e; -#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF -/* Reliable LAPIC Timer States, bit 1 for C1 etc. 
*/ -static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ +static bool lapic_timer_always_reliable; struct idle_cpu { struct cpuidle_state *state_table; @@ -84,13 +81,10 @@ struct idle_cpu { bool use_acpi; }; -static const struct idle_cpu *icpu; -static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; -static int intel_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index); -static void intel_idle_s2idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index); -static struct cpuidle_state *cpuidle_state_table; +static const struct idle_cpu *icpu __initdata; +static struct cpuidle_state *cpuidle_state_table __initdata; + +static unsigned int mwait_substates __initdata; /* * Enable this state by default even if the ACPI _CST does not list it. @@ -103,7 +97,7 @@ static struct cpuidle_state *cpuidle_state_table; * If this flag is set, SW flushes the TLB, so even if the * HW doesn't do the flushing, this flag is safe to use. */ -#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000 +#define CPUIDLE_FLAG_TLB_FLUSHED BIT(16) /* * MWAIT takes an 8-bit "hint" in EAX "suggesting" @@ -115,12 +109,87 @@ static struct cpuidle_state *cpuidle_state_table; #define flg2MWAIT(flags) (((flags) >> 24) & 0xFF) #define MWAIT2flg(eax) ((eax & 0xFF) << 24) +/** + * intel_idle - Ask the processor to enter the given idle state. + * @dev: cpuidle device of the target CPU. + * @drv: cpuidle driver (assumed to point to intel_idle_driver). + * @index: Target idle state index. + * + * Use the MWAIT instruction to notify the processor that the CPU represented by + * @dev is idle and it can try to enter the idle state corresponding to @index. + * + * If the local APIC timer is not known to be reliable in the target idle state, + * enable one-shot tick broadcasting for the target CPU before executing MWAIT. + * + * Optionally call leave_mm() for the target CPU upfront to avoid wakeups due to + * flushing user TLBs. + * + * Must be called under local_irq_disable(). + */ +static __cpuidle int intel_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + struct cpuidle_state *state = &drv->states[index]; + unsigned long eax = flg2MWAIT(state->flags); + unsigned long ecx = 1; /* break on interrupt flag */ + bool uninitialized_var(tick); + int cpu = smp_processor_id(); + + /* + * leave_mm() to avoid costly and often unnecessary wakeups + * for flushing the user TLB's associated with the active mm. + */ + if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED) + leave_mm(cpu); + + if (!static_cpu_has(X86_FEATURE_ARAT) && !lapic_timer_always_reliable) { + /* + * Switch over to one-shot tick broadcast if the target C-state + * is deeper than C1. + */ + if ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) { + tick = true; + tick_broadcast_enter(); + } else { + tick = false; + } + } + + mwait_idle_with_hints(eax, ecx); + + if (!static_cpu_has(X86_FEATURE_ARAT) && tick) + tick_broadcast_exit(); + + return index; +} + +/** + * intel_idle_s2idle - Ask the processor to enter the given idle state. + * @dev: cpuidle device of the target CPU. + * @drv: cpuidle driver (assumed to point to intel_idle_driver). + * @index: Target idle state index. + * + * Use the MWAIT instruction to notify the processor that the CPU represented by + * @dev is idle and it can try to enter the idle state corresponding to @index. + * + * Invoked as a suspend-to-idle callback routine with frozen user space, frozen + * scheduler tick and suspended scheduler clock on the target CPU. 
+ */ +static __cpuidle void intel_idle_s2idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + unsigned long eax = flg2MWAIT(drv->states[index].flags); + unsigned long ecx = 1; /* break on interrupt flag */ + + mwait_idle_with_hints(eax, ecx); +} + /* * States are indexed by the cstate number, * which is also the index into the MWAIT hint array. * Thus C0 is a dummy. */ -static struct cpuidle_state nehalem_cstates[] = { +static struct cpuidle_state nehalem_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -157,7 +226,7 @@ static struct cpuidle_state nehalem_cstates[] = { .enter = NULL } }; -static struct cpuidle_state snb_cstates[] = { +static struct cpuidle_state snb_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -202,7 +271,7 @@ static struct cpuidle_state snb_cstates[] = { .enter = NULL } }; -static struct cpuidle_state byt_cstates[] = { +static struct cpuidle_state byt_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -247,7 +316,7 @@ static struct cpuidle_state byt_cstates[] = { .enter = NULL } }; -static struct cpuidle_state cht_cstates[] = { +static struct cpuidle_state cht_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -292,7 +361,7 @@ static struct cpuidle_state cht_cstates[] = { .enter = NULL } }; -static struct cpuidle_state ivb_cstates[] = { +static struct cpuidle_state ivb_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -337,7 +406,7 @@ static struct cpuidle_state ivb_cstates[] = { .enter = NULL } }; -static struct cpuidle_state ivt_cstates[] = { +static struct cpuidle_state ivt_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -374,7 +443,7 @@ static struct cpuidle_state ivt_cstates[] = { .enter = NULL } }; -static struct cpuidle_state ivt_cstates_4s[] = { +static struct cpuidle_state ivt_cstates_4s[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -411,7 +480,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .enter = NULL } }; -static struct cpuidle_state ivt_cstates_8s[] = { +static struct cpuidle_state ivt_cstates_8s[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -448,7 +517,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .enter = NULL } }; -static struct cpuidle_state hsw_cstates[] = { +static struct cpuidle_state hsw_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -516,7 +585,7 @@ static struct cpuidle_state hsw_cstates[] = { { .enter = NULL } }; -static struct cpuidle_state bdw_cstates[] = { +static struct cpuidle_state bdw_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -585,7 +654,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = NULL } }; -static struct cpuidle_state skl_cstates[] = { +static struct cpuidle_state skl_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -654,7 +723,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = NULL } }; -static struct cpuidle_state skx_cstates[] = { +static struct cpuidle_state skx_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -683,7 +752,7 @@ static struct cpuidle_state skx_cstates[] = { .enter = NULL } }; -static struct cpuidle_state atom_cstates[] = { +static struct cpuidle_state atom_cstates[] __initdata = { { .name = "C1E", .desc = "MWAIT 0x00", @@ -719,7 +788,7 @@ static struct cpuidle_state atom_cstates[] = { { .enter = NULL } }; -static struct cpuidle_state tangier_cstates[] = { +static struct cpuidle_state tangier_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ 
-763,7 +832,7 @@ static struct cpuidle_state tangier_cstates[] = { { .enter = NULL } }; -static struct cpuidle_state avn_cstates[] = { +static struct cpuidle_state avn_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -783,7 +852,7 @@ static struct cpuidle_state avn_cstates[] = { { .enter = NULL } }; -static struct cpuidle_state knl_cstates[] = { +static struct cpuidle_state knl_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -804,7 +873,7 @@ static struct cpuidle_state knl_cstates[] = { .enter = NULL } }; -static struct cpuidle_state bxt_cstates[] = { +static struct cpuidle_state bxt_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -865,7 +934,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = NULL } }; -static struct cpuidle_state dnv_cstates[] = { +static struct cpuidle_state dnv_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", @@ -894,225 +963,164 @@ static struct cpuidle_state dnv_cstates[] = { .enter = NULL } }; -/** - * intel_idle - * @dev: cpuidle_device - * @drv: cpuidle driver - * @index: index of cpuidle state - * - * Must be called under local_irq_disable(). - */ -static __cpuidle int intel_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) -{ - unsigned long ecx = 1; /* break on interrupt flag */ - struct cpuidle_state *state = &drv->states[index]; - unsigned long eax = flg2MWAIT(state->flags); - unsigned int cstate; - bool uninitialized_var(tick); - int cpu = smp_processor_id(); - - /* - * leave_mm() to avoid costly and often unnecessary wakeups - * for flushing the user TLB's associated with the active mm. - */ - if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED) - leave_mm(cpu); - - if (!static_cpu_has(X86_FEATURE_ARAT)) { - cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & - MWAIT_CSTATE_MASK) + 1; - tick = false; - if (!(lapic_timer_reliable_states & (1 << (cstate)))) { - tick = true; - tick_broadcast_enter(); - } - } - - mwait_idle_with_hints(eax, ecx); - - if (!static_cpu_has(X86_FEATURE_ARAT) && tick) - tick_broadcast_exit(); - - return index; -} - -/** - * intel_idle_s2idle - simplified "enter" callback routine for suspend-to-idle - * @dev: cpuidle_device - * @drv: cpuidle driver - * @index: state index - */ -static void intel_idle_s2idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) -{ - unsigned long ecx = 1; /* break on interrupt flag */ - unsigned long eax = flg2MWAIT(drv->states[index].flags); - - mwait_idle_with_hints(eax, ecx); -} - -static const struct idle_cpu idle_cpu_nehalem = { +static const struct idle_cpu idle_cpu_nehalem __initconst = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, .disable_promotion_to_c1e = true, }; -static const struct idle_cpu idle_cpu_nhx = { +static const struct idle_cpu idle_cpu_nhx __initconst = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, .disable_promotion_to_c1e = true, .use_acpi = true, }; -static const struct idle_cpu idle_cpu_atom = { +static const struct idle_cpu idle_cpu_atom __initconst = { .state_table = atom_cstates, }; -static const struct idle_cpu idle_cpu_tangier = { +static const struct idle_cpu idle_cpu_tangier __initconst = { .state_table = tangier_cstates, }; -static const struct idle_cpu idle_cpu_lincroft = { +static const struct idle_cpu idle_cpu_lincroft __initconst = { .state_table = atom_cstates, .auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE, }; -static const struct idle_cpu 
idle_cpu_snb = { +static const struct idle_cpu idle_cpu_snb __initconst = { .state_table = snb_cstates, .disable_promotion_to_c1e = true, }; -static const struct idle_cpu idle_cpu_snx = { +static const struct idle_cpu idle_cpu_snx __initconst = { .state_table = snb_cstates, .disable_promotion_to_c1e = true, .use_acpi = true, }; -static const struct idle_cpu idle_cpu_byt = { +static const struct idle_cpu idle_cpu_byt __initconst = { .state_table = byt_cstates, .disable_promotion_to_c1e = true, .byt_auto_demotion_disable_flag = true, }; -static const struct idle_cpu idle_cpu_cht = { +static const struct idle_cpu idle_cpu_cht __initconst = { .state_table = cht_cstates, .disable_promotion_to_c1e = true, .byt_auto_demotion_disable_flag = true, }; -static const struct idle_cpu idle_cpu_ivb = { +static const struct idle_cpu idle_cpu_ivb __initconst = { .state_table = ivb_cstates, .disable_promotion_to_c1e = true, }; -static const struct idle_cpu idle_cpu_ivt = { +static const struct idle_cpu idle_cpu_ivt __initconst = { .state_table = ivt_cstates, .disable_promotion_to_c1e = true, .use_acpi = true, }; -static const struct idle_cpu idle_cpu_hsw = { +static const struct idle_cpu idle_cpu_hsw __initconst = { .state_table = hsw_cstates, .disable_promotion_to_c1e = true, }; -static const struct idle_cpu idle_cpu_hsx = { +static const struct idle_cpu idle_cpu_hsx __initconst = { .state_table = hsw_cstates, .disable_promotion_to_c1e = true, .use_acpi = true, }; -static const struct idle_cpu idle_cpu_bdw = { +static const struct idle_cpu idle_cpu_bdw __initconst = { .state_table = bdw_cstates, .disable_promotion_to_c1e = true, }; -static const struct idle_cpu idle_cpu_bdx = { +static const struct idle_cpu idle_cpu_bdx __initconst = { .state_table = bdw_cstates, .disable_promotion_to_c1e = true, .use_acpi = true, }; -static const struct idle_cpu idle_cpu_skl = { +static const struct idle_cpu idle_cpu_skl __initconst = { .state_table = skl_cstates, .disable_promotion_to_c1e = true, }; -static const struct idle_cpu idle_cpu_skx = { +static const struct idle_cpu idle_cpu_skx __initconst = { .state_table = skx_cstates, .disable_promotion_to_c1e = true, .use_acpi = true, }; -static const struct idle_cpu idle_cpu_avn = { +static const struct idle_cpu idle_cpu_avn __initconst = { .state_table = avn_cstates, .disable_promotion_to_c1e = true, .use_acpi = true, }; -static const struct idle_cpu idle_cpu_knl = { +static const struct idle_cpu idle_cpu_knl __initconst = { .state_table = knl_cstates, .use_acpi = true, }; -static const struct idle_cpu idle_cpu_bxt = { +static const struct idle_cpu idle_cpu_bxt __initconst = { .state_table = bxt_cstates, .disable_promotion_to_c1e = true, }; -static const struct idle_cpu idle_cpu_dnv = { +static const struct idle_cpu idle_cpu_dnv __initconst = { .state_table = dnv_cstates, .disable_promotion_to_c1e = true, .use_acpi = true, }; static const struct x86_cpu_id intel_idle_ids[] __initconst = { - INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nhx), - INTEL_CPU_FAM6(NEHALEM, idle_cpu_nehalem), - INTEL_CPU_FAM6(NEHALEM_G, idle_cpu_nehalem), - INTEL_CPU_FAM6(WESTMERE, idle_cpu_nehalem), - INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nhx), - INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nhx), - INTEL_CPU_FAM6(ATOM_BONNELL, idle_cpu_atom), - INTEL_CPU_FAM6(ATOM_BONNELL_MID, idle_cpu_lincroft), - INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nhx), - INTEL_CPU_FAM6(SANDYBRIDGE, idle_cpu_snb), - INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snx), - INTEL_CPU_FAM6(ATOM_SALTWELL, idle_cpu_atom), - INTEL_CPU_FAM6(ATOM_SILVERMONT, 
idle_cpu_byt), - INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, idle_cpu_tangier), - INTEL_CPU_FAM6(ATOM_AIRMONT, idle_cpu_cht), - INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb), - INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt), - INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw), - INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsx), - INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw), - INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw), - INTEL_CPU_FAM6(ATOM_SILVERMONT_D, idle_cpu_avn), - INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw), - INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw), - INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdx), - INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdx), - INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl), - INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl), - INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl), - INTEL_CPU_FAM6(KABYLAKE, idle_cpu_skl), - INTEL_CPU_FAM6(SKYLAKE_X, idle_cpu_skx), - INTEL_CPU_FAM6(XEON_PHI_KNL, idle_cpu_knl), - INTEL_CPU_FAM6(XEON_PHI_KNM, idle_cpu_knl), - INTEL_CPU_FAM6(ATOM_GOLDMONT, idle_cpu_bxt), - INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, idle_cpu_bxt), - INTEL_CPU_FAM6(ATOM_GOLDMONT_D, idle_cpu_dnv), - INTEL_CPU_FAM6(ATOM_TREMONT_D, idle_cpu_dnv), + X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx), + X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem), + X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_G, &idle_cpu_nehalem), + X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &idle_cpu_nehalem), + X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &idle_cpu_nhx), + X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &idle_cpu_nhx), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL, &idle_cpu_atom), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL_MID, &idle_cpu_lincroft), + X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &idle_cpu_nhx), + X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &idle_cpu_snb), + X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &idle_cpu_snx), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL, &idle_cpu_atom), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &idle_cpu_byt), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &idle_cpu_tangier), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &idle_cpu_cht), + X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &idle_cpu_ivb), + X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &idle_cpu_ivt), + X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &idle_cpu_hsw), + X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &idle_cpu_hsx), + X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &idle_cpu_hsw), + X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &idle_cpu_hsw), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, &idle_cpu_avn), + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &idle_cpu_bdw), + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &idle_cpu_bdw), + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &idle_cpu_bdx), + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &idle_cpu_bdx), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &idle_cpu_skl), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &idle_cpu_skl), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &idle_cpu_skl), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &idle_cpu_skl), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &idle_cpu_skx), + X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl), + X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_dnv), {} }; -#define INTEL_CPU_FAM6_MWAIT \ - { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 } - static const struct x86_cpu_id intel_mwait_ids[] __initconst = { - INTEL_CPU_FAM6_MWAIT, + X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL), {} }; @@ 
-1273,11 +1281,11 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } #endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */ -/* - * ivt_idle_state_table_update(void) +/** + * ivt_idle_state_table_update - Tune the idle states table for Ivy Town. * - * Tune IVT multi-socket targets - * Assumption: num_sockets == (max_package_num + 1) + * Tune IVT multi-socket targets. + * Assumption: num_sockets == (max_package_num + 1). */ static void __init ivt_idle_state_table_update(void) { @@ -1323,11 +1331,11 @@ static unsigned long long __init irtl_2_usec(unsigned long long irtl) return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC); } -/* - * bxt_idle_state_table_update(void) +/** + * bxt_idle_state_table_update - Fix up the Broxton idle states table. * - * On BXT, we trust the IRTL to show the definitive maximum latency - * We use the same value for target_residency. + * On BXT, trust the IRTL (Interrupt Response Time Limit) MSR to show the + * definitive maximum latency and use the same value for target_residency. */ static void __init bxt_idle_state_table_update(void) { @@ -1370,11 +1378,11 @@ static void __init bxt_idle_state_table_update(void) } } -/* - * sklh_idle_state_table_update(void) + +/** + * sklh_idle_state_table_update - Fix up the Sky Lake idle states table. * - * On SKL-H (model 0x5e) disable C8 and C9 if: - * C10 is enabled and SGX disabled + * On SKL-H (model 0x5e) skip C8 and C9 if C10 is enabled and SGX disabled. */ static void __init sklh_idle_state_table_update(void) { @@ -1485,9 +1493,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) } } -/* - * intel_idle_cpuidle_driver_init() - * allocate, initialize cpuidle_states +/** + * intel_idle_cpuidle_driver_init - Create the list of available idle states. + * @drv: cpuidle driver structure to initialize. */ static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv) { @@ -1509,7 +1517,7 @@ static void auto_demotion_disable(void) unsigned long long msr_bits; rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); - msr_bits &= ~(icpu->auto_demotion_disable_flags); + msr_bits &= ~auto_demotion_disable_flags; wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); } @@ -1522,10 +1530,12 @@ static void c1e_promotion_disable(void) wrmsrl(MSR_IA32_POWER_CTL, msr_bits); } -/* - * intel_idle_cpu_init() - * allocate, initialize, register cpuidle_devices - * @cpu: cpu/core to initialize +/** + * intel_idle_cpu_init - Register the target CPU with the cpuidle core. + * @cpu: CPU to initialize. + * + * Register a cpuidle device object for @cpu and update its MSRs in accordance + * with the processor model flags. 
*/ static int intel_idle_cpu_init(unsigned int cpu) { @@ -1539,13 +1549,10 @@ static int intel_idle_cpu_init(unsigned int cpu) return -EIO; } - if (!icpu) - return 0; - - if (icpu->auto_demotion_disable_flags) + if (auto_demotion_disable_flags) auto_demotion_disable(); - if (icpu->disable_promotion_to_c1e) + if (disable_promotion_to_c1e) c1e_promotion_disable(); return 0; @@ -1555,7 +1562,7 @@ static int intel_idle_cpu_online(unsigned int cpu) { struct cpuidle_device *dev; - if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) + if (!lapic_timer_always_reliable) tick_broadcast_enable(); /* @@ -1623,6 +1630,8 @@ static int __init intel_idle_init(void) icpu = (const struct idle_cpu *)id->driver_data; if (icpu) { cpuidle_state_table = icpu->state_table; + auto_demotion_disable_flags = icpu->auto_demotion_disable_flags; + disable_promotion_to_c1e = icpu->disable_promotion_to_c1e; if (icpu->use_acpi || force_use_acpi) intel_idle_acpi_cst_extract(); } else if (!intel_idle_acpi_cst_extract()) { @@ -1647,15 +1656,15 @@ static int __init intel_idle_init(void) } if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ - lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; + lapic_timer_always_reliable = true; retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online", intel_idle_cpu_online, NULL); if (retval < 0) goto hp_setup_fail; - pr_debug("lapic_timer_reliable_states 0x%x\n", - lapic_timer_reliable_states); + pr_debug("Local APIC timer is reliable in %s\n", + lapic_timer_always_reliable ? "all C-states" : "C1"); return 0; diff --git a/drivers/iio/TODO b/drivers/iio/TODO new file mode 100644 index 000000000000..7d7326b7085a --- /dev/null +++ b/drivers/iio/TODO @@ -0,0 +1,19 @@ +2020-02-29 + +Documentation + - Binding docs for devices that are obviously used via device +tree + - Yaml conversions for abandoned drivers + - ABI Documentation + - Audit drivers/iio/staging/Documentation + +- Replace iio_dev->mlock by either a local lock or use +iio_claim_direct. (Requires analysis of the purpose of the lock.) + +- Converting drivers from device tree centric to more generic +property handlers. + +- Refactor old platform_data constructs from drivers and convert it +to state struct and using property handlers and readers. 
+ +Mailing list: linux-iio@vger.kernel.org diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c index 0f0f27a8184e..4154e7396bbe 100644 --- a/drivers/iio/accel/adis16201.c +++ b/drivers/iio/accel/adis16201.c @@ -246,6 +246,7 @@ static const struct adis_data adis16201_data = { .diag_stat_reg = ADIS16201_DIAG_STAT_REG, .self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN, + .self_test_reg = ADIS16201_MSC_CTRL_REG, .self_test_no_autoclear = true, .timeouts = &adis16201_timeouts, diff --git a/drivers/iio/accel/adis16209.c b/drivers/iio/accel/adis16209.c index c6dbd2424e10..31d45e7c5485 100644 --- a/drivers/iio/accel/adis16209.c +++ b/drivers/iio/accel/adis16209.c @@ -256,6 +256,7 @@ static const struct adis_data adis16209_data = { .diag_stat_reg = ADIS16209_STAT_REG, .self_test_mask = ADIS16209_MSC_CTRL_SELF_TEST_EN, + .self_test_reg = ADIS16209_MSC_CTRL_REG, .self_test_no_autoclear = true, .timeouts = &adis16209_timeouts, diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c index 67b8817995c0..60daf04ce188 100644 --- a/drivers/iio/accel/adxl372.c +++ b/drivers/iio/accel/adxl372.c @@ -237,6 +237,7 @@ static const struct adxl372_axis_lookup adxl372_axis_lookup_table[] = { .realbits = 12, \ .storagebits = 16, \ .shift = 4, \ + .endianness = IIO_BE, \ }, \ } diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c index 633955d764cc..6b283be26ebc 100644 --- a/drivers/iio/accel/st_accel_i2c.c +++ b/drivers/iio/accel/st_accel_i2c.c @@ -110,7 +110,7 @@ MODULE_DEVICE_TABLE(of, st_accel_of_match); #ifdef CONFIG_ACPI static const struct acpi_device_id st_accel_acpi_match[] = { - {"SMO8840", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME}, + {"SMO8840", (kernel_ulong_t)LIS2DH12_ACCEL_DEV_NAME}, {"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME}, { }, }; @@ -147,12 +147,9 @@ static int st_accel_i2c_probe(struct i2c_client *client) const struct st_sensor_settings *settings; struct st_sensor_data *adata; struct iio_dev *indio_dev; - const char *match; int ret; - match = device_get_match_data(&client->dev); - if (match) - strlcpy(client->name, match, sizeof(client->name)); + st_sensors_dev_name_probe(&client->dev, client->name, sizeof(client->name)); settings = st_accel_get_settings(client->name); if (!settings) { diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 82e33082958c..f4da821c4022 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -39,6 +39,18 @@ config AD7124 To compile this driver as a module, choose M here: the module will be called ad7124. +config AD7192 + tristate "Analog Devices AD7190 AD7192 AD7193 AD7195 ADC driver" + depends on SPI + select AD_SIGMA_DELTA + help + Say yes here to build support for Analog Devices AD7190, + AD7192, AD7193 or AD7195 SPI analog to digital converters (ADC). + If unsure, say N (but it's safe to say "Y"). + + To compile this driver as a module, choose M here: the + module will be called ad7192. 
+ config AD7266 tristate "Analog Devices AD7265/AD7266 ADC driver" depends on SPI_MASTER diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile index 919228900df9..8462455b4228 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o obj-$(CONFIG_AD7091R5) += ad7091r5.o ad7091r-base.o obj-$(CONFIG_AD7124) += ad7124.o +obj-$(CONFIG_AD7192) += ad7192.o obj-$(CONFIG_AD7266) += ad7266.o obj-$(CONFIG_AD7291) += ad7291.o obj-$(CONFIG_AD7292) += ad7292.o diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index d9915dc71d1e..a3c0647a5391 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -70,6 +70,11 @@ /* AD7124_FILTER_X */ #define AD7124_FILTER_FS_MSK GENMASK(10, 0) #define AD7124_FILTER_FS(x) FIELD_PREP(AD7124_FILTER_FS_MSK, x) +#define AD7124_FILTER_TYPE_MSK GENMASK(23, 21) +#define AD7124_FILTER_TYPE_SEL(x) FIELD_PREP(AD7124_FILTER_TYPE_MSK, x) + +#define AD7124_SINC3_FILTER 2 +#define AD7124_SINC4_FILTER 0 enum ad7124_ids { ID_AD7124_4, @@ -93,6 +98,14 @@ static const unsigned int ad7124_gain[8] = { 1, 2, 4, 8, 16, 32, 64, 128 }; +static const unsigned int ad7124_reg_size[] = { + 1, 2, 3, 3, 2, 1, 3, 3, 1, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3 +}; + static const int ad7124_master_clk_freq_hz[3] = { [AD7124_LOW_POWER] = 76800, [AD7124_MID_POWER] = 153600, @@ -119,6 +132,7 @@ struct ad7124_channel_config { unsigned int vref_mv; unsigned int pga_bits; unsigned int odr; + unsigned int filter_type; }; struct ad7124_state { @@ -138,7 +152,8 @@ static const struct iio_chan_spec ad7124_channel_template = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET) | - BIT(IIO_CHAN_INFO_SAMP_FREQ), + BIT(IIO_CHAN_INFO_SAMP_FREQ) | + BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), .scan_type = { .sign = 'u', .realbits = 24, @@ -281,6 +296,58 @@ static int ad7124_set_channel_gain(struct ad7124_state *st, return 0; } +static int ad7124_get_3db_filter_freq(struct ad7124_state *st, + unsigned int channel) +{ + unsigned int fadc; + + fadc = st->channel_config[channel].odr; + + switch (st->channel_config[channel].filter_type) { + case AD7124_SINC3_FILTER: + return DIV_ROUND_CLOSEST(fadc * 230, 1000); + case AD7124_SINC4_FILTER: + return DIV_ROUND_CLOSEST(fadc * 262, 1000); + default: + return -EINVAL; + } +} + +static int ad7124_set_3db_filter_freq(struct ad7124_state *st, + unsigned int channel, + unsigned int freq) +{ + unsigned int sinc4_3db_odr; + unsigned int sinc3_3db_odr; + unsigned int new_filter; + unsigned int new_odr; + + sinc4_3db_odr = DIV_ROUND_CLOSEST(freq * 1000, 230); + sinc3_3db_odr = DIV_ROUND_CLOSEST(freq * 1000, 262); + + if (sinc4_3db_odr > sinc3_3db_odr) { + new_filter = AD7124_SINC3_FILTER; + new_odr = sinc4_3db_odr; + } else { + new_filter = AD7124_SINC4_FILTER; + new_odr = sinc3_3db_odr; + } + + if (st->channel_config[channel].filter_type != new_filter) { + int ret; + + st->channel_config[channel].filter_type = new_filter; + ret = ad7124_spi_write_mask(st, AD7124_FILTER(channel), + AD7124_FILTER_TYPE_MSK, + AD7124_FILTER_TYPE_SEL(new_filter), + 3); + if (ret < 0) + return ret; + } + + return ad7124_set_channel_odr(st, channel, new_odr); +} + static int ad7124_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long info) @@ 
-323,6 +390,9 @@ static int ad7124_read_raw(struct iio_dev *indio_dev, *val = st->channel_config[chan->address].odr; return IIO_VAL_INT; + case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: + *val = ad7124_get_3db_filter_freq(st, chan->scan_index); + return IIO_VAL_INT; default: return -EINVAL; } @@ -355,11 +425,37 @@ static int ad7124_write_raw(struct iio_dev *indio_dev, gain = DIV_ROUND_CLOSEST(res, val2); return ad7124_set_channel_gain(st, chan->address, gain); + case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: + if (val2 != 0) + return -EINVAL; + + return ad7124_set_3db_filter_freq(st, chan->address, val); default: return -EINVAL; } } +static int ad7124_reg_access(struct iio_dev *indio_dev, + unsigned int reg, + unsigned int writeval, + unsigned int *readval) +{ + struct ad7124_state *st = iio_priv(indio_dev); + int ret; + + if (reg >= ARRAY_SIZE(ad7124_reg_size)) + return -EINVAL; + + if (readval) + ret = ad_sd_read_reg(&st->sd, reg, ad7124_reg_size[reg], + readval); + else + ret = ad_sd_write_reg(&st->sd, reg, ad7124_reg_size[reg], + writeval); + + return ret; +} + static IIO_CONST_ATTR(in_voltage_scale_available, "0.000001164 0.000002328 0.000004656 0.000009313 0.000018626 0.000037252 0.000074505 0.000149011 0.000298023"); @@ -375,6 +471,7 @@ static const struct attribute_group ad7124_attrs_group = { static const struct iio_info ad7124_info = { .read_raw = ad7124_read_raw, .write_raw = ad7124_write_raw, + .debugfs_reg_access = &ad7124_reg_access, .validate_trigger = ad_sd_validate_trigger, .attrs = &ad7124_attrs_group, }; diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index bf3e2a9cc07f..02981f3d1794 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -16,6 +16,7 @@ #include <linux/err.h> #include <linux/sched.h> #include <linux/delay.h> +#include <linux/of_device.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> @@ -156,8 +157,8 @@ */ enum { - AD7192_SYSCALIB_ZERO_SCALE, - AD7192_SYSCALIB_FULL_SCALE, + AD7192_SYSCALIB_ZERO_SCALE, + AD7192_SYSCALIB_FULL_SCALE, }; struct ad7192_state { @@ -786,76 +787,105 @@ static const struct iio_info ad7195_info = { .validate_trigger = ad_sd_validate_trigger, }; +#define __AD719x_CHANNEL(_si, _channel1, _channel2, _address, _extend_name, \ + _type, _mask_type_av, _ext_info) \ + { \ + .type = (_type), \ + .differential = ((_channel2) == -1 ? 
0 : 1), \ + .indexed = 1, \ + .channel = (_channel1), \ + .channel2 = (_channel2), \ + .address = (_address), \ + .extend_name = (_extend_name), \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \ + BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \ + .info_mask_shared_by_type_available = (_mask_type_av), \ + .ext_info = (_ext_info), \ + .scan_index = (_si), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = 24, \ + .storagebits = 32, \ + .endianness = IIO_BE, \ + }, \ + } + +#define AD719x_DIFF_CHANNEL(_si, _channel1, _channel2, _address) \ + __AD719x_CHANNEL(_si, _channel1, _channel2, _address, NULL, \ + IIO_VOLTAGE, BIT(IIO_CHAN_INFO_SCALE), \ + ad7192_calibsys_ext_info) + +#define AD719x_CHANNEL(_si, _channel1, _address) \ + __AD719x_CHANNEL(_si, _channel1, -1, _address, NULL, IIO_VOLTAGE, \ + BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info) + +#define AD719x_SHORTED_CHANNEL(_si, _channel1, _address) \ + __AD719x_CHANNEL(_si, _channel1, -1, _address, "shorted", IIO_VOLTAGE, \ + BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info) + +#define AD719x_TEMP_CHANNEL(_si, _address) \ + __AD719x_CHANNEL(_si, 0, -1, _address, NULL, IIO_TEMP, 0, NULL) + static const struct iio_chan_spec ad7192_channels[] = { - AD_SD_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M, 24, 32, 0), - AD_SD_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M, 24, 32, 0), - AD_SD_TEMP_CHANNEL(2, AD7192_CH_TEMP, 24, 32, 0), - AD_SD_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M, 24, 32, 0), - AD_SD_CHANNEL(4, 1, AD7192_CH_AIN1, 24, 32, 0), - AD_SD_CHANNEL(5, 2, AD7192_CH_AIN2, 24, 32, 0), - AD_SD_CHANNEL(6, 3, AD7192_CH_AIN3, 24, 32, 0), - AD_SD_CHANNEL(7, 4, AD7192_CH_AIN4, 24, 32, 0), + AD719x_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M), + AD719x_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M), + AD719x_TEMP_CHANNEL(2, AD7192_CH_TEMP), + AD719x_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M), + AD719x_CHANNEL(4, 1, AD7192_CH_AIN1), + AD719x_CHANNEL(5, 2, AD7192_CH_AIN2), + AD719x_CHANNEL(6, 3, AD7192_CH_AIN3), + AD719x_CHANNEL(7, 4, AD7192_CH_AIN4), IIO_CHAN_SOFT_TIMESTAMP(8), }; static const struct iio_chan_spec ad7193_channels[] = { - AD_SD_DIFF_CHANNEL(0, 1, 2, AD7193_CH_AIN1P_AIN2M, 24, 32, 0), - AD_SD_DIFF_CHANNEL(1, 3, 4, AD7193_CH_AIN3P_AIN4M, 24, 32, 0), - AD_SD_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M, 24, 32, 0), - AD_SD_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M, 24, 32, 0), - AD_SD_TEMP_CHANNEL(4, AD7193_CH_TEMP, 24, 32, 0), - AD_SD_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M, 24, 32, 0), - AD_SD_CHANNEL(6, 1, AD7193_CH_AIN1, 24, 32, 0), - AD_SD_CHANNEL(7, 2, AD7193_CH_AIN2, 24, 32, 0), - AD_SD_CHANNEL(8, 3, AD7193_CH_AIN3, 24, 32, 0), - AD_SD_CHANNEL(9, 4, AD7193_CH_AIN4, 24, 32, 0), - AD_SD_CHANNEL(10, 5, AD7193_CH_AIN5, 24, 32, 0), - AD_SD_CHANNEL(11, 6, AD7193_CH_AIN6, 24, 32, 0), - AD_SD_CHANNEL(12, 7, AD7193_CH_AIN7, 24, 32, 0), - AD_SD_CHANNEL(13, 8, AD7193_CH_AIN8, 24, 32, 0), + AD719x_DIFF_CHANNEL(0, 1, 2, AD7193_CH_AIN1P_AIN2M), + AD719x_DIFF_CHANNEL(1, 3, 4, AD7193_CH_AIN3P_AIN4M), + AD719x_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M), + AD719x_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M), + AD719x_TEMP_CHANNEL(4, AD7193_CH_TEMP), + AD719x_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M), + AD719x_CHANNEL(6, 1, AD7193_CH_AIN1), + AD719x_CHANNEL(7, 2, AD7193_CH_AIN2), + AD719x_CHANNEL(8, 3, AD7193_CH_AIN3), + AD719x_CHANNEL(9, 4, AD7193_CH_AIN4), + 
AD719x_CHANNEL(10, 5, AD7193_CH_AIN5), + AD719x_CHANNEL(11, 6, AD7193_CH_AIN6), + AD719x_CHANNEL(12, 7, AD7193_CH_AIN7), + AD719x_CHANNEL(13, 8, AD7193_CH_AIN8), IIO_CHAN_SOFT_TIMESTAMP(14), }; static int ad7192_channels_config(struct iio_dev *indio_dev) { struct ad7192_state *st = iio_priv(indio_dev); - const struct iio_chan_spec *channels; - struct iio_chan_spec *chan; - int i; switch (st->devid) { case ID_AD7193: - channels = ad7193_channels; + indio_dev->channels = ad7193_channels; indio_dev->num_channels = ARRAY_SIZE(ad7193_channels); break; default: - channels = ad7192_channels; + indio_dev->channels = ad7192_channels; indio_dev->num_channels = ARRAY_SIZE(ad7192_channels); break; } - chan = devm_kcalloc(indio_dev->dev.parent, indio_dev->num_channels, - sizeof(*chan), GFP_KERNEL); - if (!chan) - return -ENOMEM; - - indio_dev->channels = chan; - - for (i = 0; i < indio_dev->num_channels; i++) { - *chan = channels[i]; - chan->info_mask_shared_by_all |= - BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY); - if (chan->type != IIO_TEMP) { - chan->info_mask_shared_by_type_available |= - BIT(IIO_CHAN_INFO_SCALE); - chan->ext_info = ad7192_calibsys_ext_info; - } - chan++; - } - return 0; } +static const struct of_device_id ad7192_of_match[] = { + { .compatible = "adi,ad7190", .data = (void *)ID_AD7190 }, + { .compatible = "adi,ad7192", .data = (void *)ID_AD7192 }, + { .compatible = "adi,ad7193", .data = (void *)ID_AD7193 }, + { .compatible = "adi,ad7195", .data = (void *)ID_AD7195 }, + {} +}; +MODULE_DEVICE_TABLE(of, ad7192_of_match); + static int ad7192_probe(struct spi_device *spi) { struct ad7192_state *st; @@ -899,13 +929,16 @@ static int ad7192_probe(struct spi_device *spi) voltage_uv = regulator_get_voltage(st->avdd); - if (voltage_uv) + if (voltage_uv > 0) { st->int_vref_mv = voltage_uv / 1000; - else + } else { + ret = voltage_uv; dev_err(&spi->dev, "Device tree error, reference voltage undefined\n"); + goto error_disable_avdd; + } spi_set_drvdata(spi, indio_dev); - st->devid = spi_get_device_id(spi)->driver_data; + st->devid = (unsigned long)of_device_get_match_data(&spi->dev); indio_dev->dev.parent = &spi->dev; indio_dev->name = spi_get_device_id(spi)->name; indio_dev->modes = INDIO_DIRECT_MODE; @@ -986,26 +1019,6 @@ static int ad7192_remove(struct spi_device *spi) return 0; } -static const struct spi_device_id ad7192_id[] = { - {"ad7190", ID_AD7190}, - {"ad7192", ID_AD7192}, - {"ad7193", ID_AD7193}, - {"ad7195", ID_AD7195}, - {} -}; - -MODULE_DEVICE_TABLE(spi, ad7192_id); - -static const struct of_device_id ad7192_of_match[] = { - { .compatible = "adi,ad7190" }, - { .compatible = "adi,ad7192" }, - { .compatible = "adi,ad7193" }, - { .compatible = "adi,ad7195" }, - {} -}; - -MODULE_DEVICE_TABLE(of, ad7192_of_match); - static struct spi_driver ad7192_driver = { .driver = { .name = "ad7192", @@ -1013,7 +1026,6 @@ static struct spi_driver ad7192_driver = { }, .probe = ad7192_probe, .remove = ad7192_remove, - .id_table = ad7192_id, }; module_spi_driver(ad7192_driver); diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c index a6798f7dfdb8..6595fd196288 100644 --- a/drivers/iio/adc/ad7292.c +++ b/drivers/iio/adc/ad7292.c @@ -122,7 +122,10 @@ static int ad7292_single_conversion(struct ad7292_state *st, { .tx_buf = &st->d8, .len = 4, - .delay_usecs = 6, + .delay = { + .value = 6, + .unit = SPI_DELAY_UNIT_USECS + }, }, { .rx_buf = &st->d16, .len = 2, diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index a5c7771227d5..9d96f7d08b95 100644 --- 
a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -723,6 +723,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) { struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit); + u32 cor; if (!chan) continue; @@ -732,6 +733,20 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) continue; if (state) { + cor = at91_adc_readl(st, AT91_SAMA5D2_COR); + + if (chan->differential) + cor |= (BIT(chan->channel) | + BIT(chan->channel2)) << + AT91_SAMA5D2_COR_DIFF_OFFSET; + else + cor &= ~(BIT(chan->channel) << + AT91_SAMA5D2_COR_DIFF_OFFSET); + + at91_adc_writel(st, AT91_SAMA5D2_COR, cor); + } + + if (state) { at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel)); /* enable irq only if not using DMA */ diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c index 2df7d057b249..22131a677445 100644 --- a/drivers/iio/adc/exynos_adc.c +++ b/drivers/iio/adc/exynos_adc.c @@ -836,8 +836,10 @@ static int exynos_adc_probe(struct platform_device *pdev) info->vdd = devm_regulator_get(&pdev->dev, "vdd"); if (IS_ERR(info->vdd)) { - dev_err(&pdev->dev, "failed getting regulator, err = %ld\n", - PTR_ERR(info->vdd)); + if (PTR_ERR(info->vdd) != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed getting regulator, err = %ld\n", + PTR_ERR(info->vdd)); return PTR_ERR(info->vdd); } diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c index 3b6f3b9a6c5b..0c5d7aaf6826 100644 --- a/drivers/iio/adc/max1118.c +++ b/drivers/iio/adc/max1118.c @@ -71,7 +71,10 @@ static int max1118_read(struct spi_device *spi, int channel) */ { .len = 0, - .delay_usecs = 1, /* > CNVST Low Time 100 ns */ + .delay = { /* > CNVST Low Time 100 ns */ + .value = 1, + .unit = SPI_DELAY_UNIT_USECS + }, .cs_change = 1, }, /* @@ -81,7 +84,10 @@ static int max1118_read(struct spi_device *spi, int channel) */ { .len = 0, - .delay_usecs = 8, + .delay = { + .value = 8, + .unit = SPI_DELAY_UNIT_USECS + }, }, { .rx_buf = &adc->data, diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c index 465c7625a55a..2c0eb5de110c 100644 --- a/drivers/iio/adc/mcp320x.c +++ b/drivers/iio/adc/mcp320x.c @@ -421,7 +421,8 @@ static int mcp320x_probe(struct spi_device *spi) adc->transfer[1].len++; /* conversions are started by asserting CS pin for 8 usec */ - adc->start_conv_transfer.delay_usecs = 8; + adc->start_conv_transfer.delay.value = 8; + adc->start_conv_transfer.delay.unit = SPI_DELAY_UNIT_USECS; spi_message_init_with_transfers(&adc->start_conv_msg, &adc->start_conv_transfer, 1); diff --git a/drivers/iio/adc/npcm_adc.c b/drivers/iio/adc/npcm_adc.c index a6170a37ebe8..83bad2d5575d 100644 --- a/drivers/iio/adc/npcm_adc.c +++ b/drivers/iio/adc/npcm_adc.c @@ -14,6 +14,7 @@ #include <linux/regulator/consumer.h> #include <linux/spinlock.h> #include <linux/uaccess.h> +#include <linux/reset.h> struct npcm_adc { bool int_status; @@ -23,13 +24,9 @@ struct npcm_adc { struct clk *adc_clk; wait_queue_head_t wq; struct regulator *vref; - struct regmap *rst_regmap; + struct reset_control *reset; }; -/* NPCM7xx reset module */ -#define NPCM7XX_IPSRST1_OFFSET 0x020 -#define NPCM7XX_IPSRST1_ADC_RST BIT(27) - /* ADC registers */ #define NPCM_ADCCON 0x00 #define NPCM_ADCDATA 0x04 @@ -106,13 +103,11 @@ static int npcm_adc_read(struct npcm_adc *info, int *val, u8 channel) msecs_to_jiffies(10)); if (ret == 0) { regtemp = ioread32(info->regs + NPCM_ADCCON); - if ((regtemp & 
NPCM_ADCCON_ADC_CONV) && info->rst_regmap) { + if (regtemp & NPCM_ADCCON_ADC_CONV) { /* if conversion failed - reset ADC module */ - regmap_write(info->rst_regmap, NPCM7XX_IPSRST1_OFFSET, - NPCM7XX_IPSRST1_ADC_RST); + reset_control_assert(info->reset); msleep(100); - regmap_write(info->rst_regmap, NPCM7XX_IPSRST1_OFFSET, - 0x0); + reset_control_deassert(info->reset); msleep(100); /* Enable ADC and start conversion module */ @@ -186,7 +181,6 @@ static int npcm_adc_probe(struct platform_device *pdev) struct npcm_adc *info; struct iio_dev *indio_dev; struct device *dev = &pdev->dev; - struct device_node *np = pdev->dev.of_node; indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*info)); if (!indio_dev) @@ -199,6 +193,10 @@ static int npcm_adc_probe(struct platform_device *pdev) if (IS_ERR(info->regs)) return PTR_ERR(info->regs); + info->reset = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(info->reset)) + return PTR_ERR(info->reset); + info->adc_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(info->adc_clk)) { dev_warn(&pdev->dev, "ADC clock failed: can't read clk\n"); @@ -211,16 +209,6 @@ static int npcm_adc_probe(struct platform_device *pdev) div = div >> NPCM_ADCCON_DIV_SHIFT; info->adc_sample_hz = clk_get_rate(info->adc_clk) / ((div + 1) * 2); - if (of_device_is_compatible(np, "nuvoton,npcm750-adc")) { - info->rst_regmap = syscon_regmap_lookup_by_compatible - ("nuvoton,npcm750-rst"); - if (IS_ERR(info->rst_regmap)) { - dev_err(&pdev->dev, "Failed to find nuvoton,npcm750-rst\n"); - ret = PTR_ERR(info->rst_regmap); - goto err_disable_clk; - } - } - irq = platform_get_irq(pdev, 0); if (irq <= 0) { ret = -EINVAL; diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index 2aad2cda6943..76a60d93fe23 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c @@ -842,31 +842,6 @@ static inline void stm32_dfsdm_process_data(struct stm32_dfsdm_adc *adc, } } -static irqreturn_t stm32_dfsdm_adc_trigger_handler(int irq, void *p) -{ - struct iio_poll_func *pf = p; - struct iio_dev *indio_dev = pf->indio_dev; - struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); - int available = stm32_dfsdm_adc_dma_residue(adc); - - while (available >= indio_dev->scan_bytes) { - s32 *buffer = (s32 *)&adc->rx_buf[adc->bufi]; - - stm32_dfsdm_process_data(adc, buffer); - - iio_push_to_buffers_with_timestamp(indio_dev, buffer, - pf->timestamp); - available -= indio_dev->scan_bytes; - adc->bufi += indio_dev->scan_bytes; - if (adc->bufi >= adc->buf_sz) - adc->bufi = 0; - } - - iio_trigger_notify_done(indio_dev->trig); - - return IRQ_HANDLED; -} - static void stm32_dfsdm_dma_buffer_done(void *data) { struct iio_dev *indio_dev = data; @@ -874,11 +849,6 @@ static void stm32_dfsdm_dma_buffer_done(void *data) int available = stm32_dfsdm_adc_dma_residue(adc); size_t old_pos; - if (indio_dev->currentmode & INDIO_BUFFER_TRIGGERED) { - iio_trigger_poll_chained(indio_dev->trig); - return; - } - /* * FIXME: In Kernel interface does not support cyclic DMA buffer,and * offers only an interface to push data samples per samples. @@ -906,7 +876,15 @@ static void stm32_dfsdm_dma_buffer_done(void *data) adc->bufi = 0; old_pos = 0; } - /* regular iio buffer without trigger */ + /* + * In DMA mode the trigger services of IIO are not used + * (e.g. no call to iio_trigger_poll). + * Calling irq handler associated to the hardware trigger is not + * relevant as the conversions have already been done. Data + * transfers are performed directly in DMA callback instead. 
+ * This implementation avoids to call trigger irq handler that + * may sleep, in an atomic context (DMA irq handler context). + */ if (adc->dev_data->type == DFSDM_IIO) iio_push_to_buffers(indio_dev, buffer); } @@ -1536,8 +1514,7 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev) } ret = iio_triggered_buffer_setup(indio_dev, - &iio_pollfunc_store_time, - &stm32_dfsdm_adc_trigger_handler, + &iio_pollfunc_store_time, NULL, &stm32_dfsdm_buffer_setup_ops); if (ret) { stm32_dfsdm_dma_release(indio_dev); diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c index 4965246808bd..77620359b54c 100644 --- a/drivers/iio/adc/ti-tlc4541.c +++ b/drivers/iio/adc/ti-tlc4541.c @@ -189,7 +189,8 @@ static int tlc4541_probe(struct spi_device *spi) /* Setup default message */ st->scan_single_xfer[0].rx_buf = &st->rx_buf[0]; st->scan_single_xfer[0].len = 3; - st->scan_single_xfer[1].delay_usecs = 3; + st->scan_single_xfer[1].delay.value = 3; + st->scan_single_xfer[1].delay.unit = SPI_DELAY_UNIT_NSECS; st->scan_single_xfer[2].rx_buf = &st->rx_buf[0]; st->scan_single_xfer[2].len = 2; diff --git a/drivers/iio/amplifiers/Kconfig b/drivers/iio/amplifiers/Kconfig index da7f126d197b..9b02c9a2bc8a 100644 --- a/drivers/iio/amplifiers/Kconfig +++ b/drivers/iio/amplifiers/Kconfig @@ -22,4 +22,14 @@ config AD8366 To compile this driver as a module, choose M here: the module will be called ad8366. +config HMC425 + tristate "Analog Devices HMC425A and similar GPIO Gain Amplifiers" + depends on GPIOLIB + help + Say yes here to build support for Analog Devices HMC425A and similar + gain amplifiers or step attenuators. + + To compile this driver as a module, choose M here: the + module will be called hmc425a. + endmenu diff --git a/drivers/iio/amplifiers/Makefile b/drivers/iio/amplifiers/Makefile index 9abef2ebe9bc..cb551d82f56b 100644 --- a/drivers/iio/amplifiers/Makefile +++ b/drivers/iio/amplifiers/Makefile @@ -5,3 +5,4 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_AD8366) += ad8366.o +obj-$(CONFIG_HMC425) += hmc425a.o diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c index 0176d3d8cc9c..62167b87caea 100644 --- a/drivers/iio/amplifiers/ad8366.c +++ b/drivers/iio/amplifiers/ad8366.c @@ -5,6 +5,7 @@ * AD8366 Dual-Digital Variable Gain Amplifier (VGA) * ADA4961 BiCMOS RF Digital Gain Amplifier (DGA) * ADL5240 Digitally controlled variable gain amplifier (VGA) + * HMC1119 0.25 dB LSB, 7-Bit, Silicon Digital Attenuator * * Copyright 2012-2019 Analog Devices Inc. 
*/ @@ -27,6 +28,7 @@ enum ad8366_type { ID_AD8366, ID_ADA4961, ID_ADL5240, + ID_HMC1119, }; struct ad8366_info { @@ -62,6 +64,10 @@ static struct ad8366_info ad8366_infos[] = { .gain_min = -11500, .gain_max = 20000, }, + [ID_HMC1119] = { + .gain_min = -31750, + .gain_max = 0, + }, }; static int ad8366_write(struct iio_dev *indio_dev, @@ -84,6 +90,9 @@ static int ad8366_write(struct iio_dev *indio_dev, case ID_ADL5240: st->data[0] = (ch_a & 0x3F); break; + case ID_HMC1119: + st->data[0] = ch_a; + break; } ret = spi_write(st->spi, st->data, indio_dev->num_channels); @@ -118,6 +127,9 @@ static int ad8366_read_raw(struct iio_dev *indio_dev, case ID_ADL5240: gain = 20000 - 31500 + code * 500; break; + case ID_HMC1119: + gain = -1 * code * 250; + break; } /* Values in dB */ @@ -164,6 +176,9 @@ static int ad8366_write_raw(struct iio_dev *indio_dev, case ID_ADL5240: code = ((gain - 500 - 20000) / 500) & 0x3F; break; + case ID_HMC1119: + code = (abs(gain) / 250) & 0x7F; + break; } mutex_lock(&st->lock); @@ -180,9 +195,22 @@ static int ad8366_write_raw(struct iio_dev *indio_dev, return ret; } +static int ad8366_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_HARDWAREGAIN: + return IIO_VAL_INT_PLUS_MICRO_DB; + default: + return -EINVAL; + } +} + static const struct iio_info ad8366_info = { .read_raw = &ad8366_read_raw, .write_raw = &ad8366_write_raw, + .write_raw_get_fmt = &ad8366_write_raw_get_fmt, }; #define AD8366_CHAN(_channel) { \ @@ -233,6 +261,7 @@ static int ad8366_probe(struct spi_device *spi) break; case ID_ADA4961: case ID_ADL5240: + case ID_HMC1119: st->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH); indio_dev->channels = ada4961_channels; @@ -285,6 +314,7 @@ static const struct spi_device_id ad8366_id[] = { {"ad8366", ID_AD8366}, {"ada4961", ID_ADA4961}, {"adl5240", ID_ADL5240}, + {"hmc1119", ID_HMC1119}, {} }; MODULE_DEVICE_TABLE(spi, ad8366_id); diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c new file mode 100644 index 000000000000..d9e6e9678ffc --- /dev/null +++ b/drivers/iio/amplifiers/hmc425a.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * HMC425A and similar Gain Amplifiers + * + * Copyright 2020 Analog Devices Inc. 
+ */ + +#include <linux/device.h> +#include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/regulator/consumer.h> +#include <linux/sysfs.h> + +enum hmc425a_type { + ID_HMC425A, +}; + +struct hmc425a_chip_info { + const char *name; + const struct iio_chan_spec *channels; + unsigned int num_channels; + unsigned int num_gpios; + int gain_min; + int gain_max; + int default_gain; +}; + +struct hmc425a_state { + struct regulator *reg; + struct mutex lock; /* protect sensor state */ + struct hmc425a_chip_info *chip_info; + struct gpio_descs *gpios; + enum hmc425a_type type; + u32 gain; +}; + +static int hmc425a_write(struct iio_dev *indio_dev, u32 value) +{ + struct hmc425a_state *st = iio_priv(indio_dev); + DECLARE_BITMAP(values, BITS_PER_TYPE(value)); + + values[0] = value; + + gpiod_set_array_value_cansleep(st->gpios->ndescs, st->gpios->desc, + NULL, values); + return 0; +} + +static int hmc425a_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val, + int *val2, long m) +{ + struct hmc425a_state *st = iio_priv(indio_dev); + int code, gain = 0; + int ret; + + mutex_lock(&st->lock); + switch (m) { + case IIO_CHAN_INFO_HARDWAREGAIN: + code = st->gain; + + switch (st->type) { + case ID_HMC425A: + gain = ~code * -500; + break; + } + + *val = gain / 1000; + *val2 = (gain % 1000) * 1000; + + ret = IIO_VAL_INT_PLUS_MICRO_DB; + break; + default: + ret = -EINVAL; + } + mutex_unlock(&st->lock); + + return ret; +}; + +static int hmc425a_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, + int val2, long mask) +{ + struct hmc425a_state *st = iio_priv(indio_dev); + struct hmc425a_chip_info *inf = st->chip_info; + int code = 0, gain; + int ret; + + if (val < 0) + gain = (val * 1000) - (val2 / 1000); + else + gain = (val * 1000) + (val2 / 1000); + + if (gain > inf->gain_max || gain < inf->gain_min) + return -EINVAL; + + switch (st->type) { + case ID_HMC425A: + code = ~((abs(gain) / 500) & 0x3F); + break; + } + + mutex_lock(&st->lock); + switch (mask) { + case IIO_CHAN_INFO_HARDWAREGAIN: + st->gain = code; + + ret = hmc425a_write(indio_dev, st->gain); + break; + default: + ret = -EINVAL; + } + mutex_unlock(&st->lock); + + return ret; +} + +static int hmc425a_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_HARDWAREGAIN: + return IIO_VAL_INT_PLUS_MICRO_DB; + default: + return -EINVAL; + } +} + +static const struct iio_info hmc425a_info = { + .read_raw = &hmc425a_read_raw, + .write_raw = &hmc425a_write_raw, + .write_raw_get_fmt = &hmc425a_write_raw_get_fmt, +}; + +#define HMC425A_CHAN(_channel) \ +{ \ + .type = IIO_VOLTAGE, \ + .output = 1, \ + .indexed = 1, \ + .channel = _channel, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_HARDWAREGAIN), \ +} + +static const struct iio_chan_spec hmc425a_channels[] = { + HMC425A_CHAN(0), +}; + +/* Match table for of_platform binding */ +static const struct of_device_id hmc425a_of_match[] = { + { .compatible = "adi,hmc425a", .data = (void *)ID_HMC425A }, + {}, +}; +MODULE_DEVICE_TABLE(of, hmc425a_of_match); + +static void hmc425a_reg_disable(void *data) +{ + struct hmc425a_state *st = data; + + regulator_disable(st->reg); +} + +static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = { + 
[ID_HMC425A] = { + .name = "hmc425a", + .channels = hmc425a_channels, + .num_channels = ARRAY_SIZE(hmc425a_channels), + .num_gpios = 6, + .gain_min = -31500, + .gain_max = 0, + .default_gain = -0x40, /* set default gain -31.5db*/ + }, +}; + +static int hmc425a_probe(struct platform_device *pdev) +{ + struct iio_dev *indio_dev; + struct hmc425a_state *st; + int ret; + + indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + st = iio_priv(indio_dev); + st->type = (enum hmc425a_type)of_device_get_match_data(&pdev->dev); + + st->chip_info = &hmc425a_chip_info_tbl[st->type]; + indio_dev->num_channels = st->chip_info->num_channels; + indio_dev->channels = st->chip_info->channels; + indio_dev->name = st->chip_info->name; + st->gain = st->chip_info->default_gain; + + st->gpios = devm_gpiod_get_array(&pdev->dev, "ctrl", GPIOD_OUT_LOW); + if (IS_ERR(st->gpios)) { + ret = PTR_ERR(st->gpios); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get gpios\n"); + return ret; + } + + if (st->gpios->ndescs != st->chip_info->num_gpios) { + dev_err(&pdev->dev, "%d GPIOs needed to operate\n", + st->chip_info->num_gpios); + return -ENODEV; + } + + st->reg = devm_regulator_get(&pdev->dev, "vcc-supply"); + if (IS_ERR(st->reg)) + return PTR_ERR(st->reg); + + ret = regulator_enable(st->reg); + if (ret) + return ret; + ret = devm_add_action_or_reset(&pdev->dev, hmc425a_reg_disable, st); + if (ret) + return ret; + + mutex_init(&st->lock); + + indio_dev->dev.parent = &pdev->dev; + indio_dev->info = &hmc425a_info; + indio_dev->modes = INDIO_DIRECT_MODE; + + return devm_iio_device_register(&pdev->dev, indio_dev); +} + +static struct platform_driver hmc425a_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = hmc425a_of_match, + }, + .probe = hmc425a_probe, +}; +module_platform_driver(hmc425a_driver); + +MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>"); +MODULE_DESCRIPTION("Analog Devices HMC425A and similar GPIO control Gain Amplifiers"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig index 0b91de4df8f4..a7e65a59bf42 100644 --- a/drivers/iio/chemical/Kconfig +++ b/drivers/iio/chemical/Kconfig @@ -91,6 +91,8 @@ config SPS30 tristate "SPS30 particulate matter sensor" depends on I2C select CRC8 + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say Y here to build support for the Sensirion SPS30 particulate matter sensor. 
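Stepping back to the hmc425a driver added above: it encodes the requested gain as a complemented step count (500 mdB per LSB for the HMC425A) and pushes that word onto the parallel control bus through the GPIO descriptor array API. The sketch below is not part of this commit; it is a minimal, self-contained illustration of the same pattern, with a hypothetical helper name, mirroring the arithmetic in hmc425a_write_raw() and the bitmap write in hmc425a_write().

/*
 * Illustrative sketch (not part of this commit): drive an HMC425A-style
 * 6-bit attenuation word over a gpio_descs array, using the same
 * encoding as hmc425a_write_raw()/hmc425a_write() above.
 */
#include <linux/bitops.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/types.h>

static int example_set_gain_mdb(struct gpio_descs *ctrl, int gain_mdb)
{
        DECLARE_BITMAP(values, BITS_PER_TYPE(u32));
        u32 code;

        /* 500 mdB per step, complemented as in hmc425a_write_raw() */
        code = ~((abs(gain_mdb) / 500) & 0x3F);

        /* one bit per control line, bit 0 drives the first "ctrl" GPIO */
        values[0] = code;

        /* may sleep, e.g. when the lines sit behind an I2C GPIO expander */
        return gpiod_set_array_value_cansleep(ctrl->ndescs, ctrl->desc,
                                              NULL, values);
}

For a requested gain of -10.5 dB (gain_mdb = -10500) this gives a step count of 21, so the six control lines are driven with the complement pattern 0b101010, and hmc425a_read_raw() recovers -10500 mdB from the stored code via gain = ~code * -500.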
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c index 2f0a6fed2589..82d470561ad3 100644 --- a/drivers/iio/chemical/atlas-sensor.c +++ b/drivers/iio/chemical/atlas-sensor.c @@ -48,6 +48,11 @@ #define ATLAS_REG_EC_CALIB_STATUS_LOW BIT(2) #define ATLAS_REG_EC_CALIB_STATUS_HIGH BIT(3) +#define ATLAS_REG_DO_CALIB_STATUS 0x09 +#define ATLAS_REG_DO_CALIB_STATUS_MASK 0x03 +#define ATLAS_REG_DO_CALIB_STATUS_PRESSURE BIT(0) +#define ATLAS_REG_DO_CALIB_STATUS_DO BIT(1) + #define ATLAS_REG_PH_TEMP_DATA 0x0e #define ATLAS_REG_PH_DATA 0x16 @@ -60,14 +65,19 @@ #define ATLAS_REG_ORP_CALIB_STATUS 0x0d #define ATLAS_REG_ORP_DATA 0x0e +#define ATLAS_REG_DO_TEMP_DATA 0x12 +#define ATLAS_REG_DO_DATA 0x22 + #define ATLAS_PH_INT_TIME_IN_MS 450 #define ATLAS_EC_INT_TIME_IN_MS 650 #define ATLAS_ORP_INT_TIME_IN_MS 450 +#define ATLAS_DO_INT_TIME_IN_MS 450 enum { ATLAS_PH_SM, ATLAS_EC_SM, ATLAS_ORP_SM, + ATLAS_DO_SM, }; struct atlas_data { @@ -76,6 +86,7 @@ struct atlas_data { struct atlas_device *chip; struct regmap *regmap; struct irq_work work; + unsigned int interrupt_enabled; __be32 buffer[6]; /* 96-bit data + 32-bit pad + 64-bit timestamp */ }; @@ -121,7 +132,7 @@ static const struct iio_chan_spec atlas_ph_channels[] = { }, }; -#define ATLAS_EC_CHANNEL(_idx, _addr) \ +#define ATLAS_CONCENTRATION_CHANNEL(_idx, _addr) \ {\ .type = IIO_CONCENTRATION, \ .indexed = 1, \ @@ -152,8 +163,8 @@ static const struct iio_chan_spec atlas_ec_channels[] = { .endianness = IIO_BE, }, }, - ATLAS_EC_CHANNEL(0, ATLAS_REG_TDS_DATA), - ATLAS_EC_CHANNEL(1, ATLAS_REG_PSS_DATA), + ATLAS_CONCENTRATION_CHANNEL(0, ATLAS_REG_TDS_DATA), + ATLAS_CONCENTRATION_CHANNEL(1, ATLAS_REG_PSS_DATA), IIO_CHAN_SOFT_TIMESTAMP(3), { .type = IIO_TEMP, @@ -182,6 +193,19 @@ static const struct iio_chan_spec atlas_orp_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(1), }; +static const struct iio_chan_spec atlas_do_channels[] = { + ATLAS_CONCENTRATION_CHANNEL(0, ATLAS_REG_DO_DATA), + IIO_CHAN_SOFT_TIMESTAMP(1), + { + .type = IIO_TEMP, + .address = ATLAS_REG_DO_TEMP_DATA, + .info_mask_separate = + BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), + .output = 1, + .scan_index = -1 + }, +}; + static int atlas_check_ph_calibration(struct atlas_data *data) { struct device *dev = &data->client->dev; @@ -262,7 +286,31 @@ static int atlas_check_orp_calibration(struct atlas_data *data) dev_warn(dev, "device has not been calibrated\n"); return 0; -}; +} + +static int atlas_check_do_calibration(struct atlas_data *data) +{ + struct device *dev = &data->client->dev; + int ret; + unsigned int val; + + ret = regmap_read(data->regmap, ATLAS_REG_DO_CALIB_STATUS, &val); + if (ret) + return ret; + + if (!(val & ATLAS_REG_DO_CALIB_STATUS_MASK)) { + dev_warn(dev, "device has not been calibrated\n"); + return 0; + } + + if (!(val & ATLAS_REG_DO_CALIB_STATUS_PRESSURE)) + dev_warn(dev, "device missing atmospheric pressure calibration\n"); + + if (!(val & ATLAS_REG_DO_CALIB_STATUS_DO)) + dev_warn(dev, "device missing dissolved oxygen calibration\n"); + + return 0; +} struct atlas_device { const struct iio_chan_spec *channels; @@ -295,6 +343,13 @@ static struct atlas_device atlas_devices[] = { .calibration = &atlas_check_orp_calibration, .delay = ATLAS_ORP_INT_TIME_IN_MS, }, + [ATLAS_DO_SM] = { + .channels = atlas_do_channels, + .num_channels = 3, + .data_reg = ATLAS_REG_DO_DATA, + .calibration = &atlas_check_do_calibration, + .delay = ATLAS_DO_INT_TIME_IN_MS, + }, }; static int atlas_set_powermode(struct atlas_data *data, int on) @@ -304,6 +359,9 @@ 
static int atlas_set_powermode(struct atlas_data *data, int on) static int atlas_set_interrupt(struct atlas_data *data, bool state) { + if (!data->interrupt_enabled) + return 0; + return regmap_update_bits(data->regmap, ATLAS_REG_INT_CONTROL, ATLAS_REG_INT_CONTROL_EN, state ? ATLAS_REG_INT_CONTROL_EN : 0); @@ -507,6 +565,7 @@ static const struct i2c_device_id atlas_id[] = { { "atlas-ph-sm", ATLAS_PH_SM}, { "atlas-ec-sm", ATLAS_EC_SM}, { "atlas-orp-sm", ATLAS_ORP_SM}, + { "atlas-do-sm", ATLAS_DO_SM}, {} }; MODULE_DEVICE_TABLE(i2c, atlas_id); @@ -515,6 +574,7 @@ static const struct of_device_id atlas_dt_ids[] = { { .compatible = "atlas,ph-sm", .data = (void *)ATLAS_PH_SM, }, { .compatible = "atlas,ec-sm", .data = (void *)ATLAS_EC_SM, }, { .compatible = "atlas,orp-sm", .data = (void *)ATLAS_ORP_SM, }, + { .compatible = "atlas,do-sm", .data = (void *)ATLAS_DO_SM, }, { } }; MODULE_DEVICE_TABLE(of, atlas_dt_ids); @@ -572,11 +632,6 @@ static int atlas_probe(struct i2c_client *client, if (ret) return ret; - if (client->irq <= 0) { - dev_err(&client->dev, "no valid irq defined\n"); - return -EINVAL; - } - ret = chip->calibration(data); if (ret) return ret; @@ -596,16 +651,20 @@ static int atlas_probe(struct i2c_client *client, init_irq_work(&data->work, atlas_work_handler); - /* interrupt pin toggles on new conversion */ - ret = devm_request_threaded_irq(&client->dev, client->irq, - NULL, atlas_interrupt_handler, - IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - "atlas_irq", - indio_dev); - if (ret) { - dev_err(&client->dev, "request irq (%d) failed\n", client->irq); - goto unregister_buffer; + if (client->irq > 0) { + /* interrupt pin toggles on new conversion */ + ret = devm_request_threaded_irq(&client->dev, client->irq, + NULL, atlas_interrupt_handler, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "atlas_irq", + indio_dev); + + if (ret) + dev_warn(&client->dev, + "request irq (%d) failed\n", client->irq); + else + data->interrupt_enabled = 1; } ret = atlas_set_powermode(data, 1); diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index e051edbc43c1..0e35ff06f9af 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -328,6 +328,8 @@ static struct st_sensors_platform_data *st_sensors_dev_probe(struct device *dev, return NULL; pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); if (!device_property_read_u32(dev, "st,drdy-int-pin", &val) && (val <= 2)) pdata->drdy_int_pin = (u8) val; else @@ -371,6 +373,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, /* If OF/DT pdata exists, it will take precedence of anything else */ of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata); + if (IS_ERR(of_pdata)) + return PTR_ERR(of_pdata); if (of_pdata) pdata = of_pdata; diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index 979070196da9..93744011b63f 100644 --- a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig @@ -121,26 +121,6 @@ config AD5624R_SPI Say yes here to build support for Analog Devices AD5624R, AD5644R and AD5664R converters (DAC). This driver uses the common SPI interface. -config LTC1660 - tristate "Linear Technology LTC1660/LTC1665 DAC SPI driver" - depends on SPI - help - Say yes here to build support for Linear Technology - LTC1660 and LTC1665 Digital to Analog Converters. 
- - To compile this driver as a module, choose M here: the - module will be called ltc1660. - -config LTC2632 - tristate "Linear Technology LTC2632-12/10/8 DAC spi driver" - depends on SPI - help - Say yes here to build support for Linear Technology - LTC2632-12, LTC2632-10, LTC2632-8 converters (DAC). - - To compile this driver as a module, choose M here: the - module will be called ltc2632. - config AD5686 tristate @@ -208,6 +188,16 @@ config AD5764 To compile this driver as a module, choose M here: the module will be called ad5764. +config AD5770R + tristate "Analog Devices AD5770R IDAC driver" + depends on SPI_MASTER + help + Say yes here to build support for Analog Devices AD5770R Digital to + Analog Converter. + + To compile this driver as a module, choose M here: the + module will be called ad5770r. + config AD5791 tristate "Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC SPI driver" depends on SPI @@ -229,16 +219,6 @@ config AD7303 To compile this driver as module choose M here: the module will be called ad7303. -config CIO_DAC - tristate "Measurement Computing CIO-DAC IIO driver" - depends on X86 && (ISA_BUS || PC104) - select ISA_BUS_API - help - Say yes here to build support for the Measurement Computing CIO-DAC - analog output device family (CIO-DAC16, CIO-DAC08, PC104-DAC06). The - base port addresses for the devices may be configured via the base - array module parameter. - config AD8801 tristate "Analog Devices AD8801/AD8803 DAC driver" depends on SPI_MASTER @@ -249,6 +229,16 @@ config AD8801 To compile this driver as a module choose M here: the module will be called ad8801. +config CIO_DAC + tristate "Measurement Computing CIO-DAC IIO driver" + depends on X86 && (ISA_BUS || PC104) + select ISA_BUS_API + help + Say yes here to build support for the Measurement Computing CIO-DAC + analog output device family (CIO-DAC16, CIO-DAC08, PC104-DAC06). The + base port addresses for the devices may be configured via the base + array module parameter. + config DPOT_DAC tristate "DAC emulation using a DPOT" depends on OF @@ -278,6 +268,27 @@ config LPC18XX_DAC To compile this driver as a module, choose M here: the module will be called lpc18xx_dac. +config LTC1660 + tristate "Linear Technology LTC1660/LTC1665 DAC SPI driver" + depends on SPI + help + Say yes here to build support for Linear Technology + LTC1660 and LTC1665 Digital to Analog Converters. + + To compile this driver as a module, choose M here: the + module will be called ltc1660. + +config LTC2632 + tristate "Linear Technology LTC2632-12/10/8 and LTC2636-12/10/8 DAC spi driver" + depends on SPI + help + Say yes here to build support for Linear Technology + LTC2632-12, LTC2632-10, LTC2632-8, LTC2636-12, LTC2636-10 and + LTC2636-8 converters (DAC). + + To compile this driver as a module, choose M here: the + module will be called ltc2632. 
+ config M62332 tristate "Mitsubishi M62332 DAC driver" depends on I2C diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile index 1369fa1d2f0e..2fc481167724 100644 --- a/drivers/iio/dac/Makefile +++ b/drivers/iio/dac/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_AD5755) += ad5755.o obj-$(CONFIG_AD5755) += ad5758.o obj-$(CONFIG_AD5761) += ad5761.o obj-$(CONFIG_AD5764) += ad5764.o +obj-$(CONFIG_AD5770R) += ad5770r.o obj-$(CONFIG_AD5791) += ad5791.o obj-$(CONFIG_AD5686) += ad5686.o obj-$(CONFIG_AD5686_SPI) += ad5686-spi.o diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c index b9175fb4c8ab..388ddd14bfd0 100644 --- a/drivers/iio/dac/ad5755.c +++ b/drivers/iio/dac/ad5755.c @@ -631,10 +631,9 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev) } } - if (i == ARRAY_SIZE(ad5755_dcdc_freq_table)) { + if (i == ARRAY_SIZE(ad5755_dcdc_freq_table)) dev_err(dev, - "adi,dc-dc-freq out of range selecting 410kHz"); - } + "adi,dc-dc-freq out of range selecting 410kHz\n"); } pdata->dc_dc_maxv = AD5755_DC_DC_MAXV_23V; @@ -645,17 +644,16 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev) break; } } - if (i == ARRAY_SIZE(ad5755_dcdc_maxv_table)) { + if (i == ARRAY_SIZE(ad5755_dcdc_maxv_table)) dev_err(dev, - "adi,dc-dc-maxv out of range selecting 23V"); - } + "adi,dc-dc-maxv out of range selecting 23V\n"); } devnr = 0; for_each_child_of_node(np, pp) { if (devnr >= AD5755_NUM_CHANNELS) { dev_err(dev, - "There is to many channels defined in DT\n"); + "There are too many channels defined in DT\n"); goto error_out; } @@ -681,11 +679,10 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev) break; } } - if (i == ARRAY_SIZE(ad5755_slew_rate_table)) { + if (i == ARRAY_SIZE(ad5755_slew_rate_table)) dev_err(dev, - "channel %d slew rate out of range selecting 64kHz", + "channel %d slew rate out of range selecting 64kHz\n", devnr); - } pdata->dac[devnr].slew.step_size = AD5755_SLEW_STEP_SIZE_1; for (i = 0; i < ARRAY_SIZE(ad5755_slew_step_table); i++) { @@ -695,11 +692,10 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev) break; } } - if (i == ARRAY_SIZE(ad5755_slew_step_table)) { + if (i == ARRAY_SIZE(ad5755_slew_step_table)) dev_err(dev, - "channel %d slew step size out of range selecting 1 LSB", + "channel %d slew step size out of range selecting 1 LSB\n", devnr); - } } else { pdata->dac[devnr].slew.enable = false; pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k; diff --git a/drivers/iio/dac/ad5770r.c b/drivers/iio/dac/ad5770r.c new file mode 100644 index 000000000000..a98ea76732e7 --- /dev/null +++ b/drivers/iio/dac/ad5770r.c @@ -0,0 +1,695 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * AD5770R Digital to analog converters driver + * + * Copyright 2018 Analog Devices Inc. 
+ */ + +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/gpio/consumer.h> +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/property.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> + +#define ADI_SPI_IF_CONFIG_A 0x00 +#define ADI_SPI_IF_CONFIG_B 0x01 +#define ADI_SPI_IF_DEVICE_CONFIG 0x02 +#define ADI_SPI_IF_CHIP_TYPE 0x03 +#define ADI_SPI_IF_PRODUCT_ID_L 0x04 +#define ADI_SPI_IF_PRODUCT_ID_H 0x05 +#define ADI_SPI_IF_CHIP_GRADE 0x06 +#define ADI_SPI_IF_SCRACTH_PAD 0x0A +#define ADI_SPI_IF_SPI_REVISION 0x0B +#define ADI_SPI_IF_SPI_VENDOR_L 0x0C +#define ADI_SPI_IF_SPI_VENDOR_H 0x0D +#define ADI_SPI_IF_SPI_STREAM_MODE 0x0E +#define ADI_SPI_IF_CONFIG_C 0x10 +#define ADI_SPI_IF_STATUS_A 0x11 + +/* ADI_SPI_IF_CONFIG_A */ +#define ADI_SPI_IF_SW_RESET_MSK (BIT(0) | BIT(7)) +#define ADI_SPI_IF_SW_RESET_SEL(x) ((x) & ADI_SPI_IF_SW_RESET_MSK) +#define ADI_SPI_IF_ADDR_ASC_MSK (BIT(2) | BIT(5)) +#define ADI_SPI_IF_ADDR_ASC_SEL(x) (((x) << 2) & ADI_SPI_IF_ADDR_ASC_MSK) + +/* ADI_SPI_IF_CONFIG_B */ +#define ADI_SPI_IF_SINGLE_INS_MSK BIT(7) +#define ADI_SPI_IF_SINGLE_INS_SEL(x) FIELD_PREP(ADI_SPI_IF_SINGLE_INS_MSK, x) +#define ADI_SPI_IF_SHORT_INS_MSK BIT(7) +#define ADI_SPI_IF_SHORT_INS_SEL(x) FIELD_PREP(ADI_SPI_IF_SINGLE_INS_MSK, x) + +/* ADI_SPI_IF_CONFIG_C */ +#define ADI_SPI_IF_STRICT_REG_MSK BIT(5) +#define ADI_SPI_IF_STRICT_REG_GET(x) FIELD_GET(ADI_SPI_IF_STRICT_REG_MSK, x) + +/* AD5770R configuration registers */ +#define AD5770R_CHANNEL_CONFIG 0x14 +#define AD5770R_OUTPUT_RANGE(ch) (0x15 + (ch)) +#define AD5770R_FILTER_RESISTOR(ch) (0x1D + (ch)) +#define AD5770R_REFERENCE 0x1B +#define AD5770R_DAC_LSB(ch) (0x26 + 2 * (ch)) +#define AD5770R_DAC_MSB(ch) (0x27 + 2 * (ch)) +#define AD5770R_CH_SELECT 0x34 +#define AD5770R_CH_ENABLE 0x44 + +/* AD5770R_CHANNEL_CONFIG */ +#define AD5770R_CFG_CH0_SINK_EN(x) (((x) & 0x1) << 7) +#define AD5770R_CFG_SHUTDOWN_B(x, ch) (((x) & 0x1) << (ch)) + +/* AD5770R_OUTPUT_RANGE */ +#define AD5770R_RANGE_OUTPUT_SCALING(x) (((x) & GENMASK(5, 0)) << 2) +#define AD5770R_RANGE_MODE(x) ((x) & GENMASK(1, 0)) + +/* AD5770R_REFERENCE */ +#define AD5770R_REF_RESISTOR_SEL(x) (((x) & 0x1) << 2) +#define AD5770R_REF_SEL(x) ((x) & GENMASK(1, 0)) + +/* AD5770R_CH_ENABLE */ +#define AD5770R_CH_SET(x, ch) (((x) & 0x1) << (ch)) + +#define AD5770R_MAX_CHANNELS 6 +#define AD5770R_MAX_CH_MODES 14 +#define AD5770R_LOW_VREF_mV 1250 +#define AD5770R_HIGH_VREF_mV 2500 + +enum ad5770r_ch0_modes { + AD5770R_CH0_0_300 = 0, + AD5770R_CH0_NEG_60_0, + AD5770R_CH0_NEG_60_300 +}; + +enum ad5770r_ch1_modes { + AD5770R_CH1_0_140_LOW_HEAD = 1, + AD5770R_CH1_0_140_LOW_NOISE, + AD5770R_CH1_0_250 +}; + +enum ad5770r_ch2_5_modes { + AD5770R_CH_LOW_RANGE = 0, + AD5770R_CH_HIGH_RANGE +}; + +enum ad5770r_ref_v { + AD5770R_EXT_2_5_V = 0, + AD5770R_INT_1_25_V_OUT_ON, + AD5770R_EXT_1_25_V, + AD5770R_INT_1_25_V_OUT_OFF +}; + +enum ad5770r_output_filter_resistor { + AD5770R_FILTER_60_OHM = 0x0, + AD5770R_FILTER_5_6_KOHM = 0x5, + AD5770R_FILTER_11_2_KOHM, + AD5770R_FILTER_22_2_KOHM, + AD5770R_FILTER_44_4_KOHM, + AD5770R_FILTER_104_KOHM, +}; + +struct ad5770r_out_range { + u8 out_scale; + u8 out_range_mode; +}; + +/** + * struct ad5770R_state - driver instance specific data + * @spi: spi_device + * @regmap: regmap + * @vref_reg: fixed regulator for reference configuration + * @gpio_reset: gpio descriptor + * @output_mode: array contains channels 
output ranges + * @vref: reference value + * @ch_pwr_down: powerdown flags + * @internal_ref: internal reference flag + * @external_res: external 2.5k resistor flag + * @transf_buf: cache aligned buffer for spi read/write + */ +struct ad5770r_state { + struct spi_device *spi; + struct regmap *regmap; + struct regulator *vref_reg; + struct gpio_desc *gpio_reset; + struct ad5770r_out_range output_mode[AD5770R_MAX_CHANNELS]; + int vref; + bool ch_pwr_down[AD5770R_MAX_CHANNELS]; + bool internal_ref; + bool external_res; + u8 transf_buf[2] ____cacheline_aligned; +}; + +static const struct regmap_config ad5770r_spi_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .read_flag_mask = BIT(7), +}; + +struct ad5770r_output_modes { + unsigned int ch; + u8 mode; + int min; + int max; +}; + +static struct ad5770r_output_modes ad5770r_rng_tbl[] = { + { 0, AD5770R_CH0_0_300, 0, 300 }, + { 0, AD5770R_CH0_NEG_60_0, -60, 0 }, + { 0, AD5770R_CH0_NEG_60_300, -60, 300 }, + { 1, AD5770R_CH1_0_140_LOW_HEAD, 0, 140 }, + { 1, AD5770R_CH1_0_140_LOW_NOISE, 0, 140 }, + { 1, AD5770R_CH1_0_250, 0, 250 }, + { 2, AD5770R_CH_LOW_RANGE, 0, 55 }, + { 2, AD5770R_CH_HIGH_RANGE, 0, 150 }, + { 3, AD5770R_CH_LOW_RANGE, 0, 45 }, + { 3, AD5770R_CH_HIGH_RANGE, 0, 100 }, + { 4, AD5770R_CH_LOW_RANGE, 0, 45 }, + { 4, AD5770R_CH_HIGH_RANGE, 0, 100 }, + { 5, AD5770R_CH_LOW_RANGE, 0, 45 }, + { 5, AD5770R_CH_HIGH_RANGE, 0, 100 }, +}; + +static const unsigned int ad5770r_filter_freqs[] = { + 153, 357, 715, 1400, 2800, 262000, +}; + +static const unsigned int ad5770r_filter_reg_vals[] = { + AD5770R_FILTER_104_KOHM, + AD5770R_FILTER_44_4_KOHM, + AD5770R_FILTER_22_2_KOHM, + AD5770R_FILTER_11_2_KOHM, + AD5770R_FILTER_5_6_KOHM, + AD5770R_FILTER_60_OHM +}; + +static int ad5770r_set_output_mode(struct ad5770r_state *st, + const struct ad5770r_out_range *out_mode, + int channel) +{ + unsigned int regval; + + regval = AD5770R_RANGE_OUTPUT_SCALING(out_mode->out_scale) | + AD5770R_RANGE_MODE(out_mode->out_range_mode); + + return regmap_write(st->regmap, + AD5770R_OUTPUT_RANGE(channel), regval); +} + +static int ad5770r_set_reference(struct ad5770r_state *st) +{ + unsigned int regval; + + regval = AD5770R_REF_RESISTOR_SEL(st->external_res); + + if (st->internal_ref) { + regval |= AD5770R_REF_SEL(AD5770R_INT_1_25_V_OUT_OFF); + } else { + switch (st->vref) { + case AD5770R_LOW_VREF_mV: + regval |= AD5770R_REF_SEL(AD5770R_EXT_1_25_V); + break; + case AD5770R_HIGH_VREF_mV: + regval |= AD5770R_REF_SEL(AD5770R_EXT_2_5_V); + break; + default: + regval = AD5770R_REF_SEL(AD5770R_INT_1_25_V_OUT_OFF); + break; + } + } + + return regmap_write(st->regmap, AD5770R_REFERENCE, regval); +} + +static int ad5770r_soft_reset(struct ad5770r_state *st) +{ + return regmap_write(st->regmap, ADI_SPI_IF_CONFIG_A, + ADI_SPI_IF_SW_RESET_SEL(1)); +} + +static int ad5770r_reset(struct ad5770r_state *st) +{ + /* Perform software reset if no GPIO provided */ + if (!st->gpio_reset) + return ad5770r_soft_reset(st); + + gpiod_set_value_cansleep(st->gpio_reset, 0); + usleep_range(10, 20); + gpiod_set_value_cansleep(st->gpio_reset, 1); + + /* data must not be written during reset timeframe */ + usleep_range(100, 200); + + return 0; +} + +static int ad5770r_get_range(struct ad5770r_state *st, + int ch, int *min, int *max) +{ + int i; + u8 tbl_ch, tbl_mode, out_range; + + out_range = st->output_mode[ch].out_range_mode; + + for (i = 0; i < AD5770R_MAX_CH_MODES; i++) { + tbl_ch = ad5770r_rng_tbl[i].ch; + tbl_mode = ad5770r_rng_tbl[i].mode; + if (tbl_ch == ch && tbl_mode == out_range) { + 
*min = ad5770r_rng_tbl[i].min; + *max = ad5770r_rng_tbl[i].max; + return 0; + } + } + + return -EINVAL; +} + +static int ad5770r_get_filter_freq(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, int *freq) +{ + struct ad5770r_state *st = iio_priv(indio_dev); + int ret; + unsigned int regval, i; + + ret = regmap_read(st->regmap, + AD5770R_FILTER_RESISTOR(chan->channel), ®val); + if (ret < 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(ad5770r_filter_reg_vals); i++) + if (regval == ad5770r_filter_reg_vals[i]) + break; + if (i == ARRAY_SIZE(ad5770r_filter_reg_vals)) + return -EINVAL; + + *freq = ad5770r_filter_freqs[i]; + + return IIO_VAL_INT; +} + +static int ad5770r_set_filter_freq(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + unsigned int freq) +{ + struct ad5770r_state *st = iio_priv(indio_dev); + unsigned int regval, i; + + for (i = 0; i < ARRAY_SIZE(ad5770r_filter_freqs); i++) + if (ad5770r_filter_freqs[i] >= freq) + break; + if (i == ARRAY_SIZE(ad5770r_filter_freqs)) + return -EINVAL; + + regval = ad5770r_filter_reg_vals[i]; + + return regmap_write(st->regmap, AD5770R_FILTER_RESISTOR(chan->channel), + regval); +} + +static int ad5770r_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long info) +{ + struct ad5770r_state *st = iio_priv(indio_dev); + int max, min, ret; + u16 buf16; + + switch (info) { + case IIO_CHAN_INFO_RAW: + ret = regmap_bulk_read(st->regmap, + chan->address, + st->transf_buf, 2); + if (ret) + return 0; + + buf16 = st->transf_buf[0] + (st->transf_buf[1] << 8); + *val = buf16 >> 2; + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + ret = ad5770r_get_range(st, chan->channel, &min, &max); + if (ret < 0) + return ret; + *val = max - min; + /* There is no sign bit. (negative current is mapped from 0) + * (sourced/sinked) current = raw * scale + offset + * where offset in case of CH0 can be negative. 
+ */ + *val2 = 14; + return IIO_VAL_FRACTIONAL_LOG2; + case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: + return ad5770r_get_filter_freq(indio_dev, chan, val); + case IIO_CHAN_INFO_OFFSET: + ret = ad5770r_get_range(st, chan->channel, &min, &max); + if (ret < 0) + return ret; + *val = min; + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int ad5770r_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long info) +{ + struct ad5770r_state *st = iio_priv(indio_dev); + + switch (info) { + case IIO_CHAN_INFO_RAW: + st->transf_buf[0] = ((u16)val >> 6); + st->transf_buf[1] = (val & GENMASK(5, 0)) << 2; + return regmap_bulk_write(st->regmap, chan->address, + st->transf_buf, 2); + case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: + return ad5770r_set_filter_freq(indio_dev, chan, val); + default: + return -EINVAL; + } +} + +static int ad5770r_read_freq_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: + *type = IIO_VAL_INT; + *vals = ad5770r_filter_freqs; + *length = ARRAY_SIZE(ad5770r_filter_freqs); + return IIO_AVAIL_LIST; + } + + return -EINVAL; +} + +static int ad5770r_reg_access(struct iio_dev *indio_dev, + unsigned int reg, + unsigned int writeval, + unsigned int *readval) +{ + struct ad5770r_state *st = iio_priv(indio_dev); + + if (readval) + return regmap_read(st->regmap, reg, readval); + else + return regmap_write(st->regmap, reg, writeval); +} + +static const struct iio_info ad5770r_info = { + .read_raw = ad5770r_read_raw, + .write_raw = ad5770r_write_raw, + .read_avail = ad5770r_read_freq_avail, + .debugfs_reg_access = &ad5770r_reg_access, +}; + +static int ad5770r_store_output_range(struct ad5770r_state *st, + int min, int max, int index) +{ + int i; + + for (i = 0; i < AD5770R_MAX_CH_MODES; i++) { + if (ad5770r_rng_tbl[i].ch != index) + continue; + if (ad5770r_rng_tbl[i].min != min || + ad5770r_rng_tbl[i].max != max) + continue; + st->output_mode[index].out_range_mode = ad5770r_rng_tbl[i].mode; + + return 0; + } + + return -EINVAL; +} + +static ssize_t ad5770r_read_dac_powerdown(struct iio_dev *indio_dev, + uintptr_t private, + const struct iio_chan_spec *chan, + char *buf) +{ + struct ad5770r_state *st = iio_priv(indio_dev); + + return sprintf(buf, "%d\n", st->ch_pwr_down[chan->channel]); +} + +static ssize_t ad5770r_write_dac_powerdown(struct iio_dev *indio_dev, + uintptr_t private, + const struct iio_chan_spec *chan, + const char *buf, size_t len) +{ + struct ad5770r_state *st = iio_priv(indio_dev); + unsigned int regval; + unsigned int mask; + bool readin; + int ret; + + ret = kstrtobool(buf, &readin); + if (ret) + return ret; + + readin = !readin; + + regval = AD5770R_CFG_SHUTDOWN_B(readin, chan->channel); + if (chan->channel == 0 && + st->output_mode[0].out_range_mode > AD5770R_CH0_0_300) { + regval |= AD5770R_CFG_CH0_SINK_EN(readin); + mask = BIT(chan->channel) + BIT(7); + } else { + mask = BIT(chan->channel); + } + ret = regmap_update_bits(st->regmap, AD5770R_CHANNEL_CONFIG, mask, + regval); + if (ret) + return ret; + + regval = AD5770R_CH_SET(readin, chan->channel); + ret = regmap_update_bits(st->regmap, AD5770R_CH_ENABLE, + BIT(chan->channel), regval); + if (ret) + return ret; + + st->ch_pwr_down[chan->channel] = !readin; + + return len; +} + +static const struct iio_chan_spec_ext_info ad5770r_ext_info[] = { + { + .name = "powerdown", + .read = 
ad5770r_read_dac_powerdown, + .write = ad5770r_write_dac_powerdown, + .shared = IIO_SEPARATE, + }, + { } +}; + +#define AD5770R_IDAC_CHANNEL(index, reg) { \ + .type = IIO_CURRENT, \ + .address = reg, \ + .indexed = 1, \ + .channel = index, \ + .output = 1, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OFFSET) | \ + BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \ + .info_mask_shared_by_type_available = \ + BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \ + .ext_info = ad5770r_ext_info, \ +} + +static const struct iio_chan_spec ad5770r_channels[] = { + AD5770R_IDAC_CHANNEL(0, AD5770R_DAC_MSB(0)), + AD5770R_IDAC_CHANNEL(1, AD5770R_DAC_MSB(1)), + AD5770R_IDAC_CHANNEL(2, AD5770R_DAC_MSB(2)), + AD5770R_IDAC_CHANNEL(3, AD5770R_DAC_MSB(3)), + AD5770R_IDAC_CHANNEL(4, AD5770R_DAC_MSB(4)), + AD5770R_IDAC_CHANNEL(5, AD5770R_DAC_MSB(5)), +}; + +static int ad5770r_channel_config(struct ad5770r_state *st) +{ + int ret, tmp[2], min, max; + unsigned int num; + struct fwnode_handle *child; + + num = device_get_child_node_count(&st->spi->dev); + if (num != AD5770R_MAX_CHANNELS) + return -EINVAL; + + device_for_each_child_node(&st->spi->dev, child) { + ret = fwnode_property_read_u32(child, "num", &num); + if (ret) + return ret; + if (num > AD5770R_MAX_CHANNELS) + return -EINVAL; + + ret = fwnode_property_read_u32_array(child, + "adi,range-microamp", + tmp, 2); + if (ret) + return ret; + + min = tmp[0] / 1000; + max = tmp[1] / 1000; + ret = ad5770r_store_output_range(st, min, max, num); + if (ret) + return ret; + } + + return ret; +} + +static int ad5770r_init(struct ad5770r_state *st) +{ + int ret, i; + + st->gpio_reset = devm_gpiod_get_optional(&st->spi->dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(st->gpio_reset)) + return PTR_ERR(st->gpio_reset); + + /* Perform a reset */ + ret = ad5770r_reset(st); + if (ret) + return ret; + + /* Set output range */ + ret = ad5770r_channel_config(st); + if (ret) + return ret; + + for (i = 0; i < AD5770R_MAX_CHANNELS; i++) { + ret = ad5770r_set_output_mode(st, &st->output_mode[i], i); + if (ret) + return ret; + } + + st->external_res = fwnode_property_read_bool(st->spi->dev.fwnode, + "adi,external-resistor"); + + ret = ad5770r_set_reference(st); + if (ret) + return ret; + + /* Set outputs off */ + ret = regmap_write(st->regmap, AD5770R_CHANNEL_CONFIG, 0x00); + if (ret) + return ret; + + ret = regmap_write(st->regmap, AD5770R_CH_ENABLE, 0x00); + if (ret) + return ret; + + for (i = 0; i < AD5770R_MAX_CHANNELS; i++) + st->ch_pwr_down[i] = true; + + return ret; +} + +static void ad5770r_disable_regulator(void *data) +{ + struct ad5770r_state *st = data; + + regulator_disable(st->vref_reg); +} + +static int ad5770r_probe(struct spi_device *spi) +{ + struct ad5770r_state *st; + struct iio_dev *indio_dev; + struct regmap *regmap; + int ret; + + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + st = iio_priv(indio_dev); + spi_set_drvdata(spi, indio_dev); + + st->spi = spi; + + regmap = devm_regmap_init_spi(spi, &ad5770r_spi_regmap_config); + if (IS_ERR(regmap)) { + dev_err(&spi->dev, "Error initializing spi regmap: %ld\n", + PTR_ERR(regmap)); + return PTR_ERR(regmap); + } + st->regmap = regmap; + + st->vref_reg = devm_regulator_get_optional(&spi->dev, "vref"); + if (!IS_ERR(st->vref_reg)) { + ret = regulator_enable(st->vref_reg); + if (ret) { + dev_err(&spi->dev, + "Failed to enable vref regulators: %d\n", ret); + return ret; + } + + ret = 
devm_add_action_or_reset(&spi->dev, + ad5770r_disable_regulator, + st); + if (ret < 0) + return ret; + + ret = regulator_get_voltage(st->vref_reg); + if (ret < 0) + return ret; + + st->vref = ret / 1000; + } else { + if (PTR_ERR(st->vref_reg) == -ENODEV) { + st->vref = AD5770R_LOW_VREF_mV; + st->internal_ref = true; + } else { + return PTR_ERR(st->vref_reg); + } + } + + indio_dev->dev.parent = &spi->dev; + indio_dev->name = spi_get_device_id(spi)->name; + indio_dev->info = &ad5770r_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = ad5770r_channels; + indio_dev->num_channels = ARRAY_SIZE(ad5770r_channels); + + ret = ad5770r_init(st); + if (ret < 0) { + dev_err(&spi->dev, "AD5770R init failed\n"); + return ret; + } + + return devm_iio_device_register(&st->spi->dev, indio_dev); +} + +static const struct of_device_id ad5770r_of_id[] = { + { .compatible = "adi,ad5770r", }, + {}, +}; +MODULE_DEVICE_TABLE(of, ad5770r_of_id); + +static const struct spi_device_id ad5770r_id[] = { + { "ad5770r", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(spi, ad5770r_id); + +static struct spi_driver ad5770r_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = ad5770r_of_id, + }, + .probe = ad5770r_probe, + .id_table = ad5770r_id, +}; + +module_spi_driver(ad5770r_driver); + +MODULE_AUTHOR("Mircea Caprioru <mircea.caprioru@analog.com>"); +MODULE_DESCRIPTION("Analog Devices AD5770R IDAC"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c index 643d1ce956ce..7adc91056aa1 100644 --- a/drivers/iio/dac/ltc2632.c +++ b/drivers/iio/dac/ltc2632.c @@ -12,11 +12,6 @@ #include <linux/iio/iio.h> #include <linux/regulator/consumer.h> -#define LTC2632_DAC_CHANNELS 2 - -#define LTC2632_ADDR_DAC0 0x0 -#define LTC2632_ADDR_DAC1 0x1 - #define LTC2632_CMD_WRITE_INPUT_N 0x0 #define LTC2632_CMD_UPDATE_DAC_N 0x1 #define LTC2632_CMD_WRITE_INPUT_N_UPDATE_ALL 0x2 @@ -33,6 +28,7 @@ */ struct ltc2632_chip_info { const struct iio_chan_spec *channels; + const size_t num_channels; const int vref_mv; }; @@ -57,6 +53,12 @@ enum ltc2632_supported_device_ids { ID_LTC2632H12, ID_LTC2632H10, ID_LTC2632H8, + ID_LTC2636L12, + ID_LTC2636L10, + ID_LTC2636L8, + ID_LTC2636H12, + ID_LTC2636H10, + ID_LTC2636H8, }; static int ltc2632_spi_write(struct spi_device *spi, @@ -190,39 +192,77 @@ static const struct iio_chan_spec_ext_info ltc2632_ext_info[] = { const struct iio_chan_spec _name ## _channels[] = { \ LTC2632_CHANNEL(0, _bits), \ LTC2632_CHANNEL(1, _bits), \ + LTC2632_CHANNEL(2, _bits), \ + LTC2632_CHANNEL(3, _bits), \ + LTC2632_CHANNEL(4, _bits), \ + LTC2632_CHANNEL(5, _bits), \ + LTC2632_CHANNEL(6, _bits), \ + LTC2632_CHANNEL(7, _bits), \ } -static DECLARE_LTC2632_CHANNELS(ltc2632l12, 12); -static DECLARE_LTC2632_CHANNELS(ltc2632l10, 10); -static DECLARE_LTC2632_CHANNELS(ltc2632l8, 8); - -static DECLARE_LTC2632_CHANNELS(ltc2632h12, 12); -static DECLARE_LTC2632_CHANNELS(ltc2632h10, 10); -static DECLARE_LTC2632_CHANNELS(ltc2632h8, 8); +static DECLARE_LTC2632_CHANNELS(ltc2632x12, 12); +static DECLARE_LTC2632_CHANNELS(ltc2632x10, 10); +static DECLARE_LTC2632_CHANNELS(ltc2632x8, 8); static const struct ltc2632_chip_info ltc2632_chip_info_tbl[] = { [ID_LTC2632L12] = { - .channels = ltc2632l12_channels, + .channels = ltc2632x12_channels, + .num_channels = 2, .vref_mv = 2500, }, [ID_LTC2632L10] = { - .channels = ltc2632l10_channels, + .channels = ltc2632x10_channels, + .num_channels = 2, .vref_mv = 2500, }, [ID_LTC2632L8] = { - .channels = ltc2632l8_channels, + .channels = 
ltc2632x8_channels, + .num_channels = 2, .vref_mv = 2500, }, [ID_LTC2632H12] = { - .channels = ltc2632h12_channels, + .channels = ltc2632x12_channels, + .num_channels = 2, .vref_mv = 4096, }, [ID_LTC2632H10] = { - .channels = ltc2632h10_channels, + .channels = ltc2632x10_channels, + .num_channels = 2, .vref_mv = 4096, }, [ID_LTC2632H8] = { - .channels = ltc2632h8_channels, + .channels = ltc2632x8_channels, + .num_channels = 2, + .vref_mv = 4096, + }, + [ID_LTC2636L12] = { + .channels = ltc2632x12_channels, + .num_channels = 8, + .vref_mv = 2500, + }, + [ID_LTC2636L10] = { + .channels = ltc2632x10_channels, + .num_channels = 8, + .vref_mv = 2500, + }, + [ID_LTC2636L8] = { + .channels = ltc2632x8_channels, + .num_channels = 8, + .vref_mv = 2500, + }, + [ID_LTC2636H12] = { + .channels = ltc2632x12_channels, + .num_channels = 8, + .vref_mv = 4096, + }, + [ID_LTC2636H10] = { + .channels = ltc2632x10_channels, + .num_channels = 8, + .vref_mv = 4096, + }, + [ID_LTC2636H8] = { + .channels = ltc2632x8_channels, + .num_channels = 8, .vref_mv = 4096, }, }; @@ -291,7 +331,7 @@ static int ltc2632_probe(struct spi_device *spi) indio_dev->info = <c2632_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = chip_info->channels; - indio_dev->num_channels = LTC2632_DAC_CHANNELS; + indio_dev->num_channels = chip_info->num_channels; return iio_device_register(indio_dev); } @@ -316,6 +356,12 @@ static const struct spi_device_id ltc2632_id[] = { { "ltc2632-h12", (kernel_ulong_t)<c2632_chip_info_tbl[ID_LTC2632H12] }, { "ltc2632-h10", (kernel_ulong_t)<c2632_chip_info_tbl[ID_LTC2632H10] }, { "ltc2632-h8", (kernel_ulong_t)<c2632_chip_info_tbl[ID_LTC2632H8] }, + { "ltc2636-l12", (kernel_ulong_t)<c2632_chip_info_tbl[ID_LTC2636L12] }, + { "ltc2636-l10", (kernel_ulong_t)<c2632_chip_info_tbl[ID_LTC2636L10] }, + { "ltc2636-l8", (kernel_ulong_t)<c2632_chip_info_tbl[ID_LTC2636L8] }, + { "ltc2636-h12", (kernel_ulong_t)<c2632_chip_info_tbl[ID_LTC2636H12] }, + { "ltc2636-h10", (kernel_ulong_t)<c2632_chip_info_tbl[ID_LTC2636H10] }, + { "ltc2636-h8", (kernel_ulong_t)<c2632_chip_info_tbl[ID_LTC2636H8] }, {} }; MODULE_DEVICE_TABLE(spi, ltc2632_id); @@ -339,6 +385,24 @@ static const struct of_device_id ltc2632_of_match[] = { }, { .compatible = "lltc,ltc2632-h8", .data = <c2632_chip_info_tbl[ID_LTC2632H8] + }, { + .compatible = "lltc,ltc2636-l12", + .data = <c2632_chip_info_tbl[ID_LTC2636L12] + }, { + .compatible = "lltc,ltc2636-l10", + .data = <c2632_chip_info_tbl[ID_LTC2636L10] + }, { + .compatible = "lltc,ltc2636-l8", + .data = <c2632_chip_info_tbl[ID_LTC2636L8] + }, { + .compatible = "lltc,ltc2636-h12", + .data = <c2632_chip_info_tbl[ID_LTC2636H12] + }, { + .compatible = "lltc,ltc2636-h10", + .data = <c2632_chip_info_tbl[ID_LTC2636H10] + }, { + .compatible = "lltc,ltc2636-h8", + .data = <c2632_chip_info_tbl[ID_LTC2636H8] }, {} }; diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c index d5e03a406d4a..a4c967a5fc5c 100644 --- a/drivers/iio/gyro/adis16136.c +++ b/drivers/iio/gyro/adis16136.c @@ -59,7 +59,7 @@ struct adis16136_chip_info { unsigned int precision; unsigned int fullscale; - const struct adis_timeout *timeouts; + const struct adis_data adis_data; }; struct adis16136 { @@ -466,22 +466,22 @@ static const char * const adis16136_status_error_msgs[] = { [ADIS16136_DIAG_STAT_FLASH_CHKSUM_FAIL] = "Flash checksum error", }; -static const struct adis_data adis16136_data = { - .diag_stat_reg = ADIS16136_REG_DIAG_STAT, - .glob_cmd_reg = ADIS16136_REG_GLOB_CMD, - .msc_ctrl_reg = 
ADIS16136_REG_MSC_CTRL, - - .self_test_mask = ADIS16136_MSC_CTRL_SELF_TEST, - - .read_delay = 10, - .write_delay = 10, - - .status_error_msgs = adis16136_status_error_msgs, - .status_error_mask = BIT(ADIS16136_DIAG_STAT_FLASH_UPDATE_FAIL) | - BIT(ADIS16136_DIAG_STAT_SPI_FAIL) | - BIT(ADIS16136_DIAG_STAT_SELF_TEST_FAIL) | - BIT(ADIS16136_DIAG_STAT_FLASH_CHKSUM_FAIL), -}; +#define ADIS16136_DATA(_timeouts) \ +{ \ + .diag_stat_reg = ADIS16136_REG_DIAG_STAT, \ + .glob_cmd_reg = ADIS16136_REG_GLOB_CMD, \ + .msc_ctrl_reg = ADIS16136_REG_MSC_CTRL, \ + .self_test_reg = ADIS16136_REG_MSC_CTRL, \ + .self_test_mask = ADIS16136_MSC_CTRL_SELF_TEST, \ + .read_delay = 10, \ + .write_delay = 10, \ + .status_error_msgs = adis16136_status_error_msgs, \ + .status_error_mask = BIT(ADIS16136_DIAG_STAT_FLASH_UPDATE_FAIL) | \ + BIT(ADIS16136_DIAG_STAT_SPI_FAIL) | \ + BIT(ADIS16136_DIAG_STAT_SELF_TEST_FAIL) | \ + BIT(ADIS16136_DIAG_STAT_FLASH_CHKSUM_FAIL), \ + .timeouts = (_timeouts), \ +} enum adis16136_id { ID_ADIS16133, @@ -506,41 +506,25 @@ static const struct adis16136_chip_info adis16136_chip_info[] = { [ID_ADIS16133] = { .precision = IIO_DEGREE_TO_RAD(1200), .fullscale = 24000, - .timeouts = &adis16133_timeouts, + .adis_data = ADIS16136_DATA(&adis16133_timeouts), }, [ID_ADIS16135] = { .precision = IIO_DEGREE_TO_RAD(300), .fullscale = 24000, - .timeouts = &adis16133_timeouts, + .adis_data = ADIS16136_DATA(&adis16133_timeouts), }, [ID_ADIS16136] = { .precision = IIO_DEGREE_TO_RAD(450), .fullscale = 24623, - .timeouts = &adis16136_timeouts, + .adis_data = ADIS16136_DATA(&adis16136_timeouts), }, [ID_ADIS16137] = { .precision = IIO_DEGREE_TO_RAD(1000), .fullscale = 24609, - .timeouts = &adis16136_timeouts, + .adis_data = ADIS16136_DATA(&adis16136_timeouts), }, }; -static struct adis_data *adis16136_adis_data_alloc(struct adis16136 *st, - struct device *dev) -{ - struct adis_data *data; - - data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL); - if (!data) - return ERR_PTR(-ENOMEM); - - memcpy(data, &adis16136_data, sizeof(*data)); - - data->timeouts = st->chip_info->timeouts; - - return data; -} - static int adis16136_probe(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); @@ -565,9 +549,7 @@ static int adis16136_probe(struct spi_device *spi) indio_dev->info = &adis16136_info; indio_dev->modes = INDIO_DIRECT_MODE; - adis16136_data = adis16136_adis_data_alloc(adis16136, &spi->dev); - if (IS_ERR(adis16136_data)) - return PTR_ERR(adis16136_data); + adis16136_data = &adis16136->chip_info->adis_data; ret = adis_init(&adis16136->adis, indio_dev, spi, adis16136_data); if (ret) diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c index be09b3e5910c..9823573e811a 100644 --- a/drivers/iio/gyro/adis16260.c +++ b/drivers/iio/gyro/adis16260.c @@ -346,6 +346,7 @@ static const struct adis_data adis16260_data = { .diag_stat_reg = ADIS16260_DIAG_STAT, .self_test_mask = ADIS16260_MSC_CTRL_MEM_TEST, + .self_test_reg = ADIS16260_MSC_CTRL, .timeouts = &adis16260_timeouts, .status_error_msgs = adis1620_status_error_msgs, diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c index 022bb54fb748..a8afd01de4f3 100644 --- a/drivers/iio/imu/adis.c +++ b/drivers/iio/imu/adis.c @@ -7,6 +7,7 @@ */ #include <linux/delay.h> +#include <linux/gpio/consumer.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/kernel.h> @@ -346,8 +347,8 @@ static int adis_self_test(struct adis *adis) int ret; const struct adis_timeout *timeouts = adis->data->timeouts; - ret = 
__adis_write_reg_16(adis, adis->data->msc_ctrl_reg, - adis->data->self_test_mask); + ret = __adis_write_reg_16(adis, adis->data->self_test_reg, + adis->data->self_test_mask); if (ret) { dev_err(&adis->spi->dev, "Failed to initiate self test: %d\n", ret); @@ -359,42 +360,71 @@ static int adis_self_test(struct adis *adis) ret = __adis_check_status(adis); if (adis->data->self_test_no_autoclear) - __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0x00); + __adis_write_reg_16(adis, adis->data->self_test_reg, 0x00); return ret; } /** - * adis_inital_startup() - Performs device self-test + * __adis_initial_startup() - Device initial setup * @adis: The adis device * + * The function performs a HW reset via a reset pin that should be specified + * via GPIOLIB. If no pin is configured a SW reset will be performed. + * The RST pin for the ADIS devices should be configured as ACTIVE_LOW. + * + * After the self-test operation is performed, the function will also check + * that the product ID is as expected. This assumes that drivers providing + * 'prod_id_reg' will also provide the 'prod_id'. + * * Returns 0 if the device is operational, a negative error code otherwise. * * This function should be called early on in the device initialization sequence * to ensure that the device is in a sane and known state and that it is usable. */ -int adis_initial_startup(struct adis *adis) +int __adis_initial_startup(struct adis *adis) { + const struct adis_timeout *timeouts = adis->data->timeouts; + struct gpio_desc *gpio; + uint16_t prod_id; int ret; - mutex_lock(&adis->state_lock); + /* check if the device has rst pin low */ + gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_ASIS); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + if (gpio) { + gpiod_set_value_cansleep(gpio, 1); + msleep(10); + /* bring device out of reset */ + gpiod_set_value_cansleep(gpio, 0); + msleep(timeouts->reset_ms); + } else { + ret = __adis_reset(adis); + if (ret) + return ret; + } ret = adis_self_test(adis); - if (ret) { - dev_err(&adis->spi->dev, "Self-test failed, trying reset.\n"); - __adis_reset(adis); - ret = adis_self_test(adis); - if (ret) { - dev_err(&adis->spi->dev, "Second self-test failed, giving up.\n"); - goto out_unlock; - } - } + if (ret) + return ret; -out_unlock: - mutex_unlock(&adis->state_lock); - return ret; + if (!adis->data->prod_id_reg) + return 0; + + ret = adis_read_reg_16(adis, adis->data->prod_id_reg, &prod_id); + if (ret) + return ret; + + if (prod_id != adis->data->prod_id) + dev_warn(&adis->spi->dev, + "Device ID(%u) and product ID(%u) do not match.", + adis->data->prod_id, prod_id); + + return 0; } -EXPORT_SYMBOL_GPL(adis_initial_startup); +EXPORT_SYMBOL_GPL(__adis_initial_startup); /** * adis_single_conversion() - Performs a single sample conversion diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c index cfb1c19eb930..05e70c1c4835 100644 --- a/drivers/iio/imu/adis16400.c +++ b/drivers/iio/imu/adis16400.c @@ -156,7 +156,7 @@ struct adis16400_state; struct adis16400_chip_info { const struct iio_chan_spec *channels; - const struct adis_timeout *timeouts; + const struct adis_data adis_data; const int num_channels; const long flags; unsigned int gyro_scale_micro; @@ -930,12 +930,64 @@ static const struct iio_chan_spec adis16334_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), }; +static const char * const adis16400_status_error_msgs[] = { + [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure", + [ADIS16400_DIAG_STAT_YACCL_FAIL] = 
"Y-axis accelerometer self-test failure", + [ADIS16400_DIAG_STAT_XACCL_FAIL] = "X-axis accelerometer self-test failure", + [ADIS16400_DIAG_STAT_XGYRO_FAIL] = "X-axis gyroscope self-test failure", + [ADIS16400_DIAG_STAT_YGYRO_FAIL] = "Y-axis gyroscope self-test failure", + [ADIS16400_DIAG_STAT_ZGYRO_FAIL] = "Z-axis gyroscope self-test failure", + [ADIS16400_DIAG_STAT_ALARM2] = "Alarm 2 active", + [ADIS16400_DIAG_STAT_ALARM1] = "Alarm 1 active", + [ADIS16400_DIAG_STAT_FLASH_CHK] = "Flash checksum error", + [ADIS16400_DIAG_STAT_SELF_TEST] = "Self test error", + [ADIS16400_DIAG_STAT_OVERFLOW] = "Sensor overrange", + [ADIS16400_DIAG_STAT_SPI_FAIL] = "SPI failure", + [ADIS16400_DIAG_STAT_FLASH_UPT] = "Flash update failed", + [ADIS16400_DIAG_STAT_POWER_HIGH] = "Power supply above 5.25V", + [ADIS16400_DIAG_STAT_POWER_LOW] = "Power supply below 4.75V", +}; + +#define ADIS16400_DATA(_timeouts) \ +{ \ + .msc_ctrl_reg = ADIS16400_MSC_CTRL, \ + .glob_cmd_reg = ADIS16400_GLOB_CMD, \ + .diag_stat_reg = ADIS16400_DIAG_STAT, \ + .read_delay = 50, \ + .write_delay = 50, \ + .self_test_mask = ADIS16400_MSC_CTRL_MEM_TEST, \ + .self_test_reg = ADIS16400_MSC_CTRL, \ + .status_error_msgs = adis16400_status_error_msgs, \ + .status_error_mask = BIT(ADIS16400_DIAG_STAT_ZACCL_FAIL) | \ + BIT(ADIS16400_DIAG_STAT_YACCL_FAIL) | \ + BIT(ADIS16400_DIAG_STAT_XACCL_FAIL) | \ + BIT(ADIS16400_DIAG_STAT_XGYRO_FAIL) | \ + BIT(ADIS16400_DIAG_STAT_YGYRO_FAIL) | \ + BIT(ADIS16400_DIAG_STAT_ZGYRO_FAIL) | \ + BIT(ADIS16400_DIAG_STAT_ALARM2) | \ + BIT(ADIS16400_DIAG_STAT_ALARM1) | \ + BIT(ADIS16400_DIAG_STAT_FLASH_CHK) | \ + BIT(ADIS16400_DIAG_STAT_SELF_TEST) | \ + BIT(ADIS16400_DIAG_STAT_OVERFLOW) | \ + BIT(ADIS16400_DIAG_STAT_SPI_FAIL) | \ + BIT(ADIS16400_DIAG_STAT_FLASH_UPT) | \ + BIT(ADIS16400_DIAG_STAT_POWER_HIGH) | \ + BIT(ADIS16400_DIAG_STAT_POWER_LOW), \ + .timeouts = (_timeouts), \ +} + static const struct adis_timeout adis16300_timeouts = { .reset_ms = ADIS16400_STARTUP_DELAY, .sw_reset_ms = ADIS16400_STARTUP_DELAY, .self_test_ms = ADIS16400_STARTUP_DELAY, }; +static const struct adis_timeout adis16334_timeouts = { + .reset_ms = 60, + .sw_reset_ms = 60, + .self_test_ms = 14, +}; + static const struct adis_timeout adis16362_timeouts = { .reset_ms = 130, .sw_reset_ms = 130, @@ -972,7 +1024,7 @@ static struct adis16400_chip_info adis16400_chips[] = { .temp_offset = 25000000 / 140000, /* 25 C = 0x00 */ .set_freq = adis16400_set_freq, .get_freq = adis16400_get_freq, - .timeouts = &adis16300_timeouts, + .adis_data = ADIS16400_DATA(&adis16300_timeouts), }, [ADIS16334] = { .channels = adis16334_channels, @@ -985,6 +1037,7 @@ static struct adis16400_chip_info adis16400_chips[] = { .temp_offset = 25000000 / 67850, /* 25 C = 0x00 */ .set_freq = adis16334_set_freq, .get_freq = adis16334_get_freq, + .adis_data = ADIS16400_DATA(&adis16334_timeouts), }, [ADIS16350] = { .channels = adis16350_channels, @@ -996,7 +1049,7 @@ static struct adis16400_chip_info adis16400_chips[] = { .flags = ADIS16400_NO_BURST | ADIS16400_HAS_SLOW_MODE, .set_freq = adis16400_set_freq, .get_freq = adis16400_get_freq, - .timeouts = &adis16300_timeouts, + .adis_data = ADIS16400_DATA(&adis16300_timeouts), }, [ADIS16360] = { .channels = adis16350_channels, @@ -1009,7 +1062,7 @@ static struct adis16400_chip_info adis16400_chips[] = { .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */ .set_freq = adis16400_set_freq, .get_freq = adis16400_get_freq, - .timeouts = &adis16300_timeouts, + .adis_data = ADIS16400_DATA(&adis16300_timeouts), }, [ADIS16362] = { .channels = 
adis16350_channels, @@ -1022,7 +1075,7 @@ static struct adis16400_chip_info adis16400_chips[] = { .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */ .set_freq = adis16400_set_freq, .get_freq = adis16400_get_freq, - .timeouts = &adis16362_timeouts, + .adis_data = ADIS16400_DATA(&adis16362_timeouts), }, [ADIS16364] = { .channels = adis16350_channels, @@ -1035,7 +1088,7 @@ static struct adis16400_chip_info adis16400_chips[] = { .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */ .set_freq = adis16400_set_freq, .get_freq = adis16400_get_freq, - .timeouts = &adis16362_timeouts, + .adis_data = ADIS16400_DATA(&adis16362_timeouts), }, [ADIS16367] = { .channels = adis16350_channels, @@ -1048,7 +1101,7 @@ static struct adis16400_chip_info adis16400_chips[] = { .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */ .set_freq = adis16400_set_freq, .get_freq = adis16400_get_freq, - .timeouts = &adis16300_timeouts, + .adis_data = ADIS16400_DATA(&adis16300_timeouts), }, [ADIS16400] = { .channels = adis16400_channels, @@ -1060,7 +1113,7 @@ static struct adis16400_chip_info adis16400_chips[] = { .temp_offset = 25000000 / 140000, /* 25 C = 0x00 */ .set_freq = adis16400_set_freq, .get_freq = adis16400_get_freq, - .timeouts = &adis16400_timeouts, + .adis_data = ADIS16400_DATA(&adis16400_timeouts), }, [ADIS16445] = { .channels = adis16445_channels, @@ -1074,7 +1127,7 @@ static struct adis16400_chip_info adis16400_chips[] = { .temp_offset = 31000000 / 73860, /* 31 C = 0x00 */ .set_freq = adis16334_set_freq, .get_freq = adis16334_get_freq, - .timeouts = &adis16445_timeouts, + .adis_data = ADIS16400_DATA(&adis16445_timeouts), }, [ADIS16448] = { .channels = adis16448_channels, @@ -1088,7 +1141,7 @@ static struct adis16400_chip_info adis16400_chips[] = { .temp_offset = 31000000 / 73860, /* 31 C = 0x00 */ .set_freq = adis16334_set_freq, .get_freq = adis16334_get_freq, - .timeouts = &adis16448_timeouts, + .adis_data = ADIS16400_DATA(&adis16448_timeouts), } }; @@ -1099,52 +1152,6 @@ static const struct iio_info adis16400_info = { .debugfs_reg_access = adis_debugfs_reg_access, }; -static const char * const adis16400_status_error_msgs[] = { - [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure", - [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure", - [ADIS16400_DIAG_STAT_XACCL_FAIL] = "X-axis accelerometer self-test failure", - [ADIS16400_DIAG_STAT_XGYRO_FAIL] = "X-axis gyroscope self-test failure", - [ADIS16400_DIAG_STAT_YGYRO_FAIL] = "Y-axis gyroscope self-test failure", - [ADIS16400_DIAG_STAT_ZGYRO_FAIL] = "Z-axis gyroscope self-test failure", - [ADIS16400_DIAG_STAT_ALARM2] = "Alarm 2 active", - [ADIS16400_DIAG_STAT_ALARM1] = "Alarm 1 active", - [ADIS16400_DIAG_STAT_FLASH_CHK] = "Flash checksum error", - [ADIS16400_DIAG_STAT_SELF_TEST] = "Self test error", - [ADIS16400_DIAG_STAT_OVERFLOW] = "Sensor overrange", - [ADIS16400_DIAG_STAT_SPI_FAIL] = "SPI failure", - [ADIS16400_DIAG_STAT_FLASH_UPT] = "Flash update failed", - [ADIS16400_DIAG_STAT_POWER_HIGH] = "Power supply above 5.25V", - [ADIS16400_DIAG_STAT_POWER_LOW] = "Power supply below 4.75V", -}; - -static const struct adis_data adis16400_data = { - .msc_ctrl_reg = ADIS16400_MSC_CTRL, - .glob_cmd_reg = ADIS16400_GLOB_CMD, - .diag_stat_reg = ADIS16400_DIAG_STAT, - - .read_delay = 50, - .write_delay = 50, - - .self_test_mask = ADIS16400_MSC_CTRL_MEM_TEST, - - .status_error_msgs = adis16400_status_error_msgs, - .status_error_mask = BIT(ADIS16400_DIAG_STAT_ZACCL_FAIL) | - BIT(ADIS16400_DIAG_STAT_YACCL_FAIL) | - 
BIT(ADIS16400_DIAG_STAT_XACCL_FAIL) | - BIT(ADIS16400_DIAG_STAT_XGYRO_FAIL) | - BIT(ADIS16400_DIAG_STAT_YGYRO_FAIL) | - BIT(ADIS16400_DIAG_STAT_ZGYRO_FAIL) | - BIT(ADIS16400_DIAG_STAT_ALARM2) | - BIT(ADIS16400_DIAG_STAT_ALARM1) | - BIT(ADIS16400_DIAG_STAT_FLASH_CHK) | - BIT(ADIS16400_DIAG_STAT_SELF_TEST) | - BIT(ADIS16400_DIAG_STAT_OVERFLOW) | - BIT(ADIS16400_DIAG_STAT_SPI_FAIL) | - BIT(ADIS16400_DIAG_STAT_FLASH_UPT) | - BIT(ADIS16400_DIAG_STAT_POWER_HIGH) | - BIT(ADIS16400_DIAG_STAT_POWER_LOW), -}; - static void adis16400_setup_chan_mask(struct adis16400_state *st) { const struct adis16400_chip_info *chip_info = st->variant; @@ -1158,23 +1165,6 @@ static void adis16400_setup_chan_mask(struct adis16400_state *st) st->avail_scan_mask[0] |= BIT(ch->scan_index); } } - -static struct adis_data *adis16400_adis_data_alloc(struct adis16400_state *st, - struct device *dev) -{ - struct adis_data *data; - - data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL); - if (!data) - return ERR_PTR(-ENOMEM); - - memcpy(data, &adis16400_data, sizeof(*data)); - - data->timeouts = st->variant->timeouts; - - return data; -} - static int adis16400_probe(struct spi_device *spi) { struct adis16400_state *st; @@ -1207,9 +1197,7 @@ static int adis16400_probe(struct spi_device *spi) st->adis.burst->extra_len = sizeof(u16); } - adis16400_data = adis16400_adis_data_alloc(st, &spi->dev); - if (IS_ERR(adis16400_data)) - return PTR_ERR(adis16400_data); + adis16400_data = &st->variant->adis_data; ret = adis_init(&st->adis, indio_dev, spi, adis16400_data); if (ret) diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c index 9539cfe4a259..0027683d0256 100644 --- a/drivers/iio/imu/adis16460.c +++ b/drivers/iio/imu/adis16460.c @@ -333,40 +333,6 @@ static int adis16460_enable_irq(struct adis *adis, bool enable) return 0; } -static int adis16460_initial_setup(struct iio_dev *indio_dev) -{ - struct adis16460 *st = iio_priv(indio_dev); - uint16_t prod_id; - unsigned int device_id; - int ret; - - adis_reset(&st->adis); - msleep(222); - - ret = adis_write_reg_16(&st->adis, ADIS16460_REG_GLOB_CMD, BIT(1)); - if (ret) - return ret; - msleep(75); - - ret = adis_check_status(&st->adis); - if (ret) - return ret; - - ret = adis_read_reg_16(&st->adis, ADIS16460_REG_PROD_ID, &prod_id); - if (ret) - return ret; - - ret = sscanf(indio_dev->name, "adis%u\n", &device_id); - if (ret != 1) - return -EINVAL; - - if (prod_id != device_id) - dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.", - device_id, prod_id); - - return 0; -} - #define ADIS16460_DIAG_STAT_IN_CLK_OOS 7 #define ADIS16460_DIAG_STAT_FLASH_MEM 6 #define ADIS16460_DIAG_STAT_SELF_TEST 5 @@ -392,6 +358,10 @@ static const struct adis_timeout adis16460_timeouts = { static const struct adis_data adis16460_data = { .diag_stat_reg = ADIS16460_REG_DIAG_STAT, .glob_cmd_reg = ADIS16460_REG_GLOB_CMD, + .prod_id_reg = ADIS16460_REG_PROD_ID, + .prod_id = 16460, + .self_test_mask = BIT(2), + .self_test_reg = ADIS16460_REG_GLOB_CMD, .has_paging = false, .read_delay = 5, .write_delay = 5, @@ -439,7 +409,7 @@ static int adis16460_probe(struct spi_device *spi) adis16460_enable_irq(&st->adis, 0); - ret = adis16460_initial_setup(indio_dev); + ret = __adis_initial_startup(&st->adis); if (ret) goto error_cleanup_buffer; diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index dac87f1001fd..cfae0e4476e7 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -138,7 +138,7 @@ struct adis16480_chip_info { unsigned 
int max_dec_rate; const unsigned int *filter_freqs; bool has_pps_clk_mode; - const struct adis_timeout *timeouts; + const struct adis_data adis_data; }; enum adis16480_int_pin { @@ -796,6 +796,58 @@ enum adis16480_variant { ADIS16497_3, }; +#define ADIS16480_DIAG_STAT_XGYRO_FAIL 0 +#define ADIS16480_DIAG_STAT_YGYRO_FAIL 1 +#define ADIS16480_DIAG_STAT_ZGYRO_FAIL 2 +#define ADIS16480_DIAG_STAT_XACCL_FAIL 3 +#define ADIS16480_DIAG_STAT_YACCL_FAIL 4 +#define ADIS16480_DIAG_STAT_ZACCL_FAIL 5 +#define ADIS16480_DIAG_STAT_XMAGN_FAIL 8 +#define ADIS16480_DIAG_STAT_YMAGN_FAIL 9 +#define ADIS16480_DIAG_STAT_ZMAGN_FAIL 10 +#define ADIS16480_DIAG_STAT_BARO_FAIL 11 + +static const char * const adis16480_status_error_msgs[] = { + [ADIS16480_DIAG_STAT_XGYRO_FAIL] = "X-axis gyroscope self-test failure", + [ADIS16480_DIAG_STAT_YGYRO_FAIL] = "Y-axis gyroscope self-test failure", + [ADIS16480_DIAG_STAT_ZGYRO_FAIL] = "Z-axis gyroscope self-test failure", + [ADIS16480_DIAG_STAT_XACCL_FAIL] = "X-axis accelerometer self-test failure", + [ADIS16480_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure", + [ADIS16480_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure", + [ADIS16480_DIAG_STAT_XMAGN_FAIL] = "X-axis magnetometer self-test failure", + [ADIS16480_DIAG_STAT_YMAGN_FAIL] = "Y-axis magnetometer self-test failure", + [ADIS16480_DIAG_STAT_ZMAGN_FAIL] = "Z-axis magnetometer self-test failure", + [ADIS16480_DIAG_STAT_BARO_FAIL] = "Barometer self-test failure", +}; + +static int adis16480_enable_irq(struct adis *adis, bool enable); + +#define ADIS16480_DATA(_prod_id, _timeouts) \ +{ \ + .diag_stat_reg = ADIS16480_REG_DIAG_STS, \ + .glob_cmd_reg = ADIS16480_REG_GLOB_CMD, \ + .prod_id_reg = ADIS16480_REG_PROD_ID, \ + .prod_id = (_prod_id), \ + .has_paging = true, \ + .read_delay = 5, \ + .write_delay = 5, \ + .self_test_mask = BIT(1), \ + .self_test_reg = ADIS16480_REG_GLOB_CMD, \ + .status_error_msgs = adis16480_status_error_msgs, \ + .status_error_mask = BIT(ADIS16480_DIAG_STAT_XGYRO_FAIL) | \ + BIT(ADIS16480_DIAG_STAT_YGYRO_FAIL) | \ + BIT(ADIS16480_DIAG_STAT_ZGYRO_FAIL) | \ + BIT(ADIS16480_DIAG_STAT_XACCL_FAIL) | \ + BIT(ADIS16480_DIAG_STAT_YACCL_FAIL) | \ + BIT(ADIS16480_DIAG_STAT_ZACCL_FAIL) | \ + BIT(ADIS16480_DIAG_STAT_XMAGN_FAIL) | \ + BIT(ADIS16480_DIAG_STAT_YMAGN_FAIL) | \ + BIT(ADIS16480_DIAG_STAT_ZMAGN_FAIL) | \ + BIT(ADIS16480_DIAG_STAT_BARO_FAIL), \ + .enable_irq = adis16480_enable_irq, \ + .timeouts = (_timeouts), \ +} + static const struct adis_timeout adis16485_timeouts = { .reset_ms = 560, .sw_reset_ms = 120, @@ -838,7 +890,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .int_clk = 2460000, .max_dec_rate = 2048, .filter_freqs = adis16480_def_filter_freqs, - .timeouts = &adis16485_timeouts, + .adis_data = ADIS16480_DATA(16375, &adis16485_timeouts), }, [ADIS16480] = { .channels = adis16480_channels, @@ -851,7 +903,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .int_clk = 2460000, .max_dec_rate = 2048, .filter_freqs = adis16480_def_filter_freqs, - .timeouts = &adis16480_timeouts, + .adis_data = ADIS16480_DATA(16480, &adis16480_timeouts), }, [ADIS16485] = { .channels = adis16485_channels, @@ -864,7 +916,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .int_clk = 2460000, .max_dec_rate = 2048, .filter_freqs = adis16480_def_filter_freqs, - .timeouts = &adis16485_timeouts, + .adis_data = ADIS16480_DATA(16485, &adis16485_timeouts), }, [ADIS16488] = { .channels = adis16480_channels, @@ -877,7 +929,7 @@ static 
const struct adis16480_chip_info adis16480_chip_info[] = { .int_clk = 2460000, .max_dec_rate = 2048, .filter_freqs = adis16480_def_filter_freqs, - .timeouts = &adis16485_timeouts, + .adis_data = ADIS16480_DATA(16488, &adis16485_timeouts), }, [ADIS16490] = { .channels = adis16485_channels, @@ -891,7 +943,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .max_dec_rate = 4250, .filter_freqs = adis16495_def_filter_freqs, .has_pps_clk_mode = true, - .timeouts = &adis16495_timeouts, + .adis_data = ADIS16480_DATA(16490, &adis16495_timeouts), }, [ADIS16495_1] = { .channels = adis16485_channels, @@ -905,7 +957,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .max_dec_rate = 4250, .filter_freqs = adis16495_def_filter_freqs, .has_pps_clk_mode = true, - .timeouts = &adis16495_1_timeouts, + .adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts), }, [ADIS16495_2] = { .channels = adis16485_channels, @@ -919,7 +971,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .max_dec_rate = 4250, .filter_freqs = adis16495_def_filter_freqs, .has_pps_clk_mode = true, - .timeouts = &adis16495_1_timeouts, + .adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts), }, [ADIS16495_3] = { .channels = adis16485_channels, @@ -933,7 +985,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .max_dec_rate = 4250, .filter_freqs = adis16495_def_filter_freqs, .has_pps_clk_mode = true, - .timeouts = &adis16495_1_timeouts, + .adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts), }, [ADIS16497_1] = { .channels = adis16485_channels, @@ -947,7 +999,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .max_dec_rate = 4250, .filter_freqs = adis16495_def_filter_freqs, .has_pps_clk_mode = true, - .timeouts = &adis16495_1_timeouts, + .adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts), }, [ADIS16497_2] = { .channels = adis16485_channels, @@ -961,7 +1013,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .max_dec_rate = 4250, .filter_freqs = adis16495_def_filter_freqs, .has_pps_clk_mode = true, - .timeouts = &adis16495_1_timeouts, + .adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts), }, [ADIS16497_3] = { .channels = adis16485_channels, @@ -975,7 +1027,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .max_dec_rate = 4250, .filter_freqs = adis16495_def_filter_freqs, .has_pps_clk_mode = true, - .timeouts = &adis16495_1_timeouts, + .adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts), }, }; @@ -1014,87 +1066,6 @@ static int adis16480_enable_irq(struct adis *adis, bool enable) return __adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, val); } -static int adis16480_initial_setup(struct iio_dev *indio_dev) -{ - struct adis16480 *st = iio_priv(indio_dev); - uint16_t prod_id; - unsigned int device_id; - int ret; - - adis_reset(&st->adis); - msleep(70); - - ret = adis_write_reg_16(&st->adis, ADIS16480_REG_GLOB_CMD, BIT(1)); - if (ret) - return ret; - msleep(30); - - ret = adis_check_status(&st->adis); - if (ret) - return ret; - - ret = adis_read_reg_16(&st->adis, ADIS16480_REG_PROD_ID, &prod_id); - if (ret) - return ret; - - ret = sscanf(indio_dev->name, "adis%u\n", &device_id); - if (ret != 1) - return -EINVAL; - - if (prod_id != device_id) - dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.", - device_id, prod_id); - - return 0; -} - -#define ADIS16480_DIAG_STAT_XGYRO_FAIL 0 -#define ADIS16480_DIAG_STAT_YGYRO_FAIL 1 -#define ADIS16480_DIAG_STAT_ZGYRO_FAIL 2 
-#define ADIS16480_DIAG_STAT_XACCL_FAIL 3 -#define ADIS16480_DIAG_STAT_YACCL_FAIL 4 -#define ADIS16480_DIAG_STAT_ZACCL_FAIL 5 -#define ADIS16480_DIAG_STAT_XMAGN_FAIL 8 -#define ADIS16480_DIAG_STAT_YMAGN_FAIL 9 -#define ADIS16480_DIAG_STAT_ZMAGN_FAIL 10 -#define ADIS16480_DIAG_STAT_BARO_FAIL 11 - -static const char * const adis16480_status_error_msgs[] = { - [ADIS16480_DIAG_STAT_XGYRO_FAIL] = "X-axis gyroscope self-test failure", - [ADIS16480_DIAG_STAT_YGYRO_FAIL] = "Y-axis gyroscope self-test failure", - [ADIS16480_DIAG_STAT_ZGYRO_FAIL] = "Z-axis gyroscope self-test failure", - [ADIS16480_DIAG_STAT_XACCL_FAIL] = "X-axis accelerometer self-test failure", - [ADIS16480_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure", - [ADIS16480_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure", - [ADIS16480_DIAG_STAT_XMAGN_FAIL] = "X-axis magnetometer self-test failure", - [ADIS16480_DIAG_STAT_YMAGN_FAIL] = "Y-axis magnetometer self-test failure", - [ADIS16480_DIAG_STAT_ZMAGN_FAIL] = "Z-axis magnetometer self-test failure", - [ADIS16480_DIAG_STAT_BARO_FAIL] = "Barometer self-test failure", -}; - -static const struct adis_data adis16480_data = { - .diag_stat_reg = ADIS16480_REG_DIAG_STS, - .glob_cmd_reg = ADIS16480_REG_GLOB_CMD, - .has_paging = true, - - .read_delay = 5, - .write_delay = 5, - - .status_error_msgs = adis16480_status_error_msgs, - .status_error_mask = BIT(ADIS16480_DIAG_STAT_XGYRO_FAIL) | - BIT(ADIS16480_DIAG_STAT_YGYRO_FAIL) | - BIT(ADIS16480_DIAG_STAT_ZGYRO_FAIL) | - BIT(ADIS16480_DIAG_STAT_XACCL_FAIL) | - BIT(ADIS16480_DIAG_STAT_YACCL_FAIL) | - BIT(ADIS16480_DIAG_STAT_ZACCL_FAIL) | - BIT(ADIS16480_DIAG_STAT_XMAGN_FAIL) | - BIT(ADIS16480_DIAG_STAT_YMAGN_FAIL) | - BIT(ADIS16480_DIAG_STAT_ZMAGN_FAIL) | - BIT(ADIS16480_DIAG_STAT_BARO_FAIL), - - .enable_irq = adis16480_enable_irq, -}; - static int adis16480_config_irq_pin(struct device_node *of_node, struct adis16480 *st) { @@ -1245,22 +1216,6 @@ static int adis16480_get_ext_clocks(struct adis16480 *st) return 0; } -static struct adis_data *adis16480_adis_data_alloc(struct adis16480 *st, - struct device *dev) -{ - struct adis_data *data; - - data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL); - if (!data) - return ERR_PTR(-ENOMEM); - - memcpy(data, &adis16480_data, sizeof(*data)); - - data->timeouts = st->chip_info->timeouts; - - return data; -} - static int adis16480_probe(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); @@ -1285,26 +1240,28 @@ static int adis16480_probe(struct spi_device *spi) indio_dev->info = &adis16480_info; indio_dev->modes = INDIO_DIRECT_MODE; - adis16480_data = adis16480_adis_data_alloc(st, &spi->dev); - if (IS_ERR(adis16480_data)) - return PTR_ERR(adis16480_data); + adis16480_data = &st->chip_info->adis_data; ret = adis_init(&st->adis, indio_dev, spi, adis16480_data); if (ret) return ret; - ret = adis16480_config_irq_pin(spi->dev.of_node, st); + ret = __adis_initial_startup(&st->adis); if (ret) return ret; + ret = adis16480_config_irq_pin(spi->dev.of_node, st); + if (ret) + goto error_stop_device; + ret = adis16480_get_ext_clocks(st); if (ret) - return ret; + goto error_stop_device; if (!IS_ERR_OR_NULL(st->ext_clk)) { ret = adis16480_ext_clk_config(st, spi->dev.of_node, true); if (ret) - return ret; + goto error_stop_device; st->clk_freq = clk_get_rate(st->ext_clk); st->clk_freq *= 1000; /* micro */ @@ -1316,24 +1273,20 @@ static int adis16480_probe(struct spi_device *spi) if (ret) goto error_clk_disable_unprepare; - ret = 
adis16480_initial_setup(indio_dev); - if (ret) - goto error_cleanup_buffer; - ret = iio_device_register(indio_dev); if (ret) - goto error_stop_device; + goto error_cleanup_buffer; adis16480_debugfs_init(indio_dev); return 0; -error_stop_device: - adis16480_stop_device(indio_dev); error_cleanup_buffer: adis_cleanup_buffer_and_trigger(&st->adis, indio_dev); error_clk_disable_unprepare: clk_disable_unprepare(st->ext_clk); +error_stop_device: + adis16480_stop_device(indio_dev); return ret; } diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c index 3f4dd5c00b03..04e5e2a0fd6b 100644 --- a/drivers/iio/imu/adis_buffer.c +++ b/drivers/iio/imu/adis_buffer.c @@ -97,7 +97,8 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, if (j != scan_count) adis->xfer[j].cs_change = 1; adis->xfer[j].len = 2; - adis->xfer[j].delay_usecs = adis->data->read_delay; + adis->xfer[j].delay.value = adis->data->read_delay; + adis->xfer[j].delay.unit = SPI_DELAY_UNIT_USECS; if (j < scan_count) adis->xfer[j].tx_buf = &tx[j]; if (j >= 1) diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig index 017bc0fcc365..7137ea6f25db 100644 --- a/drivers/iio/imu/inv_mpu6050/Kconfig +++ b/drivers/iio/imu/inv_mpu6050/Kconfig @@ -15,9 +15,9 @@ config INV_MPU6050_I2C select INV_MPU6050_IIO select REGMAP_I2C help - This driver supports the Invensense MPU6050/6500/6515, - MPU9150/9250/9255 and ICM20608/20602 motion tracking devices - over I2C. + This driver supports the Invensense MPU6050/9150, + MPU6500/6515/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690 and + IAM20680 motion tracking devices over I2C. This driver can be built as a module. The module will be called inv-mpu6050-i2c. @@ -27,8 +27,8 @@ config INV_MPU6050_SPI select INV_MPU6050_IIO select REGMAP_SPI help - This driver supports the Invensense MPU6000/6500/6515, - MPU9250/9255 and ICM20608/20602 motion tracking devices - over SPI. + This driver supports the Invensense MPU6000, + MPU6500/6515/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690 and + IAM20680 motion tracking devices over SPI. This driver can be built as a module. The module will be called inv-mpu6050-spi. 
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 5096fc49012d..7cb9ff3d3e94 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -16,6 +16,8 @@ #include <linux/acpi.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> #include "inv_mpu_iio.h" #include "inv_mpu_magn.h" @@ -99,9 +101,31 @@ static const struct inv_mpu6050_reg_map reg_set_6050 = { }; static const struct inv_mpu6050_chip_config chip_config_6050 = { + .clk = INV_CLK_INTERNAL, .fsr = INV_MPU6050_FSR_2000DPS, .lpf = INV_MPU6050_FILTER_20HZ, - .divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE), + .divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(50), + .gyro_en = true, + .accl_en = true, + .temp_en = true, + .magn_en = false, + .gyro_fifo_enable = false, + .accl_fifo_enable = false, + .temp_fifo_enable = false, + .magn_fifo_enable = false, + .accl_fs = INV_MPU6050_FS_02G, + .user_ctrl = 0, +}; + +static const struct inv_mpu6050_chip_config chip_config_6500 = { + .clk = INV_CLK_PLL, + .fsr = INV_MPU6050_FSR_2000DPS, + .lpf = INV_MPU6050_FILTER_20HZ, + .divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(50), + .gyro_en = true, + .accl_en = true, + .temp_en = true, + .magn_en = false, .gyro_fifo_enable = false, .accl_fifo_enable = false, .temp_fifo_enable = false, @@ -124,7 +148,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .whoami = INV_MPU6500_WHOAMI_VALUE, .name = "MPU6500", .reg = ®_set_6500, - .config = &chip_config_6050, + .config = &chip_config_6500, .fifo_size = 512, .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, @@ -132,7 +156,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .whoami = INV_MPU6515_WHOAMI_VALUE, .name = "MPU6515", .reg = ®_set_6500, - .config = &chip_config_6050, + .config = &chip_config_6500, .fifo_size = 512, .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, @@ -156,7 +180,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .whoami = INV_MPU9250_WHOAMI_VALUE, .name = "MPU9250", .reg = ®_set_6500, - .config = &chip_config_6050, + .config = &chip_config_6500, .fifo_size = 512, .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, @@ -164,7 +188,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .whoami = INV_MPU9255_WHOAMI_VALUE, .name = "MPU9255", .reg = ®_set_6500, - .config = &chip_config_6050, + .config = &chip_config_6500, .fifo_size = 512, .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, @@ -172,104 +196,242 @@ static const struct inv_mpu6050_hw hw_info[] = { .whoami = INV_ICM20608_WHOAMI_VALUE, .name = "ICM20608", .reg = ®_set_6500, - .config = &chip_config_6050, + .config = &chip_config_6500, .fifo_size = 512, .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE}, }, { + .whoami = INV_ICM20609_WHOAMI_VALUE, + .name = "ICM20609", + .reg = ®_set_6500, + .config = &chip_config_6500, + .fifo_size = 4 * 1024, + .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE}, + }, + { + .whoami = INV_ICM20689_WHOAMI_VALUE, + .name = "ICM20689", + .reg = ®_set_6500, + .config = &chip_config_6500, + .fifo_size = 4 * 1024, + .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE}, + }, + { .whoami = INV_ICM20602_WHOAMI_VALUE, .name = "ICM20602", .reg = ®_set_icm20602, - .config = &chip_config_6050, + .config = &chip_config_6500, .fifo_size = 1008, .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE}, }, + { + .whoami = 
INV_ICM20690_WHOAMI_VALUE, + .name = "ICM20690", + .reg = ®_set_6500, + .config = &chip_config_6500, + .fifo_size = 1024, + .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE}, + }, + { + .whoami = INV_IAM20680_WHOAMI_VALUE, + .name = "IAM20680", + .reg = ®_set_6500, + .config = &chip_config_6500, + .fifo_size = 512, + .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE}, + }, }; -int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask) +static int inv_mpu6050_pwr_mgmt_1_write(struct inv_mpu6050_state *st, bool sleep, + int clock, int temp_dis) { - unsigned int d, mgmt_1; - int result; - /* - * switch clock needs to be careful. Only when gyro is on, can - * clock source be switched to gyro. Otherwise, it must be set to - * internal clock - */ - if (mask == INV_MPU6050_BIT_PWR_GYRO_STBY) { - result = regmap_read(st->map, st->reg->pwr_mgmt_1, &mgmt_1); - if (result) - return result; + u8 val; + + if (clock < 0) + clock = st->chip_config.clk; + if (temp_dis < 0) + temp_dis = !st->chip_config.temp_en; - mgmt_1 &= ~INV_MPU6050_BIT_CLK_MASK; + val = clock & INV_MPU6050_BIT_CLK_MASK; + if (temp_dis) + val |= INV_MPU6050_BIT_TEMP_DIS; + if (sleep) + val |= INV_MPU6050_BIT_SLEEP; + + dev_dbg(regmap_get_device(st->map), "pwr_mgmt_1: 0x%x\n", val); + return regmap_write(st->map, st->reg->pwr_mgmt_1, val); +} + +static int inv_mpu6050_clock_switch(struct inv_mpu6050_state *st, + unsigned int clock) +{ + int ret; + + switch (st->chip_type) { + case INV_MPU6050: + case INV_MPU6000: + case INV_MPU9150: + /* old chips: switch clock manually */ + ret = inv_mpu6050_pwr_mgmt_1_write(st, false, clock, -1); + if (ret) + return ret; + st->chip_config.clk = clock; + break; + default: + /* automatic clock switching, nothing to do */ + break; } - if ((mask == INV_MPU6050_BIT_PWR_GYRO_STBY) && (!en)) { - /* - * turning off gyro requires switch to internal clock first. 
- * Then turn off gyro engine - */ - mgmt_1 |= INV_CLK_INTERNAL; - result = regmap_write(st->map, st->reg->pwr_mgmt_1, mgmt_1); - if (result) - return result; + return 0; +} + +int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, + unsigned int mask) +{ + unsigned int sleep; + u8 pwr_mgmt2, user_ctrl; + int ret; + + /* delete useless requests */ + if (mask & INV_MPU6050_SENSOR_ACCL && en == st->chip_config.accl_en) + mask &= ~INV_MPU6050_SENSOR_ACCL; + if (mask & INV_MPU6050_SENSOR_GYRO && en == st->chip_config.gyro_en) + mask &= ~INV_MPU6050_SENSOR_GYRO; + if (mask & INV_MPU6050_SENSOR_TEMP && en == st->chip_config.temp_en) + mask &= ~INV_MPU6050_SENSOR_TEMP; + if (mask & INV_MPU6050_SENSOR_MAGN && en == st->chip_config.magn_en) + mask &= ~INV_MPU6050_SENSOR_MAGN; + if (mask == 0) + return 0; + + /* turn on/off temperature sensor */ + if (mask & INV_MPU6050_SENSOR_TEMP) { + ret = inv_mpu6050_pwr_mgmt_1_write(st, false, -1, !en); + if (ret) + return ret; + st->chip_config.temp_en = en; } - result = regmap_read(st->map, st->reg->pwr_mgmt_2, &d); - if (result) - return result; - if (en) - d &= ~mask; - else - d |= mask; - result = regmap_write(st->map, st->reg->pwr_mgmt_2, d); - if (result) - return result; + /* update user_crtl for driving magnetometer */ + if (mask & INV_MPU6050_SENSOR_MAGN) { + user_ctrl = st->chip_config.user_ctrl; + if (en) + user_ctrl |= INV_MPU6050_BIT_I2C_MST_EN; + else + user_ctrl &= ~INV_MPU6050_BIT_I2C_MST_EN; + ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl); + if (ret) + return ret; + st->chip_config.user_ctrl = user_ctrl; + st->chip_config.magn_en = en; + } - if (en) { - /* Wait for output to stabilize */ - msleep(INV_MPU6050_TEMP_UP_TIME); - if (mask == INV_MPU6050_BIT_PWR_GYRO_STBY) { - /* switch internal clock to PLL */ - mgmt_1 |= INV_CLK_PLL; - result = regmap_write(st->map, - st->reg->pwr_mgmt_1, mgmt_1); - if (result) - return result; + /* manage accel & gyro engines */ + if (mask & (INV_MPU6050_SENSOR_ACCL | INV_MPU6050_SENSOR_GYRO)) { + /* compute power management 2 current value */ + pwr_mgmt2 = 0; + if (!st->chip_config.accl_en) + pwr_mgmt2 |= INV_MPU6050_BIT_PWR_ACCL_STBY; + if (!st->chip_config.gyro_en) + pwr_mgmt2 |= INV_MPU6050_BIT_PWR_GYRO_STBY; + + /* update to new requested value */ + if (mask & INV_MPU6050_SENSOR_ACCL) { + if (en) + pwr_mgmt2 &= ~INV_MPU6050_BIT_PWR_ACCL_STBY; + else + pwr_mgmt2 |= INV_MPU6050_BIT_PWR_ACCL_STBY; + } + if (mask & INV_MPU6050_SENSOR_GYRO) { + if (en) + pwr_mgmt2 &= ~INV_MPU6050_BIT_PWR_GYRO_STBY; + else + pwr_mgmt2 |= INV_MPU6050_BIT_PWR_GYRO_STBY; + } + + /* switch clock to internal when turning gyro off */ + if (mask & INV_MPU6050_SENSOR_GYRO && !en) { + ret = inv_mpu6050_clock_switch(st, INV_CLK_INTERNAL); + if (ret) + return ret; + } + + /* update sensors engine */ + dev_dbg(regmap_get_device(st->map), "pwr_mgmt_2: 0x%x\n", + pwr_mgmt2); + ret = regmap_write(st->map, st->reg->pwr_mgmt_2, pwr_mgmt2); + if (ret) + return ret; + if (mask & INV_MPU6050_SENSOR_ACCL) + st->chip_config.accl_en = en; + if (mask & INV_MPU6050_SENSOR_GYRO) + st->chip_config.gyro_en = en; + + /* compute required time to have sensors stabilized */ + sleep = 0; + if (en) { + if (mask & INV_MPU6050_SENSOR_ACCL) { + if (sleep < INV_MPU6050_ACCEL_UP_TIME) + sleep = INV_MPU6050_ACCEL_UP_TIME; + } + if (mask & INV_MPU6050_SENSOR_GYRO) { + if (sleep < INV_MPU6050_GYRO_UP_TIME) + sleep = INV_MPU6050_GYRO_UP_TIME; + } + } else { + if (mask & INV_MPU6050_SENSOR_GYRO) { + if (sleep < INV_MPU6050_GYRO_DOWN_TIME) + sleep 
= INV_MPU6050_GYRO_DOWN_TIME; + } + } + if (sleep) + msleep(sleep); + + /* switch clock to PLL when turning gyro on */ + if (mask & INV_MPU6050_SENSOR_GYRO && en) { + ret = inv_mpu6050_clock_switch(st, INV_CLK_PLL); + if (ret) + return ret; } } return 0; } -int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on) +static int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, + bool power_on) { int result; - if (power_on) { - if (!st->powerup_count) { - result = regmap_write(st->map, st->reg->pwr_mgmt_1, 0); - if (result) - return result; - usleep_range(INV_MPU6050_REG_UP_TIME_MIN, - INV_MPU6050_REG_UP_TIME_MAX); - } - st->powerup_count++; - } else { - if (st->powerup_count == 1) { - result = regmap_write(st->map, st->reg->pwr_mgmt_1, - INV_MPU6050_BIT_SLEEP); - if (result) - return result; - } - st->powerup_count--; - } + result = inv_mpu6050_pwr_mgmt_1_write(st, !power_on, -1, -1); + if (result) + return result; - dev_dbg(regmap_get_device(st->map), "set power %d, count=%u\n", - power_on, st->powerup_count); + if (power_on) + usleep_range(INV_MPU6050_REG_UP_TIME_MIN, + INV_MPU6050_REG_UP_TIME_MAX); return 0; } -EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg); + +static int inv_mpu6050_set_gyro_fsr(struct inv_mpu6050_state *st, + enum inv_mpu6050_fsr_e val) +{ + unsigned int gyro_shift; + u8 data; + + switch (st->chip_type) { + case INV_ICM20690: + gyro_shift = INV_ICM20690_GYRO_CONFIG_FSR_SHIFT; + break; + default: + gyro_shift = INV_MPU6050_GYRO_CONFIG_FSR_SHIFT; + break; + } + + data = val << gyro_shift; + return regmap_write(st->map, st->reg->gyro_config, data); +} /** * inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent @@ -286,20 +448,23 @@ static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st, if (result) return result; + /* set accel lpf */ switch (st->chip_type) { case INV_MPU6050: case INV_MPU6000: case INV_MPU9150: /* old chips, nothing to do */ - result = 0; + return 0; + case INV_ICM20689: + case INV_ICM20690: + /* set FIFO size to maximum value */ + val |= INV_ICM20689_BITS_FIFO_SIZE_MAX; break; default: - /* set accel lpf */ - result = regmap_write(st->map, st->reg->accel_lpf, val); break; } - return result; + return regmap_write(st->map, st->reg->accel_lpf, val); } /** @@ -317,35 +482,28 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev) u8 d; struct inv_mpu6050_state *st = iio_priv(indio_dev); - result = inv_mpu6050_set_power_itg(st, true); + result = inv_mpu6050_set_gyro_fsr(st, st->chip_config.fsr); if (result) return result; - d = (INV_MPU6050_FSR_2000DPS << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT); - result = regmap_write(st->map, st->reg->gyro_config, d); - if (result) - goto error_power_off; - result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ); + result = inv_mpu6050_set_lpf_regs(st, st->chip_config.lpf); if (result) - goto error_power_off; + return result; - d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE); + d = st->chip_config.divider; result = regmap_write(st->map, st->reg->sample_rate_div, d); if (result) - goto error_power_off; + return result; - d = (INV_MPU6050_FS_02G << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT); + d = (st->chip_config.accl_fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT); result = regmap_write(st->map, st->reg->accl_config, d); if (result) - goto error_power_off; + return result; result = regmap_write(st->map, st->reg->int_pin_cfg, st->irq_mask); if (result) return result; - memcpy(&st->chip_config, hw_info[st->chip_type].config, - sizeof(struct 
inv_mpu6050_chip_config)); - /* * Internal chip period is 1ms (1kHz). * Let's use at the beginning the theorical value before measuring @@ -356,13 +514,9 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev) /* magn chip init, noop if not present in the chip */ result = inv_mpu_magn_probe(st); if (result) - goto error_power_off; - - return inv_mpu6050_set_power_itg(st, false); + return result; -error_power_off: - inv_mpu6050_set_power_itg(st, false); - return result; + return 0; } static int inv_mpu6050_sensor_set(struct inv_mpu6050_state *st, int reg, @@ -399,45 +553,85 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev, int *val) { struct inv_mpu6050_state *st = iio_priv(indio_dev); + struct device *pdev = regmap_get_device(st->map); + unsigned int freq_hz, period_us, min_sleep_us, max_sleep_us; int result; int ret; - result = inv_mpu6050_set_power_itg(st, true); - if (result) + /* compute sample period */ + freq_hz = INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider); + period_us = 1000000 / freq_hz; + + result = pm_runtime_get_sync(pdev); + if (result < 0) { + pm_runtime_put_noidle(pdev); return result; + } switch (chan->type) { case IIO_ANGL_VEL: - result = inv_mpu6050_switch_engine(st, true, - INV_MPU6050_BIT_PWR_GYRO_STBY); - if (result) - goto error_power_off; + if (!st->chip_config.gyro_en) { + result = inv_mpu6050_switch_engine(st, true, + INV_MPU6050_SENSOR_GYRO); + if (result) + goto error_power_off; + /* need to wait 2 periods to have first valid sample */ + min_sleep_us = 2 * period_us; + max_sleep_us = 2 * (period_us + period_us / 2); + usleep_range(min_sleep_us, max_sleep_us); + } ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro, chan->channel2, val); - result = inv_mpu6050_switch_engine(st, false, - INV_MPU6050_BIT_PWR_GYRO_STBY); - if (result) - goto error_power_off; break; case IIO_ACCEL: - result = inv_mpu6050_switch_engine(st, true, - INV_MPU6050_BIT_PWR_ACCL_STBY); - if (result) - goto error_power_off; + if (!st->chip_config.accl_en) { + result = inv_mpu6050_switch_engine(st, true, + INV_MPU6050_SENSOR_ACCL); + if (result) + goto error_power_off; + /* wait 1 period for first sample availability */ + min_sleep_us = period_us; + max_sleep_us = period_us + period_us / 2; + usleep_range(min_sleep_us, max_sleep_us); + } ret = inv_mpu6050_sensor_show(st, st->reg->raw_accl, chan->channel2, val); - result = inv_mpu6050_switch_engine(st, false, - INV_MPU6050_BIT_PWR_ACCL_STBY); - if (result) - goto error_power_off; break; case IIO_TEMP: - /* wait for stablization */ - msleep(INV_MPU6050_SENSOR_UP_TIME); + /* temperature sensor work only with accel and/or gyro */ + if (!st->chip_config.accl_en && !st->chip_config.gyro_en) { + result = -EBUSY; + goto error_power_off; + } + if (!st->chip_config.temp_en) { + result = inv_mpu6050_switch_engine(st, true, + INV_MPU6050_SENSOR_TEMP); + if (result) + goto error_power_off; + /* wait 1 period for first sample availability */ + min_sleep_us = period_us; + max_sleep_us = period_us + period_us / 2; + usleep_range(min_sleep_us, max_sleep_us); + } ret = inv_mpu6050_sensor_show(st, st->reg->temperature, IIO_MOD_X, val); break; case IIO_MAGN: + if (!st->chip_config.magn_en) { + result = inv_mpu6050_switch_engine(st, true, + INV_MPU6050_SENSOR_MAGN); + if (result) + goto error_power_off; + /* frequency is limited for magnetometer */ + if (freq_hz > INV_MPU_MAGN_FREQ_HZ_MAX) { + freq_hz = INV_MPU_MAGN_FREQ_HZ_MAX; + period_us = 1000000 / freq_hz; + } + /* need to wait 2 periods to have first valid sample */ 
+ min_sleep_us = 2 * period_us; + max_sleep_us = 2 * (period_us + period_us / 2); + usleep_range(min_sleep_us, max_sleep_us); + } ret = inv_mpu_magn_read(st, chan->channel2, val); break; default: @@ -445,14 +639,13 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev, break; } - result = inv_mpu6050_set_power_itg(st, false); - if (result) - goto error_power_off; + pm_runtime_mark_last_busy(pdev); + pm_runtime_put_autosuspend(pdev); return ret; error_power_off: - inv_mpu6050_set_power_itg(st, false); + pm_runtime_put_autosuspend(pdev); return result; } @@ -533,12 +726,10 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev, static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val) { int result, i; - u8 d; for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) { if (gyro_scale_6050[i] == val) { - d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT); - result = regmap_write(st->map, st->reg->gyro_config, d); + result = inv_mpu6050_set_gyro_fsr(st, i); if (result) return result; @@ -593,6 +784,7 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct inv_mpu6050_state *st = iio_priv(indio_dev); + struct device *pdev = regmap_get_device(st->map); int result; /* @@ -604,9 +796,11 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev, return result; mutex_lock(&st->lock); - result = inv_mpu6050_set_power_itg(st, true); - if (result) + result = pm_runtime_get_sync(pdev); + if (result < 0) { + pm_runtime_put_noidle(pdev); goto error_write_raw_unlock; + } switch (mask) { case IIO_CHAN_INFO_SCALE: @@ -644,7 +838,8 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev, break; } - result |= inv_mpu6050_set_power_itg(st, false); + pm_runtime_mark_last_busy(pdev); + pm_runtime_put_autosuspend(pdev); error_write_raw_unlock: mutex_unlock(&st->lock); iio_device_release_direct_mode(indio_dev); @@ -655,30 +850,32 @@ error_write_raw_unlock: /** * inv_mpu6050_set_lpf() - set low pass filer based on fifo rate. * - * Based on the Nyquist principle, the sampling rate must - * exceed twice of the bandwidth of the signal, or there - * would be alising. This function basically search for the - * correct low pass parameters based on the fifo rate, e.g, - * sampling frequency. + * Based on the Nyquist principle, the bandwidth of the low + * pass filter must not exceed the signal sampling rate divided + * by 2, or there would be aliasing. + * This function basically search for the correct low pass + * parameters based on the fifo rate, e.g, sampling frequency. * * lpf is set automatically when setting sampling rate to avoid any aliases. 
*/ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) { - static const int hz[] = {188, 98, 42, 20, 10, 5}; + static const int hz[] = {400, 200, 90, 40, 20, 10}; static const int d[] = { - INV_MPU6050_FILTER_188HZ, INV_MPU6050_FILTER_98HZ, - INV_MPU6050_FILTER_42HZ, INV_MPU6050_FILTER_20HZ, + INV_MPU6050_FILTER_200HZ, INV_MPU6050_FILTER_100HZ, + INV_MPU6050_FILTER_45HZ, INV_MPU6050_FILTER_20HZ, INV_MPU6050_FILTER_10HZ, INV_MPU6050_FILTER_5HZ }; - int i, h, result; + int i, result; u8 data; - h = (rate >> 1); - i = 0; - while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) - i++; - data = d[i]; + data = INV_MPU6050_FILTER_5HZ; + for (i = 0; i < ARRAY_SIZE(hz); ++i) { + if (rate >= hz[i]) { + data = d[i]; + break; + } + } result = inv_mpu6050_set_lpf_regs(st, data); if (result) return result; @@ -699,6 +896,7 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr, int result; struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct inv_mpu6050_state *st = iio_priv(indio_dev); + struct device *pdev = regmap_get_device(st->map); if (kstrtoint(buf, 10, &fifo_rate)) return -EINVAL; @@ -706,10 +904,6 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr, fifo_rate > INV_MPU6050_MAX_FIFO_RATE) return -EINVAL; - result = iio_device_claim_direct_mode(indio_dev); - if (result) - return result; - /* compute the chip sample rate divider */ d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(fifo_rate); /* compute back the fifo rate to handle truncation cases */ @@ -720,9 +914,11 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr, result = 0; goto fifo_rate_fail_unlock; } - result = inv_mpu6050_set_power_itg(st, true); - if (result) + result = pm_runtime_get_sync(pdev); + if (result < 0) { + pm_runtime_put_noidle(pdev); goto fifo_rate_fail_unlock; + } result = regmap_write(st->map, st->reg->sample_rate_div, d); if (result) @@ -738,11 +934,11 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr, if (result) goto fifo_rate_fail_power_off; + pm_runtime_mark_last_busy(pdev); fifo_rate_fail_power_off: - result |= inv_mpu6050_set_power_itg(st, false); + pm_runtime_put_autosuspend(pdev); fifo_rate_fail_unlock: mutex_unlock(&st->lock); - iio_device_release_direct_mode(indio_dev); if (result) return result; @@ -1066,11 +1262,13 @@ static const struct iio_info mpu_info = { static int inv_check_and_setup_chip(struct inv_mpu6050_state *st) { int result; - unsigned int regval; + unsigned int regval, mask; int i; st->hw = &hw_info[st->chip_type]; st->reg = hw_info[st->chip_type].reg; + memcpy(&st->chip_config, hw_info[st->chip_type].config, + sizeof(st->chip_config)); /* check chip self-identification */ result = regmap_read(st->map, INV_MPU6050_REG_WHOAMI, ®val); @@ -1102,6 +1300,24 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st) if (result) return result; msleep(INV_MPU6050_POWER_UP_TIME); + switch (st->chip_type) { + case INV_MPU6000: + case INV_MPU6500: + case INV_MPU6515: + case INV_MPU9250: + case INV_MPU9255: + /* reset signal path (required for spi connection) */ + regval = INV_MPU6050_BIT_TEMP_RST | INV_MPU6050_BIT_ACCEL_RST | + INV_MPU6050_BIT_GYRO_RST; + result = regmap_write(st->map, INV_MPU6050_REG_SIGNAL_PATH_RESET, + regval); + if (result) + return result; + msleep(INV_MPU6050_POWER_UP_TIME); + break; + default: + break; + } /* * Turn power on. 
After reset, the sleep bit could be on @@ -1112,17 +1328,13 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st) result = inv_mpu6050_set_power_itg(st, true); if (result) return result; - - result = inv_mpu6050_switch_engine(st, false, - INV_MPU6050_BIT_PWR_ACCL_STBY); - if (result) - goto error_power_off; - result = inv_mpu6050_switch_engine(st, false, - INV_MPU6050_BIT_PWR_GYRO_STBY); + mask = INV_MPU6050_SENSOR_ACCL | INV_MPU6050_SENSOR_GYRO | + INV_MPU6050_SENSOR_TEMP | INV_MPU6050_SENSOR_MAGN; + result = inv_mpu6050_switch_engine(st, false, mask); if (result) goto error_power_off; - return inv_mpu6050_set_power_itg(st, false); + return 0; error_power_off: inv_mpu6050_set_power_itg(st, false); @@ -1139,7 +1351,7 @@ static int inv_mpu_core_enable_regulator_vddio(struct inv_mpu6050_state *st) "Failed to enable vddio regulator: %d\n", result); } else { /* Give the device a little bit of time to start up. */ - usleep_range(35000, 70000); + usleep_range(3000, 5000); } return result; @@ -1170,6 +1382,14 @@ static void inv_mpu_core_disable_regulator_action(void *_data) inv_mpu_core_disable_regulator_vddio(st); } +static void inv_mpu_pm_disable(void *data) +{ + struct device *dev = data; + + pm_runtime_put_sync_suspend(dev); + pm_runtime_disable(dev); +} + int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name, int (*inv_mpu_bus_setup)(struct iio_dev *), int chip_type) { @@ -1194,7 +1414,6 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name, st = iio_priv(indio_dev); mutex_init(&st->lock); st->chip_type = chip_type; - st->powerup_count = 0; st->irq = irq; st->map = regmap; @@ -1259,6 +1478,7 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name, dev_err(dev, "Failed to enable vdd regulator: %d\n", result); return result; } + msleep(INV_MPU6050_POWER_UP_TIME); result = inv_mpu_core_enable_regulator_vddio(st); if (result) { @@ -1287,7 +1507,7 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name, result = inv_mpu6050_init_config(indio_dev); if (result) { dev_err(dev, "Could not initialize device.\n"); - return result; + goto error_power_off; } dev_set_drvdata(dev, indio_dev); @@ -1299,8 +1519,24 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name, indio_dev->name = dev_name(dev); /* requires parent device set in indio_dev */ - if (inv_mpu_bus_setup) - inv_mpu_bus_setup(indio_dev); + if (inv_mpu_bus_setup) { + result = inv_mpu_bus_setup(indio_dev); + if (result) + goto error_power_off; + } + + /* chip init is done, turning on runtime power management */ + result = pm_runtime_set_active(dev); + if (result) + goto error_power_off; + pm_runtime_get_noresume(dev); + pm_runtime_enable(dev); + pm_runtime_set_autosuspend_delay(dev, INV_MPU6050_SUSPEND_DELAY_MS); + pm_runtime_use_autosuspend(dev); + pm_runtime_put(dev); + result = devm_add_action_or_reset(dev, inv_mpu_pm_disable, dev); + if (result) + return result; switch (chip_type) { case INV_MPU9150: @@ -1359,14 +1595,17 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name, } return 0; + +error_power_off: + inv_mpu6050_set_power_itg(st, false); + return result; } EXPORT_SYMBOL_GPL(inv_mpu_core_probe); -#ifdef CONFIG_PM_SLEEP - -static int inv_mpu_resume(struct device *dev) +static int __maybe_unused inv_mpu_resume(struct device *dev) { - struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev)); + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct inv_mpu6050_state *st = iio_priv(indio_dev); int 
result; mutex_lock(&st->lock); @@ -1375,27 +1614,101 @@ static int inv_mpu_resume(struct device *dev) goto out_unlock; result = inv_mpu6050_set_power_itg(st, true); + if (result) + goto out_unlock; + + result = inv_mpu6050_switch_engine(st, true, st->suspended_sensors); + if (result) + goto out_unlock; + + if (iio_buffer_enabled(indio_dev)) + result = inv_mpu6050_prepare_fifo(st, true); + out_unlock: mutex_unlock(&st->lock); return result; } -static int inv_mpu_suspend(struct device *dev) +static int __maybe_unused inv_mpu_suspend(struct device *dev) { - struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev)); + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct inv_mpu6050_state *st = iio_priv(indio_dev); int result; mutex_lock(&st->lock); + + if (iio_buffer_enabled(indio_dev)) { + result = inv_mpu6050_prepare_fifo(st, false); + if (result) + goto out_unlock; + } + + st->suspended_sensors = 0; + if (st->chip_config.accl_en) + st->suspended_sensors |= INV_MPU6050_SENSOR_ACCL; + if (st->chip_config.gyro_en) + st->suspended_sensors |= INV_MPU6050_SENSOR_GYRO; + if (st->chip_config.temp_en) + st->suspended_sensors |= INV_MPU6050_SENSOR_TEMP; + if (st->chip_config.magn_en) + st->suspended_sensors |= INV_MPU6050_SENSOR_MAGN; + result = inv_mpu6050_switch_engine(st, false, st->suspended_sensors); + if (result) + goto out_unlock; + result = inv_mpu6050_set_power_itg(st, false); + if (result) + goto out_unlock; + inv_mpu_core_disable_regulator_vddio(st); +out_unlock: mutex_unlock(&st->lock); return result; } -#endif /* CONFIG_PM_SLEEP */ -SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume); +static int __maybe_unused inv_mpu_runtime_suspend(struct device *dev) +{ + struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev)); + unsigned int sensors; + int ret; + + mutex_lock(&st->lock); + + sensors = INV_MPU6050_SENSOR_ACCL | INV_MPU6050_SENSOR_GYRO | + INV_MPU6050_SENSOR_TEMP | INV_MPU6050_SENSOR_MAGN; + ret = inv_mpu6050_switch_engine(st, false, sensors); + if (ret) + goto out_unlock; + + ret = inv_mpu6050_set_power_itg(st, false); + if (ret) + goto out_unlock; + + inv_mpu_core_disable_regulator_vddio(st); + +out_unlock: + mutex_unlock(&st->lock); + return ret; +} + +static int __maybe_unused inv_mpu_runtime_resume(struct device *dev) +{ + struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev)); + int ret; + + ret = inv_mpu_core_enable_regulator_vddio(st); + if (ret) + return ret; + + return inv_mpu6050_set_power_itg(st, true); +} + +const struct dev_pm_ops inv_mpu_pmops = { + SET_SYSTEM_SLEEP_PM_OPS(inv_mpu_suspend, inv_mpu_resume) + SET_RUNTIME_PM_OPS(inv_mpu_runtime_suspend, inv_mpu_runtime_resume, NULL) +}; EXPORT_SYMBOL_GPL(inv_mpu_pmops); MODULE_AUTHOR("Invensense Corporation"); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c index f47a28b4be23..6993d3b87bb0 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c @@ -10,6 +10,7 @@ #include <linux/iio/iio.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/property.h> #include "inv_mpu_iio.h" static const struct regmap_config inv_mpu_regmap_config = { @@ -19,62 +20,19 @@ static const struct regmap_config inv_mpu_regmap_config = { static int inv_mpu6050_select_bypass(struct i2c_mux_core *muxc, u32 chan_id) { - struct iio_dev *indio_dev = i2c_mux_priv(muxc); - struct inv_mpu6050_state *st = iio_priv(indio_dev); - int ret; - - mutex_lock(&st->lock); - - ret = inv_mpu6050_set_power_itg(st, 
true); - if (ret) - goto error_unlock; - - ret = regmap_write(st->map, st->reg->int_pin_cfg, - st->irq_mask | INV_MPU6050_BIT_BYPASS_EN); - -error_unlock: - mutex_unlock(&st->lock); - - return ret; -} - -static int inv_mpu6050_deselect_bypass(struct i2c_mux_core *muxc, u32 chan_id) -{ - struct iio_dev *indio_dev = i2c_mux_priv(muxc); - struct inv_mpu6050_state *st = iio_priv(indio_dev); - - mutex_lock(&st->lock); - - /* It doesn't really matter if any of the calls fail */ - regmap_write(st->map, st->reg->int_pin_cfg, st->irq_mask); - inv_mpu6050_set_power_itg(st, false); - - mutex_unlock(&st->lock); - return 0; } -static const char *inv_mpu_match_acpi_device(struct device *dev, - enum inv_devices *chip_id) -{ - const struct acpi_device_id *id; - - id = acpi_match_device(dev->driver->acpi_match_table, dev); - if (!id) - return NULL; - - *chip_id = (int)id->driver_data; - - return dev_name(dev); -} - static bool inv_mpu_i2c_aux_bus(struct device *dev) { struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev)); switch (st->chip_type) { case INV_ICM20608: + case INV_ICM20609: + case INV_ICM20689: case INV_ICM20602: + case INV_IAM20680: /* no i2c auxiliary bus on the chip */ return false; case INV_MPU9150: @@ -89,19 +47,20 @@ static bool inv_mpu_i2c_aux_bus(struct device *dev) } } -/* - * MPU9xxx magnetometer support requires to disable i2c auxiliary bus support. - * To ensure backward compatibility with existing setups, do not disable - * i2c auxiliary bus if it used. - * Check for i2c-gate node in devicetree and set magnetometer disabled. - * Only MPU6500 is supported by ACPI, no need to check. - */ -static int inv_mpu_magn_disable(struct iio_dev *indio_dev) +static int inv_mpu_i2c_aux_setup(struct iio_dev *indio_dev) { struct inv_mpu6050_state *st = iio_priv(indio_dev); struct device *dev = indio_dev->dev.parent; struct device_node *mux_node; + int ret; + /* + * MPU9xxx magnetometer support requires to disable i2c auxiliary bus. + * To ensure backward compatibility with existing setups, do not disable + * i2c auxiliary bus if it used. + * Check for i2c-gate node in devicetree and set magnetometer disabled. + * Only MPU6500 is supported by ACPI, no need to check. 
+ */ switch (st->chip_type) { case INV_MPU9150: case INV_MPU9250: @@ -117,6 +76,14 @@ static int inv_mpu_magn_disable(struct iio_dev *indio_dev) break; } + /* enable i2c bypass when using i2c auxiliary bus */ + if (inv_mpu_i2c_aux_bus(dev)) { + ret = regmap_write(st->map, st->reg->int_pin_cfg, + st->irq_mask | INV_MPU6050_BIT_BYPASS_EN); + if (ret) + return ret; + } + return 0; } @@ -130,6 +97,7 @@ static int inv_mpu_magn_disable(struct iio_dev *indio_dev) static int inv_mpu_probe(struct i2c_client *client, const struct i2c_device_id *id) { + const void *match; struct inv_mpu6050_state *st; int result; enum inv_devices chip_type; @@ -140,18 +108,14 @@ static int inv_mpu_probe(struct i2c_client *client, I2C_FUNC_SMBUS_I2C_BLOCK)) return -EOPNOTSUPP; - if (client->dev.of_node) { - chip_type = (enum inv_devices) - of_device_get_match_data(&client->dev); + match = device_get_match_data(&client->dev); + if (match) { + chip_type = (enum inv_devices)match; name = client->name; } else if (id) { chip_type = (enum inv_devices) id->driver_data; name = id->name; - } else if (ACPI_HANDLE(&client->dev)) { - name = inv_mpu_match_acpi_device(&client->dev, &chip_type); - if (!name) - return -ENODEV; } else { return -ENOSYS; } @@ -164,7 +128,7 @@ static int inv_mpu_probe(struct i2c_client *client, } result = inv_mpu_core_probe(regmap, client->irq, name, - inv_mpu_magn_disable, chip_type); + inv_mpu_i2c_aux_setup, chip_type); if (result < 0) return result; @@ -173,8 +137,7 @@ static int inv_mpu_probe(struct i2c_client *client, /* declare i2c auxiliary bus */ st->muxc = i2c_mux_alloc(client->adapter, &client->dev, 1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE, - inv_mpu6050_select_bypass, - inv_mpu6050_deselect_bypass); + inv_mpu6050_select_bypass, NULL); if (!st->muxc) return -ENOMEM; st->muxc->priv = dev_get_drvdata(&client->dev); @@ -218,7 +181,11 @@ static const struct i2c_device_id inv_mpu_id[] = { {"mpu9250", INV_MPU9250}, {"mpu9255", INV_MPU9255}, {"icm20608", INV_ICM20608}, + {"icm20609", INV_ICM20609}, + {"icm20689", INV_ICM20689}, {"icm20602", INV_ICM20602}, + {"icm20690", INV_ICM20690}, + {"iam20680", INV_IAM20680}, {} }; @@ -254,9 +221,25 @@ static const struct of_device_id inv_of_match[] = { .data = (void *)INV_ICM20608 }, { + .compatible = "invensense,icm20609", + .data = (void *)INV_ICM20609 + }, + { + .compatible = "invensense,icm20689", + .data = (void *)INV_ICM20689 + }, + { .compatible = "invensense,icm20602", .data = (void *)INV_ICM20602 }, + { + .compatible = "invensense,icm20690", + .data = (void *)INV_ICM20690 + }, + { + .compatible = "invensense,iam20680", + .data = (void *)INV_IAM20680 + }, { } }; MODULE_DEVICE_TABLE(of, inv_of_match); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h index 6158fca7f70e..cd38b3fccc7b 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h @@ -75,15 +75,30 @@ enum inv_devices { INV_MPU9250, INV_MPU9255, INV_ICM20608, + INV_ICM20609, + INV_ICM20689, INV_ICM20602, + INV_ICM20690, + INV_IAM20680, INV_NUM_PARTS }; +/* chip sensors mask: accelerometer, gyroscope, temperature, magnetometer */ +#define INV_MPU6050_SENSOR_ACCL BIT(0) +#define INV_MPU6050_SENSOR_GYRO BIT(1) +#define INV_MPU6050_SENSOR_TEMP BIT(2) +#define INV_MPU6050_SENSOR_MAGN BIT(3) + /** * struct inv_mpu6050_chip_config - Cached chip configuration data. + * @clk: selected chip clock * @fsr: Full scale range. * @lpf: Digital low pass filter frequency. * @accl_fs: accel full scale range. 
+ * @accl_en: accel engine enabled + * @gyro_en: gyro engine enabled + * @temp_en: temperature sensor enabled + * @magn_en: magn engine (i2c master) enabled * @accl_fifo_enable: enable accel data output * @gyro_fifo_enable: enable gyro data output * @temp_fifo_enable: enable temp data output @@ -91,9 +106,14 @@ enum inv_devices { * @divider: chip sample rate divider (sample rate divider - 1) */ struct inv_mpu6050_chip_config { + unsigned int clk:3; unsigned int fsr:2; unsigned int lpf:3; unsigned int accl_fs:2; + unsigned int accl_en:1; + unsigned int gyro_en:1; + unsigned int temp_en:1; + unsigned int magn_en:1; unsigned int accl_fifo_enable:1; unsigned int gyro_fifo_enable:1; unsigned int temp_fifo_enable:1; @@ -144,6 +164,7 @@ struct inv_mpu6050_hw { * @magn_disabled: magnetometer disabled for backward compatibility reason. * @magn_raw_to_gauss: coefficient to convert mag raw value to Gauss. * @magn_orient: magnetometer sensor chip orientation if available. + * @suspended_sensors: sensors mask of sensors turned off for suspend */ struct inv_mpu6050_state { struct mutex lock; @@ -154,7 +175,6 @@ struct inv_mpu6050_state { enum inv_devices chip_type; struct i2c_mux_core *muxc; struct i2c_client *mux_client; - unsigned int powerup_count; struct inv_mpu6050_platform_data plat_data; struct iio_mount_matrix orientation; struct regmap *map; @@ -169,6 +189,7 @@ struct inv_mpu6050_state { bool magn_disabled; s32 magn_raw_to_gauss[3]; struct iio_mount_matrix magn_orient; + unsigned int suspended_sensors; }; /*register and associated bit definition*/ @@ -241,7 +262,13 @@ struct inv_mpu6050_state { #define INV_MPU6050_BIT_I2C_SLV3_DLY_EN 0x08 #define INV_MPU6050_BIT_DELAY_ES_SHADOW 0x80 +#define INV_MPU6050_REG_SIGNAL_PATH_RESET 0x68 +#define INV_MPU6050_BIT_TEMP_RST BIT(0) +#define INV_MPU6050_BIT_ACCEL_RST BIT(1) +#define INV_MPU6050_BIT_GYRO_RST BIT(2) + #define INV_MPU6050_REG_USER_CTRL 0x6A +#define INV_MPU6050_BIT_SIG_COND_RST 0x01 #define INV_MPU6050_BIT_FIFO_RST 0x04 #define INV_MPU6050_BIT_DMP_RST 0x08 #define INV_MPU6050_BIT_I2C_MST_EN 0x20 @@ -252,6 +279,7 @@ struct inv_mpu6050_state { #define INV_MPU6050_REG_PWR_MGMT_1 0x6B #define INV_MPU6050_BIT_H_RESET 0x80 #define INV_MPU6050_BIT_SLEEP 0x40 +#define INV_MPU6050_BIT_TEMP_DIS 0x08 #define INV_MPU6050_BIT_CLK_MASK 0x7 #define INV_MPU6050_REG_PWR_MGMT_2 0x6C @@ -276,12 +304,16 @@ struct inv_mpu6050_state { /* mpu6500 registers */ #define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D +#define INV_ICM20689_BITS_FIFO_SIZE_MAX 0xC0 #define INV_MPU6500_REG_ACCEL_OFFSET 0x77 /* delay time in milliseconds */ #define INV_MPU6050_POWER_UP_TIME 100 #define INV_MPU6050_TEMP_UP_TIME 100 -#define INV_MPU6050_SENSOR_UP_TIME 30 +#define INV_MPU6050_ACCEL_UP_TIME 20 +#define INV_MPU6050_GYRO_UP_TIME 35 +#define INV_MPU6050_GYRO_DOWN_TIME 150 +#define INV_MPU6050_SUSPEND_DELAY_MS 2000 /* delay time in microseconds */ #define INV_MPU6050_REG_UP_TIME_MIN 5000 @@ -293,6 +325,7 @@ struct inv_mpu6050_state { #define INV_MPU6050_MAX_ACCL_FS_PARAM 3 #define INV_MPU6050_THREE_AXIS 3 #define INV_MPU6050_GYRO_CONFIG_FSR_SHIFT 3 +#define INV_ICM20690_GYRO_CONFIG_FSR_SHIFT 2 #define INV_MPU6050_ACCL_CONFIG_FSR_SHIFT 3 #define INV_MPU6500_TEMP_OFFSET 7011 @@ -315,7 +348,6 @@ struct inv_mpu6050_state { #define INV_MPU6050_TS_PERIOD_JITTER 4 /* init parameters */ -#define INV_MPU6050_INIT_FIFO_RATE 50 #define INV_MPU6050_MAX_FIFO_RATE 1000 #define INV_MPU6050_MIN_FIFO_RATE 4 @@ -340,7 +372,11 @@ struct inv_mpu6050_state { #define INV_MPU9255_WHOAMI_VALUE 0x73 #define 
INV_MPU6515_WHOAMI_VALUE 0x74 #define INV_ICM20608_WHOAMI_VALUE 0xAF +#define INV_ICM20609_WHOAMI_VALUE 0xA6 +#define INV_ICM20689_WHOAMI_VALUE 0x98 #define INV_ICM20602_WHOAMI_VALUE 0x12 +#define INV_ICM20690_WHOAMI_VALUE 0x20 +#define INV_IAM20680_WHOAMI_VALUE 0xA9 /* scan element definition for generic MPU6xxx devices */ enum inv_mpu6050_scan { @@ -360,14 +396,14 @@ enum inv_mpu6050_scan { }; enum inv_mpu6050_filter_e { - INV_MPU6050_FILTER_256HZ_NOLPF2 = 0, - INV_MPU6050_FILTER_188HZ, - INV_MPU6050_FILTER_98HZ, - INV_MPU6050_FILTER_42HZ, + INV_MPU6050_FILTER_NOLPF2 = 0, + INV_MPU6050_FILTER_200HZ, + INV_MPU6050_FILTER_100HZ, + INV_MPU6050_FILTER_45HZ, INV_MPU6050_FILTER_20HZ, INV_MPU6050_FILTER_10HZ, INV_MPU6050_FILTER_5HZ, - INV_MPU6050_FILTER_2100HZ_NOLPF, + INV_MPU6050_FILTER_NOLPF, NUM_MPU6050_FILTER }; @@ -401,10 +437,10 @@ enum inv_mpu6050_clock_sel_e { irqreturn_t inv_mpu6050_read_fifo(int irq, void *p); int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type); -int inv_reset_fifo(struct iio_dev *indio_dev); -int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask); +int inv_mpu6050_prepare_fifo(struct inv_mpu6050_state *st, bool enable); +int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, + unsigned int mask); int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 val); -int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on); int inv_mpu_acpi_create_mux_client(struct i2c_client *client); void inv_mpu_acpi_delete_mux_client(struct i2c_client *client); int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name, diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c index 4f192352521e..f282e9cc34c5 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c @@ -44,9 +44,6 @@ #define INV_MPU_MAGN_REG_ASAY 0x11 #define INV_MPU_MAGN_REG_ASAZ 0x12 -/* Magnetometer maximum frequency */ -#define INV_MPU_MAGN_FREQ_HZ_MAX 50 - static bool inv_magn_supported(const struct inv_mpu6050_state *st) { switch (st->chip_type) { @@ -316,59 +313,32 @@ int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st) * * Returns 0 on success, a negative error code otherwise */ -int inv_mpu_magn_read(const struct inv_mpu6050_state *st, int axis, int *val) +int inv_mpu_magn_read(struct inv_mpu6050_state *st, int axis, int *val) { - unsigned int user_ctrl, status; - __be16 data[3]; + unsigned int status; + __be16 data; uint8_t addr; - uint8_t d; - unsigned int period_ms; int ret; /* quit if chip is not supported */ if (!inv_magn_supported(st)) return -ENODEV; - /* Mag data: X - Y - Z */ + /* Mag data: XH,XL,YH,YL,ZH,ZL */ switch (axis) { case IIO_MOD_X: addr = 0; break; case IIO_MOD_Y: - addr = 1; + addr = 2; break; case IIO_MOD_Z: - addr = 2; + addr = 4; break; default: return -EINVAL; } - - /* set sample rate to max mag freq */ - d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU_MAGN_FREQ_HZ_MAX); - ret = regmap_write(st->map, st->reg->sample_rate_div, d); - if (ret) - return ret; - - /* start i2c master, wait for xfer, stop */ - user_ctrl = st->chip_config.user_ctrl | INV_MPU6050_BIT_I2C_MST_EN; - ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl); - if (ret) - return ret; - - /* need to wait 2 periods + half-period margin */ - period_ms = 1000 / INV_MPU_MAGN_FREQ_HZ_MAX; - msleep(period_ms * 2 + period_ms / 2); - user_ctrl = st->chip_config.user_ctrl; - ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl); - if 
(ret) - return ret; - - /* restore sample rate */ - d = st->chip_config.divider; - ret = regmap_write(st->map, st->reg->sample_rate_div, d); - if (ret) - return ret; + addr += INV_MPU6050_REG_EXT_SENS_DATA; /* check i2c status and read raw data */ ret = regmap_read(st->map, INV_MPU6050_REG_I2C_MST_STATUS, &status); @@ -379,12 +349,11 @@ int inv_mpu_magn_read(const struct inv_mpu6050_state *st, int axis, int *val) status & INV_MPU6050_BIT_I2C_SLV1_NACK) return -EIO; - ret = regmap_bulk_read(st->map, INV_MPU6050_REG_EXT_SENS_DATA, - data, sizeof(data)); + ret = regmap_bulk_read(st->map, addr, &data, sizeof(data)); if (ret) return ret; - *val = (int16_t)be16_to_cpu(data[addr]); + *val = (int16_t)be16_to_cpu(data); return IIO_VAL_INT; } diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h index b41bd0578478..185c000c697c 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h @@ -10,6 +10,9 @@ #include "inv_mpu_iio.h" +/* Magnetometer maximum frequency */ +#define INV_MPU_MAGN_FREQ_HZ_MAX 50 + int inv_mpu_magn_probe(struct inv_mpu6050_state *st); /** @@ -31,6 +34,6 @@ int inv_mpu_magn_set_rate(const struct inv_mpu6050_state *st, int fifo_rate); int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st); -int inv_mpu_magn_read(const struct inv_mpu6050_state *st, int axis, int *val); +int inv_mpu_magn_read(struct inv_mpu6050_state *st, int axis, int *val); #endif /* INV_MPU_MAGN_H_ */ diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c index f9fdf4302a91..9511e4715e2c 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c @@ -90,63 +90,14 @@ static s64 inv_mpu6050_get_timestamp(struct inv_mpu6050_state *st) return ts; } -int inv_reset_fifo(struct iio_dev *indio_dev) +static int inv_reset_fifo(struct iio_dev *indio_dev) { int result; - u8 d; struct inv_mpu6050_state *st = iio_priv(indio_dev); - /* reset it timestamp validation */ - st->it_timestamp = 0; - - /* disable interrupt */ - result = regmap_write(st->map, st->reg->int_enable, 0); - if (result) { - dev_err(regmap_get_device(st->map), "int_enable failed %d\n", - result); - return result; - } - /* disable the sensor output to FIFO */ - result = regmap_write(st->map, st->reg->fifo_en, 0); - if (result) - goto reset_fifo_fail; - /* disable fifo reading */ - result = regmap_write(st->map, st->reg->user_ctrl, - st->chip_config.user_ctrl); - if (result) - goto reset_fifo_fail; - - /* reset FIFO*/ - d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_RST; - result = regmap_write(st->map, st->reg->user_ctrl, d); - if (result) - goto reset_fifo_fail; - - /* enable interrupt */ - if (st->chip_config.accl_fifo_enable || - st->chip_config.gyro_fifo_enable || - st->chip_config.magn_fifo_enable) { - result = regmap_write(st->map, st->reg->int_enable, - INV_MPU6050_BIT_DATA_RDY_EN); - if (result) - return result; - } - /* enable FIFO reading */ - d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_EN; - result = regmap_write(st->map, st->reg->user_ctrl, d); - if (result) - goto reset_fifo_fail; - /* enable sensor output to FIFO */ - d = 0; - if (st->chip_config.gyro_fifo_enable) - d |= INV_MPU6050_BITS_GYRO_OUT; - if (st->chip_config.accl_fifo_enable) - d |= INV_MPU6050_BIT_ACCEL_OUT; - if (st->chip_config.temp_fifo_enable) - d |= INV_MPU6050_BIT_TEMP_OUT; - if (st->chip_config.magn_fifo_enable) - d |= INV_MPU6050_BIT_SLAVE_0; - result = regmap_write(st->map, 
st->reg->fifo_en, d); + /* disable fifo and reenable it */ + inv_mpu6050_prepare_fifo(st, false); + result = inv_mpu6050_prepare_fifo(st, true); if (result) goto reset_fifo_fail; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c index ec102d5a5c77..673b198e6368 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c @@ -4,6 +4,8 @@ */ #include <linux/module.h> #include <linux/acpi.h> +#include <linux/of.h> +#include <linux/property.h> #include <linux/spi/spi.h> #include <linux/regmap.h> #include <linux/iio/iio.h> @@ -19,10 +21,6 @@ static int inv_mpu_i2c_disable(struct iio_dev *indio_dev) struct inv_mpu6050_state *st = iio_priv(indio_dev); int ret = 0; - ret = inv_mpu6050_set_power_itg(st, true); - if (ret) - return ret; - if (st->reg->i2c_if) { ret = regmap_write(st->map, st->reg->i2c_if, INV_ICM20602_BIT_I2C_IF_DIS); @@ -31,27 +29,24 @@ static int inv_mpu_i2c_disable(struct iio_dev *indio_dev) ret = regmap_write(st->map, st->reg->user_ctrl, st->chip_config.user_ctrl); } - if (ret) { - inv_mpu6050_set_power_itg(st, false); - return ret; - } - return inv_mpu6050_set_power_itg(st, false); + return ret; } static int inv_mpu_probe(struct spi_device *spi) { + const void *match; struct regmap *regmap; const struct spi_device_id *spi_id; - const struct acpi_device_id *acpi_id; const char *name = NULL; enum inv_devices chip_type; if ((spi_id = spi_get_device_id(spi))) { chip_type = (enum inv_devices)spi_id->driver_data; name = spi_id->name; - } else if ((acpi_id = acpi_match_device(spi->dev.driver->acpi_match_table, &spi->dev))) { - chip_type = (enum inv_devices)acpi_id->driver_data; + } else if ((match = device_get_match_data(&spi->dev))) { + chip_type = (enum inv_devices)match; + name = dev_name(&spi->dev); } else { return -ENODEV; } @@ -74,15 +69,69 @@ static int inv_mpu_probe(struct spi_device *spi) static const struct spi_device_id inv_mpu_id[] = { {"mpu6000", INV_MPU6000}, {"mpu6500", INV_MPU6500}, + {"mpu6515", INV_MPU6515}, {"mpu9250", INV_MPU9250}, {"mpu9255", INV_MPU9255}, {"icm20608", INV_ICM20608}, + {"icm20609", INV_ICM20609}, + {"icm20689", INV_ICM20689}, {"icm20602", INV_ICM20602}, + {"icm20690", INV_ICM20690}, + {"iam20680", INV_IAM20680}, {} }; MODULE_DEVICE_TABLE(spi, inv_mpu_id); +static const struct of_device_id inv_of_match[] = { + { + .compatible = "invensense,mpu6000", + .data = (void *)INV_MPU6000 + }, + { + .compatible = "invensense,mpu6500", + .data = (void *)INV_MPU6500 + }, + { + .compatible = "invensense,mpu6515", + .data = (void *)INV_MPU6515 + }, + { + .compatible = "invensense,mpu9250", + .data = (void *)INV_MPU9250 + }, + { + .compatible = "invensense,mpu9255", + .data = (void *)INV_MPU9255 + }, + { + .compatible = "invensense,icm20608", + .data = (void *)INV_ICM20608 + }, + { + .compatible = "invensense,icm20609", + .data = (void *)INV_ICM20609 + }, + { + .compatible = "invensense,icm20689", + .data = (void *)INV_ICM20689 + }, + { + .compatible = "invensense,icm20602", + .data = (void *)INV_ICM20602 + }, + { + .compatible = "invensense,icm20690", + .data = (void *)INV_ICM20690 + }, + { + .compatible = "invensense,iam20680", + .data = (void *)INV_IAM20680 + }, + { } +}; +MODULE_DEVICE_TABLE(of, inv_of_match); + static const struct acpi_device_id inv_acpi_match[] = { {"INVN6000", INV_MPU6000}, { }, @@ -93,6 +142,7 @@ static struct spi_driver inv_mpu_driver = { .probe = inv_mpu_probe, .id_table = inv_mpu_id, .driver = { + .of_match_table = inv_of_match, .acpi_match_table 
= ACPI_PTR(inv_acpi_match), .name = "inv-mpu6000-spi", .pm = &inv_mpu_pmops, diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c index 5199fe790c30..f7b5a70be30f 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c @@ -3,11 +3,13 @@ * Copyright (C) 2012 Invensense, Inc. */ +#include <linux/pm_runtime.h> #include "inv_mpu_iio.h" -static void inv_scan_query_mpu6050(struct iio_dev *indio_dev) +static unsigned int inv_scan_query_mpu6050(struct iio_dev *indio_dev) { struct inv_mpu6050_state *st = iio_priv(indio_dev); + unsigned int mask; st->chip_config.gyro_fifo_enable = test_bit(INV_MPU6050_SCAN_GYRO_X, @@ -27,17 +29,28 @@ static void inv_scan_query_mpu6050(struct iio_dev *indio_dev) st->chip_config.temp_fifo_enable = test_bit(INV_MPU6050_SCAN_TEMP, indio_dev->active_scan_mask); + + mask = 0; + if (st->chip_config.gyro_fifo_enable) + mask |= INV_MPU6050_SENSOR_GYRO; + if (st->chip_config.accl_fifo_enable) + mask |= INV_MPU6050_SENSOR_ACCL; + if (st->chip_config.temp_fifo_enable) + mask |= INV_MPU6050_SENSOR_TEMP; + + return mask; } -static void inv_scan_query_mpu9x50(struct iio_dev *indio_dev) +static unsigned int inv_scan_query_mpu9x50(struct iio_dev *indio_dev) { struct inv_mpu6050_state *st = iio_priv(indio_dev); + unsigned int mask; - inv_scan_query_mpu6050(indio_dev); + mask = inv_scan_query_mpu6050(indio_dev); /* no magnetometer if i2c auxiliary bus is used */ if (st->magn_disabled) - return; + return mask; st->chip_config.magn_fifo_enable = test_bit(INV_MPU9X50_SCAN_MAGN_X, @@ -46,9 +59,13 @@ static void inv_scan_query_mpu9x50(struct iio_dev *indio_dev) indio_dev->active_scan_mask) || test_bit(INV_MPU9X50_SCAN_MAGN_Z, indio_dev->active_scan_mask); + if (st->chip_config.magn_fifo_enable) + mask |= INV_MPU6050_SENSOR_MAGN; + + return mask; } -static void inv_scan_query(struct iio_dev *indio_dev) +static unsigned int inv_scan_query(struct iio_dev *indio_dev) { struct inv_mpu6050_state *st = iio_priv(indio_dev); @@ -84,6 +101,54 @@ static unsigned int inv_compute_skip_samples(const struct inv_mpu6050_state *st) return skip_samples; } +int inv_mpu6050_prepare_fifo(struct inv_mpu6050_state *st, bool enable) +{ + uint8_t d; + int ret; + + if (enable) { + st->it_timestamp = 0; + /* reset FIFO */ + d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_RST; + ret = regmap_write(st->map, st->reg->user_ctrl, d); + if (ret) + return ret; + /* enable sensor output to FIFO */ + d = 0; + if (st->chip_config.gyro_fifo_enable) + d |= INV_MPU6050_BITS_GYRO_OUT; + if (st->chip_config.accl_fifo_enable) + d |= INV_MPU6050_BIT_ACCEL_OUT; + if (st->chip_config.temp_fifo_enable) + d |= INV_MPU6050_BIT_TEMP_OUT; + if (st->chip_config.magn_fifo_enable) + d |= INV_MPU6050_BIT_SLAVE_0; + ret = regmap_write(st->map, st->reg->fifo_en, d); + if (ret) + return ret; + /* enable FIFO reading */ + d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_EN; + ret = regmap_write(st->map, st->reg->user_ctrl, d); + if (ret) + return ret; + /* enable interrupt */ + ret = regmap_write(st->map, st->reg->int_enable, + INV_MPU6050_BIT_DATA_RDY_EN); + } else { + ret = regmap_write(st->map, st->reg->int_enable, 0); + if (ret) + return ret; + ret = regmap_write(st->map, st->reg->fifo_en, 0); + if (ret) + return ret; + /* restore user_ctrl for disabling FIFO reading */ + ret = regmap_write(st->map, st->reg->user_ctrl, + st->chip_config.user_ctrl); + } + + return ret; +} + /** * inv_mpu6050_set_enable() - enable chip functions. 
* @indio_dev: Device driver instance. @@ -92,84 +157,43 @@ static unsigned int inv_compute_skip_samples(const struct inv_mpu6050_state *st) static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable) { struct inv_mpu6050_state *st = iio_priv(indio_dev); - uint8_t d; + struct device *pdev = regmap_get_device(st->map); + unsigned int scan; int result; if (enable) { - result = inv_mpu6050_set_power_itg(st, true); - if (result) + scan = inv_scan_query(indio_dev); + result = pm_runtime_get_sync(pdev); + if (result < 0) { + pm_runtime_put_noidle(pdev); return result; - inv_scan_query(indio_dev); - if (st->chip_config.gyro_fifo_enable) { - result = inv_mpu6050_switch_engine(st, true, - INV_MPU6050_BIT_PWR_GYRO_STBY); - if (result) - goto error_power_off; - } - if (st->chip_config.accl_fifo_enable) { - result = inv_mpu6050_switch_engine(st, true, - INV_MPU6050_BIT_PWR_ACCL_STBY); - if (result) - goto error_gyro_off; } - if (st->chip_config.magn_fifo_enable) { - d = st->chip_config.user_ctrl | - INV_MPU6050_BIT_I2C_MST_EN; - result = regmap_write(st->map, st->reg->user_ctrl, d); - if (result) - goto error_accl_off; - st->chip_config.user_ctrl = d; - } - st->skip_samples = inv_compute_skip_samples(st); - result = inv_reset_fifo(indio_dev); + /* + * In case autosuspend didn't trigger, turn off first not + * required sensors. + */ + result = inv_mpu6050_switch_engine(st, false, ~scan); if (result) - goto error_magn_off; - } else { - result = regmap_write(st->map, st->reg->fifo_en, 0); - if (result) - goto error_magn_off; - - result = regmap_write(st->map, st->reg->int_enable, 0); - if (result) - goto error_magn_off; - - d = st->chip_config.user_ctrl & ~INV_MPU6050_BIT_I2C_MST_EN; - result = regmap_write(st->map, st->reg->user_ctrl, d); - if (result) - goto error_magn_off; - st->chip_config.user_ctrl = d; - - result = inv_mpu6050_switch_engine(st, false, - INV_MPU6050_BIT_PWR_ACCL_STBY); + goto error_power_off; + result = inv_mpu6050_switch_engine(st, true, scan); if (result) - goto error_accl_off; - - result = inv_mpu6050_switch_engine(st, false, - INV_MPU6050_BIT_PWR_GYRO_STBY); + goto error_power_off; + st->skip_samples = inv_compute_skip_samples(st); + result = inv_mpu6050_prepare_fifo(st, true); if (result) - goto error_gyro_off; - - result = inv_mpu6050_set_power_itg(st, false); + goto error_power_off; + } else { + result = inv_mpu6050_prepare_fifo(st, false); if (result) goto error_power_off; + pm_runtime_mark_last_busy(pdev); + pm_runtime_put_autosuspend(pdev); } return 0; -error_magn_off: - /* always restore user_ctrl to disable fifo properly */ - st->chip_config.user_ctrl &= ~INV_MPU6050_BIT_I2C_MST_EN; - regmap_write(st->map, st->reg->user_ctrl, st->chip_config.user_ctrl); -error_accl_off: - if (st->chip_config.accl_fifo_enable) - inv_mpu6050_switch_engine(st, false, - INV_MPU6050_BIT_PWR_ACCL_STBY); -error_gyro_off: - if (st->chip_config.gyro_fifo_enable) - inv_mpu6050_switch_engine(st, false, - INV_MPU6050_BIT_PWR_GYRO_STBY); error_power_off: - inv_mpu6050_set_power_itg(st, false); + pm_runtime_put_autosuspend(pdev); return result; } diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h index 9c3486a8134f..f2113a63721a 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h @@ -230,8 +230,8 @@ enum st_lsm6dsx_ext_sensor_id { * @i2c_addr: I2c slave address list. * @wai: Wai address info. * @id: external sensor id. - * @odr: Output data rate of the sensor [Hz]. 
- * @gain: Configured sensor sensitivity. + * @odr_table: Output data rate of the sensor [Hz]. + * @fs_table: Configured sensor sensitivity table depending on full scale. * @temp_comp: Temperature compensation register info (addr + mask). * @pwr_table: Power on register info (addr + mask). * @off_canc: Offset cancellation register info (addr + mask). diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c index eea555617d4a..95ddd19d1aa7 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c @@ -464,9 +464,10 @@ st_lsm6dsx_shub_read_oneshot(struct st_lsm6dsx_sensor *sensor, len = min_t(int, sizeof(data), ch->scan_type.realbits >> 3); err = st_lsm6dsx_shub_read(sensor, ch->address, data, len); + if (err < 0) + return err; - st_lsm6dsx_shub_set_enable(sensor, false); - + err = st_lsm6dsx_shub_set_enable(sensor, false); if (err < 0) return err; diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 65ff0d067018..eac63c1bb8da 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -301,11 +301,14 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct iio_dev *indio_dev = file->private_data; - char buf[20]; unsigned val = 0; - ssize_t len; int ret; + if (*ppos > 0) + return simple_read_from_buffer(userbuf, count, ppos, + indio_dev->read_buf, + indio_dev->read_buf_len); + ret = indio_dev->info->debugfs_reg_access(indio_dev, indio_dev->cached_reg_addr, 0, &val); @@ -314,9 +317,13 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf, return ret; } - len = snprintf(buf, sizeof(buf), "0x%X\n", val); + indio_dev->read_buf_len = snprintf(indio_dev->read_buf, + sizeof(indio_dev->read_buf), + "0x%X\n", val); - return simple_read_from_buffer(userbuf, count, ppos, buf, len); + return simple_read_from_buffer(userbuf, count, ppos, + indio_dev->read_buf, + indio_dev->read_buf_len); } static ssize_t iio_debugfs_write_reg(struct file *file, @@ -769,17 +776,18 @@ static ssize_t iio_read_channel_info_avail(struct device *dev, } /** - * iio_str_to_fixpoint() - Parse a fixed-point number from a string + * __iio_str_to_fixpoint() - Parse a fixed-point number from a string * @str: The string to parse * @fract_mult: Multiplier for the first decimal place, should be a power of 10 * @integer: The integer part of the number * @fract: The fractional part of the number + * @scale_db: True if this should parse as dB * * Returns 0 on success, or a negative error code if the string could not be * parsed. */ -int iio_str_to_fixpoint(const char *str, int fract_mult, - int *integer, int *fract) +static int __iio_str_to_fixpoint(const char *str, int fract_mult, + int *integer, int *fract, bool scale_db) { int i = 0, f = 0; bool integer_part = true, negative = false; @@ -810,6 +818,14 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, break; else return -EINVAL; + } else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) { + /* Ignore the dB suffix */ + str += sizeof(" dB") - 1; + continue; + } else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) { + /* Ignore the dB suffix */ + str += sizeof("dB") - 1; + continue; } else if (*str == '.' 
&& integer_part) { integer_part = false; } else { @@ -830,6 +846,22 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, return 0; } + +/** + * iio_str_to_fixpoint() - Parse a fixed-point number from a string + * @str: The string to parse + * @fract_mult: Multiplier for the first decimal place, should be a power of 10 + * @integer: The integer part of the number + * @fract: The fractional part of the number + * + * Returns 0 on success, or a negative error code if the string could not be + * parsed. + */ +int iio_str_to_fixpoint(const char *str, int fract_mult, + int *integer, int *fract) +{ + return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false); +} EXPORT_SYMBOL_GPL(iio_str_to_fixpoint); static ssize_t iio_write_channel_info(struct device *dev, @@ -842,6 +874,7 @@ static ssize_t iio_write_channel_info(struct device *dev, int ret, fract_mult = 100000; int integer, fract = 0; bool is_char = false; + bool scale_db = false; /* Assumes decimal - precision based on number of digits */ if (!indio_dev->info->write_raw) @@ -853,6 +886,9 @@ static ssize_t iio_write_channel_info(struct device *dev, case IIO_VAL_INT: fract_mult = 0; break; + case IIO_VAL_INT_PLUS_MICRO_DB: + scale_db = true; + /* fall through */ case IIO_VAL_INT_PLUS_MICRO: fract_mult = 100000; break; @@ -877,6 +913,10 @@ static ssize_t iio_write_channel_info(struct device *dev, if (ret) return ret; } + ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract, + scale_db); + if (ret) + return ret; ret = indio_dev->info->write_raw(indio_dev, this_attr->c, integer, fract, this_attr->address); diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index 9968f982fbc7..74970f18a93b 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -43,6 +43,16 @@ config ADUX1020 To compile this driver as a module, choose M here: the module will be called adux1020. +config AL3010 + tristate "AL3010 ambient light sensor" + depends on I2C + help + Say Y here if you want to build a driver for the Dyna Image AL3010 + ambient light sensor. + + To compile this driver as a module, choose M here: the + module will be called al3010. + config AL3320A tristate "AL3320A ambient light sensor" depends on I2C @@ -159,6 +169,17 @@ config IIO_CROS_EC_LIGHT_PROX To compile this driver as a module, choose M here: the module will be called cros_ec_light_prox. +config GP2AP002 + tristate "Sharp GP2AP002 Proximity/ALS sensor" + depends on I2C + select REGMAP + help + Say Y here if you have a Sharp GP2AP002 proximity/ALS combo-chip + hooked to an I2C bus. + + To compile this driver as a module, choose M here: the + module will be called gp2ap002. 
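(Side note on the industrialio-core.c hunk above: it extends the fixed-point parser so that writes to channels reporting IIO_VAL_INT_PLUS_MICRO_DB may carry an optional "dB" suffix, which is skipped before the integer and fractional parts are parsed. Below is a rough userspace illustration of that suffix handling; the function and variable names are assumptions for the sketch, and it is not the kernel helper itself.)

/* Sketch only (userspace C): the idea behind the optional "dB" suffix
 * handling added to the IIO fixed-point parser.  Unlike the kernel helper,
 * this returns the raw fractional digits instead of scaling by fract_mult.
 */
#include <stdio.h>
#include <string.h>

static int parse_db_value(const char *str, int *integer, int *fract)
{
	char buf[32];
	size_t len = strlen(str);

	if (len >= sizeof(buf))
		return -1;
	memcpy(buf, str, len + 1);

	/* Accept and discard an optional " dB" or "dB" suffix. */
	if (len >= 3 && !strcmp(buf + len - 3, " dB"))
		buf[len - 3] = '\0';
	else if (len >= 2 && !strcmp(buf + len - 2, "dB"))
		buf[len - 2] = '\0';

	*fract = 0;
	if (sscanf(buf, "%d.%d", integer, fract) < 1)
		return -1;
	return 0;
}

int main(void)
{
	int i, f;

	if (!parse_db_value("-3.75 dB", &i, &f))
		printf("integer=%d fract_digits=%d\n", i, f);
	return 0;
}

(A plain number without the suffix still parses the same way, which matches the fall-through from IIO_VAL_INT_PLUS_MICRO_DB to IIO_VAL_INT_PLUS_MICRO in the hunk.)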
+ config GP2AP020A00F tristate "Sharp GP2AP020A00F Proximity/ALS sensor" depends on I2C diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile index c98d1cefb861..5c1ebaf578fb 100644 --- a/drivers/iio/light/Makefile +++ b/drivers/iio/light/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_ACPI_ALS) += acpi-als.o obj-$(CONFIG_ADJD_S311) += adjd_s311.o obj-$(CONFIG_ADUX1020) += adux1020.o +obj-$(CONFIG_AL3010) += al3010.o obj-$(CONFIG_AL3320A) += al3320a.o obj-$(CONFIG_APDS9300) += apds9300.o obj-$(CONFIG_APDS9960) += apds9960.o @@ -18,6 +19,7 @@ obj-$(CONFIG_CM3323) += cm3323.o obj-$(CONFIG_CM3605) += cm3605.o obj-$(CONFIG_CM36651) += cm36651.o obj-$(CONFIG_IIO_CROS_EC_LIGHT_PROX) += cros_ec_light_prox.o +obj-$(CONFIG_GP2AP002) += gp2ap002.o obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o diff --git a/drivers/iio/light/al3010.c b/drivers/iio/light/al3010.c new file mode 100644 index 000000000000..b1ed7658cc46 --- /dev/null +++ b/drivers/iio/light/al3010.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AL3010 - Dyna Image Ambient Light Sensor + * + * Copyright (c) 2014, Intel Corporation. + * Copyright (c) 2016, Dyna-Image Corp. + * Copyright (c) 2020, David Heidelberg, Michał Mirosław, Dmitry Osipenko + * + * IIO driver for AL3010 (7-bit I2C slave address 0x1C). + * + * TODO: interrupt support, thresholds + * When the driver will get support for interrupt handling, then interrupt + * will need to be disabled before turning sensor OFF in order to avoid + * potential races with the interrupt handling. + */ + +#include <linux/bitfield.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/of.h> + +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> + +#define AL3010_DRV_NAME "al3010" + +#define AL3010_REG_SYSTEM 0x00 +#define AL3010_REG_DATA_LOW 0x0c +#define AL3010_REG_CONFIG 0x10 + +#define AL3010_CONFIG_DISABLE 0x00 +#define AL3010_CONFIG_ENABLE 0x01 + +#define AL3010_GAIN_MASK GENMASK(6,4) + +#define AL3010_SCALE_AVAILABLE "1.1872 0.2968 0.0742 0.018" + +enum al3xxxx_range { + AL3XXX_RANGE_1, /* 77806 lx */ + AL3XXX_RANGE_2, /* 19542 lx */ + AL3XXX_RANGE_3, /* 4863 lx */ + AL3XXX_RANGE_4 /* 1216 lx */ +}; + +static const int al3010_scales[][2] = { + {0, 1187200}, {0, 296800}, {0, 74200}, {0, 18600} +}; + +struct al3010_data { + struct i2c_client *client; +}; + +static const struct iio_chan_spec al3010_channels[] = { + { + .type = IIO_LIGHT, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + } +}; + +static IIO_CONST_ATTR(in_illuminance_scale_available, AL3010_SCALE_AVAILABLE); + +static struct attribute *al3010_attributes[] = { + &iio_const_attr_in_illuminance_scale_available.dev_attr.attr, + NULL, +}; + +static const struct attribute_group al3010_attribute_group = { + .attrs = al3010_attributes, +}; + +static int al3010_set_pwr(struct i2c_client *client, bool pwr) +{ + u8 val = pwr ? 
AL3010_CONFIG_ENABLE : AL3010_CONFIG_DISABLE; + return i2c_smbus_write_byte_data(client, AL3010_REG_SYSTEM, val); +} + +static void al3010_set_pwr_off(void *_data) +{ + struct al3010_data *data = _data; + + al3010_set_pwr(data->client, false); +} + +static int al3010_init(struct al3010_data *data) +{ + int ret; + + ret = al3010_set_pwr(data->client, true); + + if (ret < 0) + return ret; + + ret = i2c_smbus_write_byte_data(data->client, AL3010_REG_CONFIG, + FIELD_PREP(AL3010_GAIN_MASK, + AL3XXX_RANGE_3)); + if (ret < 0) + return ret; + + return 0; +} + +static int al3010_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val, + int *val2, long mask) +{ + struct al3010_data *data = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + /* + * ALS ADC value is stored in two adjacent registers: + * - low byte of output is stored at AL3010_REG_DATA_LOW + * - high byte of output is stored at AL3010_REG_DATA_LOW + 1 + */ + ret = i2c_smbus_read_word_data(data->client, + AL3010_REG_DATA_LOW); + if (ret < 0) + return ret; + *val = ret; + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + ret = i2c_smbus_read_byte_data(data->client, + AL3010_REG_CONFIG); + if (ret < 0) + return ret; + + ret = FIELD_GET(AL3010_GAIN_MASK, ret); + *val = al3010_scales[ret][0]; + *val2 = al3010_scales[ret][1]; + + return IIO_VAL_INT_PLUS_MICRO; + } + return -EINVAL; +} + +static int al3010_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, + int val2, long mask) +{ + struct al3010_data *data = iio_priv(indio_dev); + int i; + + switch (mask) { + case IIO_CHAN_INFO_SCALE: + for (i = 0; i < ARRAY_SIZE(al3010_scales); i++) { + if (val != al3010_scales[i][0] || + val2 != al3010_scales[i][1]) + continue; + + return i2c_smbus_write_byte_data(data->client, + AL3010_REG_CONFIG, + FIELD_PREP(AL3010_GAIN_MASK, i)); + } + break; + } + return -EINVAL; +} + +static const struct iio_info al3010_info = { + .read_raw = al3010_read_raw, + .write_raw = al3010_write_raw, + .attrs = &al3010_attribute_group, +}; + +static int al3010_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct al3010_data *data; + struct iio_dev *indio_dev; + int ret; + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + data = iio_priv(indio_dev); + i2c_set_clientdata(client, indio_dev); + data->client = client; + + indio_dev->dev.parent = &client->dev; + indio_dev->info = &al3010_info; + indio_dev->name = AL3010_DRV_NAME; + indio_dev->channels = al3010_channels; + indio_dev->num_channels = ARRAY_SIZE(al3010_channels); + indio_dev->modes = INDIO_DIRECT_MODE; + + ret = al3010_init(data); + if (ret < 0) { + dev_err(&client->dev, "al3010 chip init failed\n"); + return ret; + } + + ret = devm_add_action_or_reset(&client->dev, + al3010_set_pwr_off, + data); + if (ret < 0) + return ret; + + return devm_iio_device_register(&client->dev, indio_dev); +} + +static int __maybe_unused al3010_suspend(struct device *dev) +{ + return al3010_set_pwr(to_i2c_client(dev), false); +} + +static int __maybe_unused al3010_resume(struct device *dev) +{ + return al3010_set_pwr(to_i2c_client(dev), true); +} + +static SIMPLE_DEV_PM_OPS(al3010_pm_ops, al3010_suspend, al3010_resume); + +static const struct i2c_device_id al3010_id[] = { + {"al3010", }, + {} +}; +MODULE_DEVICE_TABLE(i2c, al3010_id); + +static const struct of_device_id al3010_of_match[] = { + { .compatible = "dynaimage,al3010", }, + {}, +}; +MODULE_DEVICE_TABLE(of, 
al3010_of_match); + +static struct i2c_driver al3010_driver = { + .driver = { + .name = AL3010_DRV_NAME, + .of_match_table = al3010_of_match, + .pm = &al3010_pm_ops, + }, + .probe = al3010_probe, + .id_table = al3010_id, +}; +module_i2c_driver(al3010_driver); + +MODULE_AUTHOR("Daniel Baluta <daniel.baluta@nxp.com>"); +MODULE_AUTHOR("David Heidelberg <david@ixit.cz>"); +MODULE_DESCRIPTION("AL3010 Ambient Light Sensor driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/light/al3320a.c b/drivers/iio/light/al3320a.c index a21aa99e74e4..20ed0a73c390 100644 --- a/drivers/iio/light/al3320a.c +++ b/drivers/iio/light/al3320a.c @@ -7,11 +7,15 @@ * IIO driver for AL3320A (7-bit I2C slave address 0x1C). * * TODO: interrupt support, thresholds + * When the driver will get support for interrupt handling, then interrupt + * will need to be disabled before turning sensor OFF in order to avoid + * potential races with the interrupt handling. */ -#include <linux/module.h> -#include <linux/init.h> +#include <linux/bitfield.h> #include <linux/i2c.h> +#include <linux/module.h> +#include <linux/of.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> @@ -36,8 +40,7 @@ #define AL3320A_CONFIG_DISABLE 0x00 #define AL3320A_CONFIG_ENABLE 0x01 -#define AL3320A_GAIN_SHIFT 1 -#define AL3320A_GAIN_MASK (BIT(2) | BIT(1)) +#define AL3320A_GAIN_MASK GENMASK(2, 1) /* chip params default values */ #define AL3320A_DEFAULT_MEAN_TIME 4 @@ -79,18 +82,31 @@ static const struct attribute_group al3320a_attribute_group = { .attrs = al3320a_attributes, }; +static int al3320a_set_pwr(struct i2c_client *client, bool pwr) +{ + u8 val = pwr ? AL3320A_CONFIG_ENABLE : AL3320A_CONFIG_DISABLE; + return i2c_smbus_write_byte_data(client, AL3320A_REG_CONFIG, val); +} + +static void al3320a_set_pwr_off(void *_data) +{ + struct al3320a_data *data = _data; + + al3320a_set_pwr(data->client, false); +} + static int al3320a_init(struct al3320a_data *data) { int ret; - /* power on */ - ret = i2c_smbus_write_byte_data(data->client, AL3320A_REG_CONFIG, - AL3320A_CONFIG_ENABLE); + ret = al3320a_set_pwr(data->client, true); + if (ret < 0) return ret; ret = i2c_smbus_write_byte_data(data->client, AL3320A_REG_CONFIG_RANGE, - AL3320A_RANGE_3 << AL3320A_GAIN_SHIFT); + FIELD_PREP(AL3320A_GAIN_MASK, + AL3320A_RANGE_3)); if (ret < 0) return ret; @@ -133,7 +149,7 @@ static int al3320a_read_raw(struct iio_dev *indio_dev, if (ret < 0) return ret; - ret = (ret & AL3320A_GAIN_MASK) >> AL3320A_GAIN_SHIFT; + ret = FIELD_GET(AL3320A_GAIN_MASK, ret); *val = al3320a_scales[ret][0]; *val2 = al3320a_scales[ret][1]; @@ -152,11 +168,13 @@ static int al3320a_write_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_SCALE: for (i = 0; i < ARRAY_SIZE(al3320a_scales); i++) { - if (val == al3320a_scales[i][0] && - val2 == al3320a_scales[i][1]) - return i2c_smbus_write_byte_data(data->client, + if (val != al3320a_scales[i][0] || + val2 != al3320a_scales[i][1]) + continue; + + return i2c_smbus_write_byte_data(data->client, AL3320A_REG_CONFIG_RANGE, - i << AL3320A_GAIN_SHIFT); + FIELD_PREP(AL3320A_GAIN_MASK, i)); } break; } @@ -196,27 +214,47 @@ static int al3320a_probe(struct i2c_client *client, dev_err(&client->dev, "al3320a chip init failed\n"); return ret; } + + ret = devm_add_action_or_reset(&client->dev, + al3320a_set_pwr_off, + data); + if (ret < 0) + return ret; + return devm_iio_device_register(&client->dev, indio_dev); } -static int al3320a_remove(struct i2c_client *client) +static int __maybe_unused al3320a_suspend(struct device *dev) +{ + 
 return al3320a_set_pwr(to_i2c_client(dev), false); +} + +static int __maybe_unused al3320a_resume(struct device *dev) { - return i2c_smbus_write_byte_data(client, AL3320A_REG_CONFIG, - AL3320A_CONFIG_DISABLE); + return al3320a_set_pwr(to_i2c_client(dev), true); } +static SIMPLE_DEV_PM_OPS(al3320a_pm_ops, al3320a_suspend, al3320a_resume); + static const struct i2c_device_id al3320a_id[] = { {"al3320a", 0}, {} }; MODULE_DEVICE_TABLE(i2c, al3320a_id); +static const struct of_device_id al3320a_of_match[] = { + { .compatible = "dynaimage,al3320a", }, + {}, +}; +MODULE_DEVICE_TABLE(of, al3320a_of_match); + static struct i2c_driver al3320a_driver = { .driver = { .name = AL3320A_DRV_NAME, + .of_match_table = al3320a_of_match, + .pm = &al3320a_pm_ops, }, .probe = al3320a_probe, - .remove = al3320a_remove, .id_table = al3320a_id, }; diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c new file mode 100644 index 000000000000..b7ef16b28280 --- /dev/null +++ b/drivers/iio/light/gp2ap002.c @@ -0,0 +1,720 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * These are the two Sharp GP2AP002 variants supported by this driver: + * GP2AP002A00F Ambient Light and Proximity Sensor + * GP2AP002S00F Proximity Sensor + * + * Copyright (C) 2020 Linaro Ltd. + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * Based partly on the code in Sony Ericsson's GP2AP00200F driver by + * Courtney Cavin and Oskar Andero in drivers/input/misc/gp2ap002a00f.c + * Based partly on a Samsung misc driver submitted by + * Donggeun Kim & Minkyu Kang in 2011: + * https://lore.kernel.org/lkml/1315556546-7445-1-git-send-email-dg77.kim@samsung.com/ + * Based partly on a submission by + * Jonathan Bakker and Paweł Chmiel in January 2019: + * https://lore.kernel.org/linux-input/20190125175045.22576-1-pawel.mikolaj.chmiel@gmail.com/ + * Based partly on code from the Samsung GT-S7710 by <mjchen@sta.samsung.com> + * Based partly on the code in LG Electronics GP2AP00200F driver by + * Kenobi Lee <sungyoung.lee@lge.com> and EunYoung Cho <ey.cho@lge.com> + */ +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/regmap.h> +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> +#include <linux/iio/events.h> +#include <linux/iio/consumer.h> /* To get our ADC channel */ +#include <linux/iio/types.h> /* To deal with our ADC channel */ +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/regulator/consumer.h> +#include <linux/pm_runtime.h> +#include <linux/interrupt.h> +#include <linux/bits.h> +#include <linux/math64.h> +#include <linux/pm.h> + +#define GP2AP002_PROX_CHANNEL 0 +#define GP2AP002_ALS_CHANNEL 1 + +/* ------------------------------------------------------------------------ */ +/* ADDRESS SYMBOL DATA Init R/W */ +/* D7 D6 D5 D4 D3 D2 D1 D0 */ +/* ------------------------------------------------------------------------ */ +/* 0 PROX X X X X X X X VO H'00 R */ +/* 1 GAIN X X X X LED0 X X X H'00 W */ +/* 2 HYS HYSD HYSC1 HYSC0 X HYSF3 HYSF2 HYSF1 HYSF0 H'00 W */ +/* 3 CYCLE X X CYCL2 CYCL1 CYCL0 OSC2 X X H'00 W */ +/* 4 OPMOD X X X ASD X X VCON SSD H'00 W */ +/* 6 CON X X X OCON1 OCON0 X X X H'00 W */ +/* ------------------------------------------------------------------------ */ +/* VO :Proximity sensing result(0: no detection, 1: detection) */ +/* LED0 :Select switch for LED driver's on-resistance (0:2x higher, 1:normal)*/ +/* HYSD/HYSF :Adjusts the receiver sensitivity */ +/* OSC :Select switch for internal clock frequency hopping (0:effective) */ +/* CYCL :Determine the detection 
cycle(typically 8ms, up to 128x) */ +/* SSD :Software Shutdown function(0:shutdown, 1:operating) */ +/* VCON :VOUT output method control(0:normal, 1:interrupt) */ +/* ASD :Select switch for analog sleep function(0:ineffective, 1:effective)*/ +/* OCON :Select switch for enabling/disabling VOUT (00:enable, 11:disable) */ + +#define GP2AP002_PROX 0x00 +#define GP2AP002_GAIN 0x01 +#define GP2AP002_HYS 0x02 +#define GP2AP002_CYCLE 0x03 +#define GP2AP002_OPMOD 0x04 +#define GP2AP002_CON 0x06 + +#define GP2AP002_PROX_VO_DETECT BIT(0) + +/* Setting this bit to 0 means 2x higher LED resistance */ +#define GP2AP002_GAIN_LED_NORMAL BIT(3) + +/* + * These bits adjusts the proximity sensitivity, determining characteristics + * of the detection distance and its hysteresis. + */ +#define GP2AP002_HYS_HYSD_SHIFT 7 +#define GP2AP002_HYS_HYSD_MASK BIT(7) +#define GP2AP002_HYS_HYSC_SHIFT 5 +#define GP2AP002_HYS_HYSC_MASK GENMASK(6, 5) +#define GP2AP002_HYS_HYSF_SHIFT 0 +#define GP2AP002_HYS_HYSF_MASK GENMASK(3, 0) +#define GP2AP002_HYS_MASK (GP2AP002_HYS_HYSD_MASK | \ + GP2AP002_HYS_HYSC_MASK | \ + GP2AP002_HYS_HYSF_MASK) + +/* + * These values determine the detection cycle response time + * 0: 8ms, 1: 16ms, 2: 32ms, 3: 64ms, 4: 128ms, + * 5: 256ms, 6: 512ms, 7: 1024ms + */ +#define GP2AP002_CYCLE_CYCL_SHIFT 3 +#define GP2AP002_CYCLE_CYCL_MASK GENMASK(5, 3) + +/* + * Select switch for internal clock frequency hopping + * 0: effective, + * 1: ineffective + */ +#define GP2AP002_CYCLE_OSC_EFFECTIVE 0 +#define GP2AP002_CYCLE_OSC_INEFFECTIVE BIT(2) +#define GP2AP002_CYCLE_OSC_MASK BIT(2) + +/* Analog sleep effective */ +#define GP2AP002_OPMOD_ASD BIT(4) +/* Enable chip */ +#define GP2AP002_OPMOD_SSD_OPERATING BIT(0) +/* IRQ mode */ +#define GP2AP002_OPMOD_VCON_IRQ BIT(1) +#define GP2AP002_OPMOD_MASK (BIT(0) | BIT(1) | BIT(4)) + +/* + * Select switch for enabling/disabling Vout pin + * 0: enable + * 2: force to go Low + * 3: force to go High + */ +#define GP2AP002_CON_OCON_SHIFT 3 +#define GP2AP002_CON_OCON_ENABLE (0x0 << GP2AP002_CON_OCON_SHIFT) +#define GP2AP002_CON_OCON_LOW (0x2 << GP2AP002_CON_OCON_SHIFT) +#define GP2AP002_CON_OCON_HIGH (0x3 << GP2AP002_CON_OCON_SHIFT) +#define GP2AP002_CON_OCON_MASK (0x3 << GP2AP002_CON_OCON_SHIFT) + +/** + * struct gp2ap002 - GP2AP002 state + * @map: regmap pointer for the i2c regmap + * @dev: pointer to parent device + * @vdd: regulator controlling VDD + * @vio: regulator controlling VIO + * @alsout: IIO ADC channel to convert the ALSOUT signal + * @hys_far: hysteresis control from device tree + * @hys_close: hysteresis control from device tree + * @is_gp2ap002s00f: this is the GP2AP002F variant of the chip + * @irq: the IRQ line used by this device + * @enabled: we cannot read the status of the hardware so we need to + * keep track of whether the event is enabled using this state variable + */ +struct gp2ap002 { + struct regmap *map; + struct device *dev; + struct regulator *vdd; + struct regulator *vio; + struct iio_channel *alsout; + u8 hys_far; + u8 hys_close; + bool is_gp2ap002s00f; + int irq; + bool enabled; +}; + +static irqreturn_t gp2ap002_prox_irq(int irq, void *d) +{ + struct iio_dev *indio_dev = d; + struct gp2ap002 *gp2ap002 = iio_priv(indio_dev); + u64 ev; + int val; + int ret; + + ret = regmap_read(gp2ap002->map, GP2AP002_PROX, &val); + if (ret) { + dev_err(gp2ap002->dev, "error reading proximity\n"); + goto err_retrig; + } + + if (val & GP2AP002_PROX_VO_DETECT) { + /* Close */ + dev_dbg(gp2ap002->dev, "close\n"); + ret = regmap_write(gp2ap002->map, 
GP2AP002_HYS, + gp2ap002->hys_far); + if (ret) + dev_err(gp2ap002->dev, + "error setting up proximity hysteresis\n"); + ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, GP2AP002_PROX_CHANNEL, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING); + } else { + /* Far */ + dev_dbg(gp2ap002->dev, "far\n"); + ret = regmap_write(gp2ap002->map, GP2AP002_HYS, + gp2ap002->hys_close); + if (ret) + dev_err(gp2ap002->dev, + "error setting up proximity hysteresis\n"); + ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, GP2AP002_PROX_CHANNEL, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING); + } + iio_push_event(indio_dev, ev, iio_get_time_ns(indio_dev)); + + /* + * After changing hysteresis, we need to wait for one detection + * cycle to see if anything changed, or we will just trigger the + * previous interrupt again. A detection cycle depends on the CYCLE + * register, we are hard-coding ~8 ms in probe() so wait some more + * than this, 20-30 ms. + */ + usleep_range(20000, 30000); + +err_retrig: + ret = regmap_write(gp2ap002->map, GP2AP002_CON, + GP2AP002_CON_OCON_ENABLE); + if (ret) + dev_err(gp2ap002->dev, "error setting up VOUT control\n"); + + return IRQ_HANDLED; +} + +/* + * This array maps current and lux. + * + * Ambient light sensing range is 3 to 55000 lux. + * + * This mapping is based on the following formula. + * illuminance = 10 ^ (current[mA] / 10) + * + * When the ADC measures 0, return 0 lux. + */ +static const u16 gp2ap002_illuminance_table[] = { + 0, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 16, 20, 25, 32, 40, 50, 63, 79, + 100, 126, 158, 200, 251, 316, 398, 501, 631, 794, 1000, 1259, 1585, + 1995, 2512, 3162, 3981, 5012, 6310, 7943, 10000, 12589, 15849, 19953, + 25119, 31623, 39811, 50119, +}; + +static int gp2ap002_get_lux(struct gp2ap002 *gp2ap002) +{ + int ret, res; + u16 lux; + + ret = iio_read_channel_processed(gp2ap002->alsout, &res); + if (ret < 0) + return ret; + + dev_dbg(gp2ap002->dev, "read %d mA from ADC\n", res); + + /* ensure we don't under/overflow */ + res = clamp(res, 0, (int)ARRAY_SIZE(gp2ap002_illuminance_table) - 1); + lux = gp2ap002_illuminance_table[res]; + + return (int)lux; +} + +static int gp2ap002_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct gp2ap002 *gp2ap002 = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + switch (chan->type) { + case IIO_LIGHT: + ret = gp2ap002_get_lux(gp2ap002); + if (ret < 0) + return ret; + *val = ret; + return IIO_VAL_INT; + default: + return -EINVAL; + } + default: + return -EINVAL; + } +} + +static int gp2ap002_init(struct gp2ap002 *gp2ap002) +{ + int ret; + + /* Set up the IR LED resistance */ + ret = regmap_write(gp2ap002->map, GP2AP002_GAIN, + GP2AP002_GAIN_LED_NORMAL); + if (ret) { + dev_err(gp2ap002->dev, "error setting up LED gain\n"); + return ret; + } + ret = regmap_write(gp2ap002->map, GP2AP002_HYS, gp2ap002->hys_far); + if (ret) { + dev_err(gp2ap002->dev, + "error setting up proximity hysteresis\n"); + return ret; + } + + /* Disable internal frequency hopping */ + ret = regmap_write(gp2ap002->map, GP2AP002_CYCLE, + GP2AP002_CYCLE_OSC_INEFFECTIVE); + if (ret) { + dev_err(gp2ap002->dev, + "error setting up internal frequency hopping\n"); + return ret; + } + + /* Enable chip and IRQ, disable analog sleep */ + ret = regmap_write(gp2ap002->map, GP2AP002_OPMOD, + GP2AP002_OPMOD_SSD_OPERATING | + GP2AP002_OPMOD_VCON_IRQ); + if (ret) { + dev_err(gp2ap002->dev, "error setting up operation mode\n"); + return ret; + } + + /* Interrupt on VOUT enabled */ + ret = 
regmap_write(gp2ap002->map, GP2AP002_CON, + GP2AP002_CON_OCON_ENABLE); + if (ret) + dev_err(gp2ap002->dev, "error setting up VOUT control\n"); + + return ret; +} + +static int gp2ap002_read_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir) +{ + struct gp2ap002 *gp2ap002 = iio_priv(indio_dev); + + /* + * We just keep track of this internally, as it is not possible to + * query the hardware. + */ + return gp2ap002->enabled; +} + +static int gp2ap002_write_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + int state) +{ + struct gp2ap002 *gp2ap002 = iio_priv(indio_dev); + + if (state) { + /* + * This will bring the regulators up (unless they are on + * already) and reintialize the sensor by using runtime_pm + * callbacks. + */ + pm_runtime_get_sync(gp2ap002->dev); + gp2ap002->enabled = true; + } else { + pm_runtime_mark_last_busy(gp2ap002->dev); + pm_runtime_put_autosuspend(gp2ap002->dev); + gp2ap002->enabled = false; + } + + return 0; +} + +static const struct iio_info gp2ap002_info = { + .read_raw = gp2ap002_read_raw, + .read_event_config = gp2ap002_read_event_config, + .write_event_config = gp2ap002_write_event_config, +}; + +static const struct iio_event_spec gp2ap002_events[] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_EITHER, + .mask_separate = BIT(IIO_EV_INFO_ENABLE), + }, +}; + +static const struct iio_chan_spec gp2ap002_channels[] = { + { + .type = IIO_PROXIMITY, + .event_spec = gp2ap002_events, + .num_event_specs = ARRAY_SIZE(gp2ap002_events), + }, + { + .type = IIO_LIGHT, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .channel = GP2AP002_ALS_CHANNEL, + }, +}; + +/* + * We need a special regmap because this hardware expects to + * write single bytes to registers but read a 16bit word on some + * variants and discard the lower 8 bits so combine + * i2c_smbus_read_word_data() with i2c_smbus_write_byte_data() + * selectively like this. + */ +static int gp2ap002_regmap_i2c_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + int ret; + + ret = i2c_smbus_read_word_data(i2c, reg); + if (ret < 0) + return ret; + + *val = (ret >> 8) & 0xFF; + + return 0; +} + +static int gp2ap002_regmap_i2c_write(void *context, unsigned int reg, + unsigned int val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + + return i2c_smbus_write_byte_data(i2c, reg, val); +} + +static struct regmap_bus gp2ap002_regmap_bus = { + .reg_read = gp2ap002_regmap_i2c_read, + .reg_write = gp2ap002_regmap_i2c_write, +}; + +static int gp2ap002_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct gp2ap002 *gp2ap002; + struct iio_dev *indio_dev; + struct device *dev = &client->dev; + enum iio_chan_type ch_type; + static const struct regmap_config config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = GP2AP002_CON, + }; + struct regmap *regmap; + int num_chan; + const char *compat; + u8 val; + int ret; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*gp2ap002)); + if (!indio_dev) + return -ENOMEM; + i2c_set_clientdata(client, indio_dev); + + gp2ap002 = iio_priv(indio_dev); + gp2ap002->dev = dev; + + /* + * Check the device compatible like this makes it possible to use + * ACPI PRP0001 for registering the sensor using device tree + * properties. 
+ */ + ret = device_property_read_string(dev, "compatible", &compat); + if (ret) { + dev_err(dev, "cannot check compatible\n"); + return ret; + } + gp2ap002->is_gp2ap002s00f = !strcmp(compat, "sharp,gp2ap002s00f"); + + regmap = devm_regmap_init(dev, &gp2ap002_regmap_bus, dev, &config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to register i2c regmap %d\n", + (int)PTR_ERR(regmap)); + return PTR_ERR(regmap); + } + gp2ap002->map = regmap; + + /* + * The hysteresis settings are coded into the device tree as values + * to be written into the hysteresis register. The datasheet defines + * modes "A", "B1" and "B2" with fixed values to be used, but vendor + * code trees for actual devices tweak these values and refer to + * modes named things like "B1.5". To be able to support any devices, + * we allow passing an arbitrary hysteresis setting for "near" and + * "far". + */ + + /* Check the device tree for the IR LED hysteresis */ + ret = device_property_read_u8(dev, "sharp,proximity-far-hysteresis", + &val); + if (ret) { + dev_err(dev, "failed to obtain proximity far setting\n"); + return ret; + } + dev_dbg(dev, "proximity far setting %02x\n", val); + gp2ap002->hys_far = val; + + ret = device_property_read_u8(dev, "sharp,proximity-close-hysteresis", + &val); + if (ret) { + dev_err(dev, "failed to obtain proximity close setting\n"); + return ret; + } + dev_dbg(dev, "proximity close setting %02x\n", val); + gp2ap002->hys_close = val; + + /* The GP2AP002A00F has a light sensor too */ + if (!gp2ap002->is_gp2ap002s00f) { + gp2ap002->alsout = devm_iio_channel_get(dev, "alsout"); + if (IS_ERR(gp2ap002->alsout)) { + if (PTR_ERR(gp2ap002->alsout) == -ENODEV) { + dev_err(dev, "no ADC, deferring...\n"); + return -EPROBE_DEFER; + } + dev_err(dev, "failed to get ALSOUT ADC channel\n"); + return PTR_ERR(gp2ap002->alsout); + } + ret = iio_get_channel_type(gp2ap002->alsout, &ch_type); + if (ret < 0) + return ret; + if (ch_type != IIO_CURRENT) { + dev_err(dev, + "wrong type of IIO channel specified for ALSOUT\n"); + return -EINVAL; + } + } + + gp2ap002->vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(gp2ap002->vdd)) { + dev_err(dev, "failed to get VDD regulator\n"); + return PTR_ERR(gp2ap002->vdd); + } + gp2ap002->vio = devm_regulator_get(dev, "vio"); + if (IS_ERR(gp2ap002->vio)) { + dev_err(dev, "failed to get VIO regulator\n"); + return PTR_ERR(gp2ap002->vio); + } + + /* Operating voltage 2.4V .. 3.6V according to datasheet */ + ret = regulator_set_voltage(gp2ap002->vdd, 2400000, 3600000); + if (ret) { + dev_err(dev, "failed to set VDD voltage\n"); + return ret; + } + + /* VIO should be between 1.65V and VDD */ + ret = regulator_get_voltage(gp2ap002->vdd); + if (ret < 0) { + dev_err(dev, "failed to get VDD voltage\n"); + return ret; + } + ret = regulator_set_voltage(gp2ap002->vio, 1650000, ret); + if (ret) { + dev_err(dev, "failed to set VIO voltage\n"); + return ret; + } + + ret = regulator_enable(gp2ap002->vdd); + if (ret) { + dev_err(dev, "failed to enable VDD regulator\n"); + return ret; + } + ret = regulator_enable(gp2ap002->vio); + if (ret) { + dev_err(dev, "failed to enable VIO regulator\n"); + goto out_disable_vdd; + } + + msleep(20); + + /* + * Initialize the device and signal to runtime PM that now we are + * definitely up and using power. 
+ */ + ret = gp2ap002_init(gp2ap002); + if (ret) { + dev_err(dev, "initialization failed\n"); + goto out_disable_vio; + } + pm_runtime_get_noresume(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + gp2ap002->enabled = false; + + ret = devm_request_threaded_irq(dev, client->irq, NULL, + gp2ap002_prox_irq, IRQF_ONESHOT, + "gp2ap002", indio_dev); + if (ret) { + dev_err(dev, "unable to request IRQ\n"); + goto out_disable_vio; + } + gp2ap002->irq = client->irq; + + /* + * As the device takes 20 ms + regulator delay to come up with a fresh + * measurement after power-on, do not shut it down unnecessarily. + * Set autosuspend to a one second. + */ + pm_runtime_set_autosuspend_delay(dev, 1000); + pm_runtime_use_autosuspend(dev); + pm_runtime_put(dev); + + indio_dev->dev.parent = dev; + indio_dev->info = &gp2ap002_info; + indio_dev->name = "gp2ap002"; + indio_dev->channels = gp2ap002_channels; + /* Skip light channel for the proximity-only sensor */ + num_chan = ARRAY_SIZE(gp2ap002_channels); + if (gp2ap002->is_gp2ap002s00f) + num_chan--; + indio_dev->num_channels = num_chan; + indio_dev->modes = INDIO_DIRECT_MODE; + + ret = iio_device_register(indio_dev); + if (ret) + goto out_disable_pm; + dev_dbg(dev, "Sharp GP2AP002 probed successfully\n"); + + return 0; + +out_disable_pm: + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); +out_disable_vio: + regulator_disable(gp2ap002->vio); +out_disable_vdd: + regulator_disable(gp2ap002->vdd); + return ret; +} + +static int gp2ap002_remove(struct i2c_client *client) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(client); + struct gp2ap002 *gp2ap002 = iio_priv(indio_dev); + struct device *dev = &client->dev; + + pm_runtime_get_sync(dev); + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); + iio_device_unregister(indio_dev); + regulator_disable(gp2ap002->vio); + regulator_disable(gp2ap002->vdd); + + return 0; +} + +static int __maybe_unused gp2ap002_runtime_suspend(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct gp2ap002 *gp2ap002 = iio_priv(indio_dev); + int ret; + + /* Deactivate the IRQ */ + disable_irq(gp2ap002->irq); + + /* Disable chip and IRQ, everything off */ + ret = regmap_write(gp2ap002->map, GP2AP002_OPMOD, 0x00); + if (ret) { + dev_err(gp2ap002->dev, "error setting up operation mode\n"); + return ret; + } + /* + * As these regulators may be shared, at least we are now in + * sleep even if the regulators aren't really turned off. 
+ */ + regulator_disable(gp2ap002->vio); + regulator_disable(gp2ap002->vdd); + + return 0; +} + +static int __maybe_unused gp2ap002_runtime_resume(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct gp2ap002 *gp2ap002 = iio_priv(indio_dev); + int ret; + + ret = regulator_enable(gp2ap002->vdd); + if (ret) { + dev_err(dev, "failed to enable VDD regulator in resume path\n"); + return ret; + } + ret = regulator_enable(gp2ap002->vio); + if (ret) { + dev_err(dev, "failed to enable VIO regulator in resume path\n"); + return ret; + } + + msleep(20); + + ret = gp2ap002_init(gp2ap002); + if (ret) { + dev_err(dev, "re-initialization failed\n"); + return ret; + } + + /* Re-activate the IRQ */ + enable_irq(gp2ap002->irq); + + return 0; +} + +static const struct dev_pm_ops gp2ap002_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(gp2ap002_runtime_suspend, + gp2ap002_runtime_resume, NULL) +}; + +static const struct i2c_device_id gp2ap002_id_table[] = { + { "gp2ap002", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, gp2ap002_id_table); + +static const struct of_device_id gp2ap002_of_match[] = { + { .compatible = "sharp,gp2ap002a00f" }, + { .compatible = "sharp,gp2ap002s00f" }, + { }, +}; +MODULE_DEVICE_TABLE(of, gp2ap002_of_match); + +static struct i2c_driver gp2ap002_driver = { + .driver = { + .name = "gp2ap002", + .of_match_table = gp2ap002_of_match, + .pm = &gp2ap002_dev_pm_ops, + }, + .probe = gp2ap002_probe, + .remove = gp2ap002_remove, + .id_table = gp2ap002_id_table, +}; +module_i2c_driver(gp2ap002_driver); + +MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); +MODULE_DESCRIPTION("GP2AP002 ambient light and proximity sensor driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c index 4d70c5bf35da..7fbbce0d4bc7 100644 --- a/drivers/iio/light/gp2ap020a00f.c +++ b/drivers/iio/light/gp2ap020a00f.c @@ -1390,6 +1390,12 @@ static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev) mutex_lock(&data->lock); + err = iio_triggered_buffer_postenable(indio_dev); + if (err < 0) { + mutex_unlock(&data->lock); + return err; + } + /* * Enable triggers according to the scan_mask. 
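The gp2ap020a00f_buffer_postenable()/predisable() rework in this hunk (and the similar lmp91000 change further down) moves the core triggered-buffer setup to the very start of postenable and its teardown to the very end of predisable, so a failed device-side enable can be unwound. A minimal sketch of that ordering, with hypothetical foo_hw_enable()/foo_hw_disable() standing in for the device-specific work:

static int foo_buffer_postenable(struct iio_dev *indio_dev)
{
        int err;

        err = iio_triggered_buffer_postenable(indio_dev);
        if (err)
                return err;

        err = foo_hw_enable(indio_dev);         /* device-specific part */
        if (err)
                iio_triggered_buffer_predisable(indio_dev);

        return err;
}

static int foo_buffer_predisable(struct iio_dev *indio_dev)
{
        foo_hw_disable(indio_dev);              /* device-specific part */

        return iio_triggered_buffer_predisable(indio_dev);
}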
Enabling either * LIGHT_CLEAR or LIGHT_IR scan mode results in enabling ALS @@ -1420,14 +1426,12 @@ static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev) goto error_unlock; data->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL); - if (!data->buffer) { + if (!data->buffer) err = -ENOMEM; - goto error_unlock; - } - - err = iio_triggered_buffer_postenable(indio_dev); error_unlock: + if (err < 0) + iio_triggered_buffer_predisable(indio_dev); mutex_unlock(&data->lock); return err; @@ -1436,14 +1440,10 @@ error_unlock: static int gp2ap020a00f_buffer_predisable(struct iio_dev *indio_dev) { struct gp2ap020a00f_data *data = iio_priv(indio_dev); - int i, err; + int i, err = 0; mutex_lock(&data->lock); - err = iio_triggered_buffer_predisable(indio_dev); - if (err < 0) - goto error_unlock; - for_each_set_bit(i, indio_dev->active_scan_mask, indio_dev->masklength) { switch (i) { @@ -1465,7 +1465,8 @@ static int gp2ap020a00f_buffer_predisable(struct iio_dev *indio_dev) if (err == 0) kfree(data->buffer); -error_unlock: + iio_triggered_buffer_predisable(indio_dev); + mutex_unlock(&data->lock); return err; diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c index 015a21f0c2ef..9174ab928880 100644 --- a/drivers/iio/light/si1133.c +++ b/drivers/iio/light/si1133.c @@ -102,6 +102,9 @@ #define SI1133_INPUT_FRACTION_LOW 15 #define SI1133_LUX_OUTPUT_FRACTION 12 #define SI1133_LUX_BUFFER_SIZE 9 +#define SI1133_MEASURE_BUFFER_SIZE 3 + +#define SI1133_SIGN_BIT_INDEX 23 static const int si1133_scale_available[] = { 1, 2, 4, 8, 16, 32, 64, 128}; @@ -234,13 +237,13 @@ static const struct si1133_lux_coeff lux_coeff = { } }; -static int si1133_calculate_polynomial_inner(u32 input, u8 fraction, u16 mag, +static int si1133_calculate_polynomial_inner(s32 input, u8 fraction, u16 mag, s8 shift) { return ((input << fraction) / mag) << shift; } -static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order, +static int si1133_calculate_output(s32 x, s32 y, u8 x_order, u8 y_order, u8 input_fraction, s8 sign, const struct si1133_coeff *coeffs) { @@ -276,7 +279,7 @@ static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order, * The algorithm is from: * https://siliconlabs.github.io/Gecko_SDK_Doc/efm32zg/html/si1133_8c_source.html#l00716 */ -static int si1133_calc_polynomial(u32 x, u32 y, u8 input_fraction, u8 num_coeff, +static int si1133_calc_polynomial(s32 x, s32 y, u8 input_fraction, u8 num_coeff, const struct si1133_coeff *coeffs) { u8 x_order, y_order; @@ -614,7 +617,7 @@ static int si1133_measure(struct si1133_data *data, { int err; - __be16 resp; + u8 buffer[SI1133_MEASURE_BUFFER_SIZE]; err = si1133_set_adcmux(data, 0, chan->channel); if (err) @@ -625,12 +628,13 @@ static int si1133_measure(struct si1133_data *data, if (err) return err; - err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(resp), - (u8 *)&resp); + err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(buffer), + buffer); if (err) return err; - *val = be16_to_cpu(resp); + *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2], + SI1133_SIGN_BIT_INDEX); return err; } @@ -704,9 +708,9 @@ static int si1133_get_lux(struct si1133_data *data, int *val) { int err; int lux; - u32 high_vis; - u32 low_vis; - u32 ir; + s32 high_vis; + s32 low_vis; + s32 ir; u8 buffer[SI1133_LUX_BUFFER_SIZE]; /* Activate lux channels */ @@ -719,9 +723,16 @@ static int si1133_get_lux(struct si1133_data *data, int *val) if (err) return err; - high_vis = (buffer[0] << 16) | (buffer[1] << 8) | buffer[2]; - 
low_vis = (buffer[3] << 16) | (buffer[4] << 8) | buffer[5]; - ir = (buffer[6] << 16) | (buffer[7] << 8) | buffer[8]; + high_vis = + sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2], + SI1133_SIGN_BIT_INDEX); + + low_vis = + sign_extend32((buffer[3] << 16) | (buffer[4] << 8) | buffer[5], + SI1133_SIGN_BIT_INDEX); + + ir = sign_extend32((buffer[6] << 16) | (buffer[7] << 8) | buffer[8], + SI1133_SIGN_BIT_INDEX); if (high_vis > SI1133_ADC_THRESHOLD || ir > SI1133_ADC_THRESHOLD) lux = si1133_calc_polynomial(high_vis, ir, diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c index b0e241aaefb4..ec803c1e81df 100644 --- a/drivers/iio/light/vcnl4000.c +++ b/drivers/iio/light/vcnl4000.c @@ -22,6 +22,7 @@ #include <linux/i2c.h> #include <linux/err.h> #include <linux/delay.h> +#include <linux/pm_runtime.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> @@ -57,6 +58,8 @@ #define VCNL4000_AL_OD BIT(4) /* start on-demand ALS measurement */ #define VCNL4000_PS_OD BIT(3) /* start on-demand proximity measurement */ +#define VCNL4000_SLEEP_DELAY_MS 2000 /* before we enter pm_runtime_suspend */ + enum vcnl4000_device_ids { VCNL4000, VCNL4010, @@ -87,6 +90,7 @@ struct vcnl4000_chip_spec { int (*init)(struct vcnl4000_data *data); int (*measure_light)(struct vcnl4000_data *data, int *val); int (*measure_proximity)(struct vcnl4000_data *data, int *val); + int (*set_power_state)(struct vcnl4000_data *data, bool on); }; static const struct i2c_device_id vcnl4000_id[] = { @@ -99,6 +103,12 @@ static const struct i2c_device_id vcnl4000_id[] = { }; MODULE_DEVICE_TABLE(i2c, vcnl4000_id); +static int vcnl4000_set_power_state(struct vcnl4000_data *data, bool on) +{ + /* no suspend op */ + return 0; +} + static int vcnl4000_init(struct vcnl4000_data *data) { int ret, prod_id; @@ -127,9 +137,31 @@ static int vcnl4000_init(struct vcnl4000_data *data) data->al_scale = 250000; mutex_init(&data->vcnl4000_lock); - return 0; + return data->chip_spec->set_power_state(data, true); }; +static int vcnl4200_set_power_state(struct vcnl4000_data *data, bool on) +{ + u16 val = on ? 0 /* power on */ : 1 /* shut down */; + int ret; + + ret = i2c_smbus_write_word_data(data->client, VCNL4200_AL_CONF, val); + if (ret < 0) + return ret; + + ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1, val); + if (ret < 0) + return ret; + + if (on) { + /* Wait at least one integration cycle before fetching data */ + data->vcnl4200_al.last_measurement = ktime_get(); + data->vcnl4200_ps.last_measurement = ktime_get(); + } + + return 0; +} + static int vcnl4200_init(struct vcnl4000_data *data) { int ret, id; @@ -155,36 +187,31 @@ static int vcnl4200_init(struct vcnl4000_data *data) data->rev = (ret >> 8) & 0xf; - /* Set defaults and enable both channels */ - ret = i2c_smbus_write_word_data(data->client, VCNL4200_AL_CONF, 0); - if (ret < 0) - return ret; - ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1, 0); - if (ret < 0) - return ret; - data->vcnl4200_al.reg = VCNL4200_AL_DATA; data->vcnl4200_ps.reg = VCNL4200_PS_DATA; switch (id) { case VCNL4200_PROD_ID: - /* Integration time is 50ms, but the experiments */ - /* show 54ms in total. */ - data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000); - data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000); + /* Default wait time is 50ms, add 20% tolerance. */ + data->vcnl4200_al.sampling_rate = ktime_set(0, 60000 * 1000); + /* Default wait time is 4.8ms, add 20% tolerance. 
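For reference, the new periods are just the stated defaults plus the 20% margin: 50 ms * 1.2 = 60 ms = 60000 * 1000 ns and 4.8 ms * 1.2 = 5.76 ms = 5760 * 1000 ns for the VCNL4200, and the same rule gives the 96 ms and 6 ms periods used for the VCNL4040 below.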
*/ + data->vcnl4200_ps.sampling_rate = ktime_set(0, 5760 * 1000); data->al_scale = 24000; break; case VCNL4040_PROD_ID: - /* Integration time is 80ms, add 10ms. */ - data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000); - data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000); + /* Default wait time is 80ms, add 20% tolerance. */ + data->vcnl4200_al.sampling_rate = ktime_set(0, 96000 * 1000); + /* Default wait time is 5ms, add 20% tolerance. */ + data->vcnl4200_ps.sampling_rate = ktime_set(0, 6000 * 1000); data->al_scale = 120000; break; } - data->vcnl4200_al.last_measurement = ktime_set(0, 0); - data->vcnl4200_ps.last_measurement = ktime_set(0, 0); mutex_init(&data->vcnl4200_al.lock); mutex_init(&data->vcnl4200_ps.lock); + ret = data->chip_spec->set_power_state(data, true); + if (ret < 0) + return ret; + return 0; }; @@ -291,24 +318,28 @@ static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = { .init = vcnl4000_init, .measure_light = vcnl4000_measure_light, .measure_proximity = vcnl4000_measure_proximity, + .set_power_state = vcnl4000_set_power_state, }, [VCNL4010] = { .prod = "VCNL4010/4020", .init = vcnl4000_init, .measure_light = vcnl4000_measure_light, .measure_proximity = vcnl4000_measure_proximity, + .set_power_state = vcnl4000_set_power_state, }, [VCNL4040] = { .prod = "VCNL4040", .init = vcnl4200_init, .measure_light = vcnl4200_measure_light, .measure_proximity = vcnl4200_measure_proximity, + .set_power_state = vcnl4200_set_power_state, }, [VCNL4200] = { .prod = "VCNL4200", .init = vcnl4200_init, .measure_light = vcnl4200_measure_light, .measure_proximity = vcnl4200_measure_proximity, + .set_power_state = vcnl4200_set_power_state, }, }; @@ -323,6 +354,23 @@ static const struct iio_chan_spec vcnl4000_channels[] = { } }; +static int vcnl4000_set_pm_runtime_state(struct vcnl4000_data *data, bool on) +{ + struct device *dev = &data->client->dev; + int ret; + + if (on) { + ret = pm_runtime_get_sync(dev); + if (ret < 0) + pm_runtime_put_noidle(dev); + } else { + pm_runtime_mark_last_busy(dev); + ret = pm_runtime_put_autosuspend(dev); + } + + return ret; +} + static int vcnl4000_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) @@ -332,20 +380,26 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: + ret = vcnl4000_set_pm_runtime_state(data, true); + if (ret < 0) + return ret; + switch (chan->type) { case IIO_LIGHT: ret = data->chip_spec->measure_light(data, val); - if (ret < 0) - return ret; - return IIO_VAL_INT; + if (!ret) + ret = IIO_VAL_INT; + break; case IIO_PROXIMITY: ret = data->chip_spec->measure_proximity(data, val); - if (ret < 0) - return ret; - return IIO_VAL_INT; + if (!ret) + ret = IIO_VAL_INT; + break; default: - return -EINVAL; + ret = -EINVAL; } + vcnl4000_set_pm_runtime_state(data, false); + return ret; case IIO_CHAN_INFO_SCALE: if (chan->type != IIO_LIGHT) return -EINVAL; @@ -393,7 +447,22 @@ static int vcnl4000_probe(struct i2c_client *client, indio_dev->name = VCNL4000_DRV_NAME; indio_dev->modes = INDIO_DIRECT_MODE; - return devm_iio_device_register(&client->dev, indio_dev); + ret = pm_runtime_set_active(&client->dev); + if (ret < 0) + goto fail_poweroff; + + ret = iio_device_register(indio_dev); + if (ret < 0) + goto fail_poweroff; + + pm_runtime_enable(&client->dev); + pm_runtime_set_autosuspend_delay(&client->dev, VCNL4000_SLEEP_DELAY_MS); + pm_runtime_use_autosuspend(&client->dev); + + return 0; +fail_poweroff: + 
data->chip_spec->set_power_state(data, false); + return ret; } static const struct of_device_id vcnl_4000_of_match[] = { @@ -421,13 +490,51 @@ static const struct of_device_id vcnl_4000_of_match[] = { }; MODULE_DEVICE_TABLE(of, vcnl_4000_of_match); +static int vcnl4000_remove(struct i2c_client *client) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(client); + struct vcnl4000_data *data = iio_priv(indio_dev); + + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); + iio_device_unregister(indio_dev); + pm_runtime_set_suspended(&client->dev); + + return data->chip_spec->set_power_state(data, false); +} + +static int __maybe_unused vcnl4000_runtime_suspend(struct device *dev) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); + struct vcnl4000_data *data = iio_priv(indio_dev); + + return data->chip_spec->set_power_state(data, false); +} + +static int __maybe_unused vcnl4000_runtime_resume(struct device *dev) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); + struct vcnl4000_data *data = iio_priv(indio_dev); + + return data->chip_spec->set_power_state(data, true); +} + +static const struct dev_pm_ops vcnl4000_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(vcnl4000_runtime_suspend, + vcnl4000_runtime_resume, NULL) +}; + static struct i2c_driver vcnl4000_driver = { .driver = { .name = VCNL4000_DRV_NAME, + .pm = &vcnl4000_pm_ops, .of_match_table = vcnl_4000_of_match, }, .probe = vcnl4000_probe, .id_table = vcnl4000_id, + .remove = vcnl4000_remove, }; module_i2c_driver(vcnl4000_driver); diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c index fc7e910f8e8b..d32996702110 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c @@ -564,7 +564,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev, * We read all axes and discard all but one, for optimized * reading, use the triggered buffer. 
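The one-line ak8974 change just below casts the raw little-endian sample to s16 before it is widened into the int *val, so negative magnetometer readings keep their sign instead of appearing as large positive values (the same idea as the si1133 sign_extend32() conversions earlier in this section). A tiny stand-alone illustration with a made-up sample value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t hw = 0xff38;           /* raw 16-bit sample: -200 in two's complement */
        int without_cast = hw;          /* 65336: sign lost */
        int with_cast = (int16_t)hw;    /* -200: sign-extended as intended */

        printf("%d %d\n", without_cast, with_cast);
        return 0;
}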
*/ - *val = le16_to_cpu(hw_values[chan->address]); + *val = (s16)le16_to_cpu(hw_values[chan->address]); ret = IIO_VAL_INT; } diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c index a0e5f530faa9..2cb11da18e0f 100644 --- a/drivers/iio/potentiostat/lmp91000.c +++ b/drivers/iio/potentiostat/lmp91000.c @@ -275,11 +275,20 @@ static int lmp91000_buffer_cb(const void *val, void *private) static const struct iio_trigger_ops lmp91000_trigger_ops = { }; -static int lmp91000_buffer_preenable(struct iio_dev *indio_dev) +static int lmp91000_buffer_postenable(struct iio_dev *indio_dev) { struct lmp91000_data *data = iio_priv(indio_dev); + int err; - return iio_channel_start_all_cb(data->cb_buffer); + err = iio_triggered_buffer_postenable(indio_dev); + if (err) + return err; + + err = iio_channel_start_all_cb(data->cb_buffer); + if (err) + iio_triggered_buffer_predisable(indio_dev); + + return err; } static int lmp91000_buffer_predisable(struct iio_dev *indio_dev) @@ -288,12 +297,11 @@ static int lmp91000_buffer_predisable(struct iio_dev *indio_dev) iio_channel_stop_all_cb(data->cb_buffer); - return 0; + return iio_triggered_buffer_predisable(indio_dev); } static const struct iio_buffer_setup_ops lmp91000_buffer_setup_ops = { - .preenable = lmp91000_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, + .postenable = lmp91000_buffer_postenable, .predisable = lmp91000_buffer_predisable, }; diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig index 9c2d9bf8f100..689b978db4f9 100644 --- a/drivers/iio/pressure/Kconfig +++ b/drivers/iio/pressure/Kconfig @@ -101,6 +101,17 @@ config HP03 To compile this driver as a module, choose M here: the module will be called hp03. +config ICP10100 + tristate "InvenSense ICP-101xx pressure and temperature sensor" + depends on I2C + select CRC8 + help + Say yes here to build support for InvenSense ICP-101xx barometric + pressure and temperature sensor. + + To compile this driver as a module, choose M here: the module + will be called icp10100. + config MPL115 tristate diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile index 5a79192d8cb5..083ae87ff48f 100644 --- a/drivers/iio/pressure/Makefile +++ b/drivers/iio/pressure/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_DPS310) += dps310.o obj-$(CONFIG_IIO_CROS_EC_BARO) += cros_ec_baro.o obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o obj-$(CONFIG_HP03) += hp03.o +obj-$(CONFIG_ICP10100) += icp10100.o obj-$(CONFIG_MPL115) += mpl115.o obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o obj-$(CONFIG_MPL115_SPI) += mpl115_spi.o diff --git a/drivers/iio/pressure/icp10100.c b/drivers/iio/pressure/icp10100.c new file mode 100644 index 000000000000..06cb5b63a189 --- /dev/null +++ b/drivers/iio/pressure/icp10100.c @@ -0,0 +1,658 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2020 InvenSense, Inc. + * + * Driver for InvenSense ICP-1010xx barometric pressure and temperature sensor. 
+ * + * Datasheet: + * http://www.invensense.com/wp-content/uploads/2018/01/DS-000186-ICP-101xx-v1.2.pdf + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/pm_runtime.h> +#include <linux/crc8.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/log2.h> +#include <linux/math64.h> +#include <linux/regulator/consumer.h> +#include <linux/iio/iio.h> + +#define ICP10100_ID_REG_GET(_reg) ((_reg) & 0x003F) +#define ICP10100_ID_REG 0x08 +#define ICP10100_RESPONSE_WORD_LENGTH 3 +#define ICP10100_CRC8_WORD_LENGTH 2 +#define ICP10100_CRC8_POLYNOMIAL 0x31 +#define ICP10100_CRC8_INIT 0xFF + +enum icp10100_mode { + ICP10100_MODE_LP, /* Low power mode: 1x sampling */ + ICP10100_MODE_N, /* Normal mode: 2x sampling */ + ICP10100_MODE_LN, /* Low noise mode: 4x sampling */ + ICP10100_MODE_ULN, /* Ultra low noise mode: 8x sampling */ + ICP10100_MODE_NB, +}; + +struct icp10100_state { + struct mutex lock; + struct i2c_client *client; + struct regulator *vdd; + enum icp10100_mode mode; + int16_t cal[4]; +}; + +struct icp10100_command { + __be16 cmd; + unsigned long wait_us; + unsigned long wait_max_us; + size_t response_word_nb; +}; + +static const struct icp10100_command icp10100_cmd_soft_reset = { + .cmd = cpu_to_be16(0x805D), + .wait_us = 170, + .wait_max_us = 200, + .response_word_nb = 0, +}; + +static const struct icp10100_command icp10100_cmd_read_id = { + .cmd = cpu_to_be16(0xEFC8), + .wait_us = 0, + .response_word_nb = 1, +}; + +static const struct icp10100_command icp10100_cmd_read_otp = { + .cmd = cpu_to_be16(0xC7F7), + .wait_us = 0, + .response_word_nb = 1, +}; + +static const struct icp10100_command icp10100_cmd_measure[] = { + [ICP10100_MODE_LP] = { + .cmd = cpu_to_be16(0x401A), + .wait_us = 1800, + .wait_max_us = 2000, + .response_word_nb = 3, + }, + [ICP10100_MODE_N] = { + .cmd = cpu_to_be16(0x48A3), + .wait_us = 6300, + .wait_max_us = 6500, + .response_word_nb = 3, + }, + [ICP10100_MODE_LN] = { + .cmd = cpu_to_be16(0x5059), + .wait_us = 23800, + .wait_max_us = 24000, + .response_word_nb = 3, + }, + [ICP10100_MODE_ULN] = { + .cmd = cpu_to_be16(0x58E0), + .wait_us = 94500, + .wait_max_us = 94700, + .response_word_nb = 3, + }, +}; + +static const uint8_t icp10100_switch_mode_otp[] = + {0xC5, 0x95, 0x00, 0x66, 0x9c}; + +DECLARE_CRC8_TABLE(icp10100_crc8_table); + +static inline int icp10100_i2c_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + int ret; + + ret = i2c_transfer(adap, msgs, num); + if (ret < 0) + return ret; + + if (ret != num) + return -EIO; + + return 0; +} + +static int icp10100_send_cmd(struct icp10100_state *st, + const struct icp10100_command *cmd, + __be16 *buf, size_t buf_len) +{ + size_t size = cmd->response_word_nb * ICP10100_RESPONSE_WORD_LENGTH; + uint8_t data[16]; + uint8_t *ptr; + uint8_t *buf_ptr = (uint8_t *)buf; + struct i2c_msg msgs[2] = { + { + .addr = st->client->addr, + .flags = 0, + .len = 2, + .buf = (uint8_t *)&cmd->cmd, + }, { + .addr = st->client->addr, + .flags = I2C_M_RD, + .len = size, + .buf = data, + }, + }; + uint8_t crc; + unsigned int i; + int ret; + + if (size > sizeof(data)) + return -EINVAL; + + if (cmd->response_word_nb > 0 && + (buf == NULL || buf_len < (cmd->response_word_nb * 2))) + return -EINVAL; + + dev_dbg(&st->client->dev, "sending cmd %#x\n", be16_to_cpu(cmd->cmd)); + + if (cmd->response_word_nb > 0 && cmd->wait_us == 0) { + /* direct command-response without waiting */ + ret = icp10100_i2c_xfer(st->client->adapter, msgs, + ARRAY_SIZE(msgs)); + if (ret) + 
return ret; + } else { + /* transfer command write */ + ret = icp10100_i2c_xfer(st->client->adapter, &msgs[0], 1); + if (ret) + return ret; + if (cmd->wait_us > 0) + usleep_range(cmd->wait_us, cmd->wait_max_us); + /* transfer response read if needed */ + if (cmd->response_word_nb > 0) { + ret = icp10100_i2c_xfer(st->client->adapter, &msgs[1], 1); + if (ret) + return ret; + } else { + return 0; + } + } + + /* process read words with crc checking */ + for (i = 0; i < cmd->response_word_nb; ++i) { + ptr = &data[i * ICP10100_RESPONSE_WORD_LENGTH]; + crc = crc8(icp10100_crc8_table, ptr, ICP10100_CRC8_WORD_LENGTH, + ICP10100_CRC8_INIT); + if (crc != ptr[ICP10100_CRC8_WORD_LENGTH]) { + dev_err(&st->client->dev, "crc error recv=%#x calc=%#x\n", + ptr[ICP10100_CRC8_WORD_LENGTH], crc); + return -EIO; + } + *buf_ptr++ = ptr[0]; + *buf_ptr++ = ptr[1]; + } + + return 0; +} + +static int icp10100_read_cal_otp(struct icp10100_state *st) +{ + __be16 val; + int i; + int ret; + + /* switch into OTP read mode */ + ret = i2c_master_send(st->client, icp10100_switch_mode_otp, + ARRAY_SIZE(icp10100_switch_mode_otp)); + if (ret < 0) + return ret; + if (ret != ARRAY_SIZE(icp10100_switch_mode_otp)) + return -EIO; + + /* read 4 calibration values */ + for (i = 0; i < 4; ++i) { + ret = icp10100_send_cmd(st, &icp10100_cmd_read_otp, + &val, sizeof(val)); + if (ret) + return ret; + st->cal[i] = be16_to_cpu(val); + dev_dbg(&st->client->dev, "cal[%d] = %d\n", i, st->cal[i]); + } + + return 0; +} + +static int icp10100_init_chip(struct icp10100_state *st) +{ + __be16 val; + uint16_t id; + int ret; + + /* read and check id */ + ret = icp10100_send_cmd(st, &icp10100_cmd_read_id, &val, sizeof(val)); + if (ret) + return ret; + id = ICP10100_ID_REG_GET(be16_to_cpu(val)); + if (id != ICP10100_ID_REG) { + dev_err(&st->client->dev, "invalid id %#x\n", id); + return -ENODEV; + } + + /* read calibration data from OTP */ + ret = icp10100_read_cal_otp(st); + if (ret) + return ret; + + /* reset chip */ + return icp10100_send_cmd(st, &icp10100_cmd_soft_reset, NULL, 0); +} + +static int icp10100_get_measures(struct icp10100_state *st, + uint32_t *pressure, uint16_t *temperature) +{ + const struct icp10100_command *cmd; + __be16 measures[3]; + int ret; + + pm_runtime_get_sync(&st->client->dev); + + mutex_lock(&st->lock); + cmd = &icp10100_cmd_measure[st->mode]; + ret = icp10100_send_cmd(st, cmd, measures, sizeof(measures)); + mutex_unlock(&st->lock); + if (ret) + goto error_measure; + + *pressure = (be16_to_cpu(measures[0]) << 8) | + (be16_to_cpu(measures[1]) >> 8); + *temperature = be16_to_cpu(measures[2]); + + pm_runtime_mark_last_busy(&st->client->dev); +error_measure: + pm_runtime_put_autosuspend(&st->client->dev); + return ret; +} + +static uint32_t icp10100_get_pressure(struct icp10100_state *st, + uint32_t raw_pressure, uint16_t raw_temp) +{ + static int32_t p_calib[] = {45000, 80000, 105000}; + static int32_t lut_lower = 3670016; + static int32_t lut_upper = 12058624; + static int32_t inv_quadr_factor = 16777216; + static int32_t offset_factor = 2048; + int64_t val1, val2; + int32_t p_lut[3]; + int32_t t, t_square; + int64_t a, b, c; + uint32_t pressure_mPa; + + dev_dbg(&st->client->dev, "raw: pressure = %u, temp = %u\n", + raw_pressure, raw_temp); + + /* compute p_lut values */ + t = (int32_t)raw_temp - 32768; + t_square = t * t; + val1 = (int64_t)st->cal[0] * (int64_t)t_square; + p_lut[0] = lut_lower + (int32_t)div_s64(val1, inv_quadr_factor); + val1 = (int64_t)st->cal[1] * (int64_t)t_square; + p_lut[1] = offset_factor * 
st->cal[3] + + (int32_t)div_s64(val1, inv_quadr_factor); + val1 = (int64_t)st->cal[2] * (int64_t)t_square; + p_lut[2] = lut_upper + (int32_t)div_s64(val1, inv_quadr_factor); + dev_dbg(&st->client->dev, "p_lut = [%d, %d, %d]\n", + p_lut[0], p_lut[1], p_lut[2]); + + /* compute a, b, c factors */ + val1 = (int64_t)p_lut[0] * (int64_t)p_lut[1] * + (int64_t)(p_calib[0] - p_calib[1]) + + (int64_t)p_lut[1] * (int64_t)p_lut[2] * + (int64_t)(p_calib[1] - p_calib[2]) + + (int64_t)p_lut[2] * (int64_t)p_lut[0] * + (int64_t)(p_calib[2] - p_calib[0]); + val2 = (int64_t)p_lut[2] * (int64_t)(p_calib[0] - p_calib[1]) + + (int64_t)p_lut[0] * (int64_t)(p_calib[1] - p_calib[2]) + + (int64_t)p_lut[1] * (int64_t)(p_calib[2] - p_calib[0]); + c = div64_s64(val1, val2); + dev_dbg(&st->client->dev, "val1 = %lld, val2 = %lld, c = %lld\n", + val1, val2, c); + val1 = (int64_t)p_calib[0] * (int64_t)p_lut[0] - + (int64_t)p_calib[1] * (int64_t)p_lut[1] - + (int64_t)(p_calib[1] - p_calib[0]) * c; + val2 = (int64_t)p_lut[0] - (int64_t)p_lut[1]; + a = div64_s64(val1, val2); + dev_dbg(&st->client->dev, "val1 = %lld, val2 = %lld, a = %lld\n", + val1, val2, a); + b = ((int64_t)p_calib[0] - a) * ((int64_t)p_lut[0] + c); + dev_dbg(&st->client->dev, "b = %lld\n", b); + + /* + * pressure_Pa = a + (b / (c + raw_pressure)) + * pressure_mPa = 1000 * pressure_Pa + */ + pressure_mPa = 1000LL * a + div64_s64(1000LL * b, c + raw_pressure); + + return pressure_mPa; +} + +static int icp10100_read_raw_measures(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2) +{ + struct icp10100_state *st = iio_priv(indio_dev); + uint32_t raw_pressure; + uint16_t raw_temp; + uint32_t pressure_mPa; + int ret; + + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + ret = icp10100_get_measures(st, &raw_pressure, &raw_temp); + if (ret) + goto error_release; + + switch (chan->type) { + case IIO_PRESSURE: + pressure_mPa = icp10100_get_pressure(st, raw_pressure, + raw_temp); + /* mPa to kPa */ + *val = pressure_mPa / 1000000; + *val2 = pressure_mPa % 1000000; + ret = IIO_VAL_INT_PLUS_MICRO; + break; + case IIO_TEMP: + *val = raw_temp; + ret = IIO_VAL_INT; + break; + default: + ret = -EINVAL; + break; + } + +error_release: + iio_device_release_direct_mode(indio_dev); + return ret; +} + +static int icp10100_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct icp10100_state *st = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + case IIO_CHAN_INFO_PROCESSED: + return icp10100_read_raw_measures(indio_dev, chan, val, val2); + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_TEMP: + /* 1000 * 175°C / 65536 in m°C */ + *val = 2; + *val2 = 670288; + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } + break; + case IIO_CHAN_INFO_OFFSET: + switch (chan->type) { + case IIO_TEMP: + /* 1000 * -45°C in m°C */ + *val = -45000; + return IIO_VAL_INT; + default: + return -EINVAL; + } + break; + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + mutex_lock(&st->lock); + *val = 1 << st->mode; + mutex_unlock(&st->lock); + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int icp10100_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + static int oversamplings[] = {1, 2, 4, 8}; + + switch (mask) { + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + *vals = oversamplings; + *type = IIO_VAL_INT; + *length = ARRAY_SIZE(oversamplings); + 
return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + +static int icp10100_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct icp10100_state *st = iio_priv(indio_dev); + unsigned int mode; + int ret; + + switch (mask) { + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + /* oversampling is always positive and a power of 2 */ + if (val <= 0 || !is_power_of_2(val)) + return -EINVAL; + mode = ilog2(val); + if (mode >= ICP10100_MODE_NB) + return -EINVAL; + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + mutex_lock(&st->lock); + st->mode = mode; + mutex_unlock(&st->lock); + iio_device_release_direct_mode(indio_dev); + return 0; + default: + return -EINVAL; + } +} + +static int icp10100_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static const struct iio_info icp10100_info = { + .read_raw = icp10100_read_raw, + .read_avail = icp10100_read_avail, + .write_raw = icp10100_write_raw, + .write_raw_get_fmt = icp10100_write_raw_get_fmt, +}; + +static const struct iio_chan_spec icp10100_channels[] = { + { + .type = IIO_PRESSURE, + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), + .info_mask_shared_by_all = + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + .info_mask_shared_by_all_available = + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + }, { + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OFFSET), + .info_mask_shared_by_all = + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + .info_mask_shared_by_all_available = + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + }, +}; + +static int icp10100_enable_regulator(struct icp10100_state *st) +{ + int ret; + + ret = regulator_enable(st->vdd); + if (ret) + return ret; + msleep(100); + + return 0; +} + +static void icp10100_disable_regulator_action(void *data) +{ + struct icp10100_state *st = data; + int ret; + + ret = regulator_disable(st->vdd); + if (ret) + dev_err(&st->client->dev, "error %d disabling vdd\n", ret); +} + +static void icp10100_pm_disable(void *data) +{ + struct device *dev = data; + + pm_runtime_put_sync_suspend(dev); + pm_runtime_disable(dev); +} + +static int icp10100_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct iio_dev *indio_dev; + struct icp10100_state *st; + int ret; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(&client->dev, "plain i2c transactions not supported\n"); + return -ENODEV; + } + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + i2c_set_clientdata(client, indio_dev); + indio_dev->dev.parent = &client->dev; + indio_dev->name = client->name; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = icp10100_channels; + indio_dev->num_channels = ARRAY_SIZE(icp10100_channels); + indio_dev->info = &icp10100_info; + + st = iio_priv(indio_dev); + mutex_init(&st->lock); + st->client = client; + st->mode = ICP10100_MODE_N; + + st->vdd = devm_regulator_get(&client->dev, "vdd"); + if (IS_ERR(st->vdd)) + return PTR_ERR(st->vdd); + + ret = icp10100_enable_regulator(st); + if (ret) + return ret; + + ret = devm_add_action_or_reset(&client->dev, + icp10100_disable_regulator_action, st); + if (ret) + return ret; + + /* has to be done before the first i2c communication */ + crc8_populate_msb(icp10100_crc8_table, 
ICP10100_CRC8_POLYNOMIAL); + + ret = icp10100_init_chip(st); + if (ret) { + dev_err(&client->dev, "init chip error %d\n", ret); + return ret; + } + + /* enable runtime pm with autosuspend delay of 2s */ + pm_runtime_get_noresume(&client->dev); + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + pm_runtime_set_autosuspend_delay(&client->dev, 2000); + pm_runtime_use_autosuspend(&client->dev); + pm_runtime_put(&client->dev); + ret = devm_add_action_or_reset(&client->dev, icp10100_pm_disable, + &client->dev); + if (ret) + return ret; + + return devm_iio_device_register(&client->dev, indio_dev); +} + +static int __maybe_unused icp10100_suspend(struct device *dev) +{ + struct icp10100_state *st = iio_priv(dev_get_drvdata(dev)); + int ret; + + mutex_lock(&st->lock); + ret = regulator_disable(st->vdd); + mutex_unlock(&st->lock); + + return ret; +} + +static int __maybe_unused icp10100_resume(struct device *dev) +{ + struct icp10100_state *st = iio_priv(dev_get_drvdata(dev)); + int ret; + + mutex_lock(&st->lock); + + ret = icp10100_enable_regulator(st); + if (ret) + goto out_unlock; + + /* reset chip */ + ret = icp10100_send_cmd(st, &icp10100_cmd_soft_reset, NULL, 0); + +out_unlock: + mutex_unlock(&st->lock); + return ret; +} + +static UNIVERSAL_DEV_PM_OPS(icp10100_pm, icp10100_suspend, icp10100_resume, + NULL); + +static const struct of_device_id icp10100_of_match[] = { + { + .compatible = "invensense,icp10100", + }, + { } +}; +MODULE_DEVICE_TABLE(of, icp10100_of_match); + +static const struct i2c_device_id icp10100_id[] = { + { "icp10100", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, icp10100_id); + +static struct i2c_driver icp10100_driver = { + .driver = { + .name = "icp10100", + .pm = &icp10100_pm, + .of_match_table = of_match_ptr(icp10100_of_match), + }, + .probe = icp10100_probe, + .id_table = icp10100_id, +}; +module_i2c_driver(icp10100_driver); + +MODULE_AUTHOR("InvenSense, Inc."); +MODULE_DESCRIPTION("InvenSense icp10100 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c index 34aff108dff5..12b893c5b0ee 100644 --- a/drivers/iio/proximity/ping.c +++ b/drivers/iio/proximity/ping.c @@ -269,7 +269,7 @@ static const struct iio_chan_spec ping_chan_spec[] = { static const struct of_device_id of_ping_match[] = { { .compatible = "parallax,ping", .data = &pa_ping_cfg}, - { .compatible = "parallax,laserping", .data = &pa_ping_cfg}, + { .compatible = "parallax,laserping", .data = &pa_laser_ping_cfg}, {}, }; diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c index 01eb8cc63076..568b76e06385 100644 --- a/drivers/iio/proximity/srf04.c +++ b/drivers/iio/proximity/srf04.c @@ -45,6 +45,7 @@ #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/delay.h> +#include <linux/pm_runtime.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> @@ -56,6 +57,7 @@ struct srf04_data { struct device *dev; struct gpio_desc *gpiod_trig; struct gpio_desc *gpiod_echo; + struct gpio_desc *gpiod_power; struct mutex lock; int irqnr; ktime_t ts_rising; @@ -63,6 +65,7 @@ struct srf04_data { struct completion rising; struct completion falling; const struct srf04_cfg *cfg; + int startup_time_ms; }; static const struct srf04_cfg srf04_cfg = { @@ -97,6 +100,9 @@ static int srf04_read(struct srf04_data *data) u64 dt_ns; u32 time_ns, distance_mm; + if (data->gpiod_power) + pm_runtime_get_sync(data->dev); + /* * just one read-echo-cycle can take place at a time * ==> lock against concurrent reading calls @@ -110,6 
+116,11 @@ static int srf04_read(struct srf04_data *data) udelay(data->cfg->trigger_pulse_us); gpiod_set_value(data->gpiod_trig, 0); + if (data->gpiod_power) { + pm_runtime_mark_last_busy(data->dev); + pm_runtime_put_autosuspend(data->dev); + } + /* it should not take more than 20 ms until echo is rising */ ret = wait_for_completion_killable_timeout(&data->rising, HZ/50); if (ret < 0) { @@ -268,6 +279,22 @@ static int srf04_probe(struct platform_device *pdev) return PTR_ERR(data->gpiod_echo); } + data->gpiod_power = devm_gpiod_get_optional(dev, "power", + GPIOD_OUT_LOW); + if (IS_ERR(data->gpiod_power)) { + dev_err(dev, "failed to get power-gpios: err=%ld\n", + PTR_ERR(data->gpiod_power)); + return PTR_ERR(data->gpiod_power); + } + if (data->gpiod_power) { + + if (of_property_read_u32(dev->of_node, "startup-time-ms", + &data->startup_time_ms)) + data->startup_time_ms = 100; + dev_dbg(dev, "using power gpio: startup-time-ms=%d\n", + data->startup_time_ms); + } + if (gpiod_cansleep(data->gpiod_echo)) { dev_err(data->dev, "cansleep-GPIOs not supported\n"); return -ENODEV; @@ -296,14 +323,81 @@ static int srf04_probe(struct platform_device *pdev) indio_dev->channels = srf04_chan_spec; indio_dev->num_channels = ARRAY_SIZE(srf04_chan_spec); - return devm_iio_device_register(dev, indio_dev); + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(data->dev, "iio_device_register: %d\n", ret); + return ret; + } + + if (data->gpiod_power) { + pm_runtime_set_autosuspend_delay(data->dev, 1000); + pm_runtime_use_autosuspend(data->dev); + + ret = pm_runtime_set_active(data->dev); + if (ret) { + dev_err(data->dev, "pm_runtime_set_active: %d\n", ret); + iio_device_unregister(indio_dev); + } + + pm_runtime_enable(data->dev); + pm_runtime_idle(data->dev); + } + + return ret; } +static int srf04_remove(struct platform_device *pdev) +{ + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct srf04_data *data = iio_priv(indio_dev); + + iio_device_unregister(indio_dev); + + if (data->gpiod_power) { + pm_runtime_disable(data->dev); + pm_runtime_set_suspended(data->dev); + } + + return 0; +} + +static int __maybe_unused srf04_pm_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct srf04_data *data = iio_priv(indio_dev); + + gpiod_set_value(data->gpiod_power, 0); + + return 0; +} + +static int __maybe_unused srf04_pm_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct srf04_data *data = iio_priv(indio_dev); + + gpiod_set_value(data->gpiod_power, 1); + msleep(data->startup_time_ms); + + return 0; +} + +static const struct dev_pm_ops srf04_pm_ops = { + SET_RUNTIME_PM_OPS(srf04_pm_runtime_suspend, + srf04_pm_runtime_resume, NULL) +}; + static struct platform_driver srf04_driver = { .probe = srf04_probe, + .remove = srf04_remove, .driver = { .name = "srf04-gpio", .of_match_table = of_srf04_match, + .pm = &srf04_pm_ops, }, }; diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c index 2e0d32aa8436..7d8962d6566a 100644 --- a/drivers/iio/trigger/stm32-timer-trigger.c +++ b/drivers/iio/trigger/stm32-timer-trigger.c @@ -75,14 +75,27 @@ static const void *stm32h7_valids_table[][MAX_VALIDS] = { { }, /* timer 17 */ }; +struct stm32_timer_trigger_regs { + u32 cr1; + u32 cr2; + u32 psc; + u32 
arr; + u32 cnt; + u32 smcr; +}; + struct stm32_timer_trigger { struct device *dev; struct regmap *regmap; struct clk *clk; + bool enabled; u32 max_arr; const void *triggers; const void *valids; bool has_trgo2; + struct mutex lock; /* concurrent sysfs configuration */ + struct list_head tr_list; + struct stm32_timer_trigger_regs bak; }; struct stm32_timer_trigger_cfg { @@ -106,7 +119,7 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv, { unsigned long long prd, div; int prescaler = 0; - u32 ccer, cr1; + u32 ccer; /* Period and prescaler values depends of clock rate */ div = (unsigned long long)clk_get_rate(priv->clk); @@ -136,9 +149,11 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv, if (ccer & TIM_CCER_CCXE) return -EBUSY; - regmap_read(priv->regmap, TIM_CR1, &cr1); - if (!(cr1 & TIM_CR1_CEN)) + mutex_lock(&priv->lock); + if (!priv->enabled) { + priv->enabled = true; clk_enable(priv->clk); + } regmap_write(priv->regmap, TIM_PSC, prescaler); regmap_write(priv->regmap, TIM_ARR, prd - 1); @@ -157,30 +172,41 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv, /* Enable controller */ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, TIM_CR1_CEN); + mutex_unlock(&priv->lock); return 0; } -static void stm32_timer_stop(struct stm32_timer_trigger *priv) +static void stm32_timer_stop(struct stm32_timer_trigger *priv, + struct iio_trigger *trig) { - u32 ccer, cr1; + u32 ccer; regmap_read(priv->regmap, TIM_CCER, &ccer); if (ccer & TIM_CCER_CCXE) return; - regmap_read(priv->regmap, TIM_CR1, &cr1); - if (cr1 & TIM_CR1_CEN) - clk_disable(priv->clk); - + mutex_lock(&priv->lock); /* Stop timer */ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); regmap_write(priv->regmap, TIM_PSC, 0); regmap_write(priv->regmap, TIM_ARR, 0); + /* Force disable master mode */ + if (stm32_timer_is_trgo2_name(trig->name)) + regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, 0); + else + regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS, 0); + /* Make sure that registers are updated */ regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG); + + if (priv->enabled) { + priv->enabled = false; + clk_disable(priv->clk); + } + mutex_unlock(&priv->lock); } static ssize_t stm32_tt_store_frequency(struct device *dev, @@ -197,7 +223,7 @@ static ssize_t stm32_tt_store_frequency(struct device *dev, return ret; if (freq == 0) { - stm32_timer_stop(priv); + stm32_timer_stop(priv, trig); } else { ret = stm32_timer_start(priv, trig, freq); if (ret) @@ -295,8 +321,15 @@ static ssize_t stm32_tt_store_master_mode(struct device *dev, for (i = 0; i <= master_mode_max; i++) { if (!strncmp(master_mode_table[i], buf, strlen(master_mode_table[i]))) { + mutex_lock(&priv->lock); + if (!priv->enabled) { + /* Clock should be enabled first */ + priv->enabled = true; + clk_enable(priv->clk); + } regmap_update_bits(priv->regmap, TIM_CR2, mask, i << shift); + mutex_unlock(&priv->lock); return len; } } @@ -354,11 +387,21 @@ static const struct attribute_group *stm32_trigger_attr_groups[] = { static const struct iio_trigger_ops timer_trigger_ops = { }; -static int stm32_setup_iio_triggers(struct stm32_timer_trigger *priv) +static void stm32_unregister_iio_triggers(struct stm32_timer_trigger *priv) +{ + struct iio_trigger *tr; + + list_for_each_entry(tr, &priv->tr_list, alloc_list) + iio_trigger_unregister(tr); +} + +static int stm32_register_iio_triggers(struct stm32_timer_trigger *priv) { int ret; const char * const *cur = 
priv->triggers; + INIT_LIST_HEAD(&priv->tr_list); + while (cur && *cur) { struct iio_trigger *trig; bool cur_is_trgo = stm32_timer_is_trgo_name(*cur); @@ -385,9 +428,13 @@ static int stm32_setup_iio_triggers(struct stm32_timer_trigger *priv) iio_trigger_set_drvdata(trig, priv); - ret = devm_iio_trigger_register(priv->dev, trig); - if (ret) + ret = iio_trigger_register(trig); + if (ret) { + stm32_unregister_iio_triggers(priv); return ret; + } + + list_add_tail(&trig->alloc_list, &priv->tr_list); cur++; } @@ -434,7 +481,6 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct stm32_timer_trigger *priv = iio_priv(indio_dev); - u32 dat; switch (mask) { case IIO_CHAN_INFO_RAW: @@ -445,19 +491,23 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev, return -EINVAL; case IIO_CHAN_INFO_ENABLE: + mutex_lock(&priv->lock); if (val) { - regmap_read(priv->regmap, TIM_CR1, &dat); - if (!(dat & TIM_CR1_CEN)) + if (!priv->enabled) { + priv->enabled = true; clk_enable(priv->clk); + } regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, TIM_CR1_CEN); } else { - regmap_read(priv->regmap, TIM_CR1, &dat); regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); - if (dat & TIM_CR1_CEN) + if (priv->enabled) { + priv->enabled = false; clk_disable(priv->clk); + } } + mutex_unlock(&priv->lock); return 0; } @@ -553,7 +603,6 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev, { struct stm32_timer_trigger *priv = iio_priv(indio_dev); int sms = stm32_enable_mode2sms(mode); - u32 val; if (sms < 0) return sms; @@ -561,11 +610,12 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev, * Triggered mode sets CEN bit automatically by hardware. So, first * enable counter clock, so it can use it. Keeps it in sync with CEN. 
*/ - if (sms == 6) { - regmap_read(priv->regmap, TIM_CR1, &val); - if (!(val & TIM_CR1_CEN)) - clk_enable(priv->clk); + mutex_lock(&priv->lock); + if (sms == 6 && !priv->enabled) { + clk_enable(priv->clk); + priv->enabled = true; } + mutex_unlock(&priv->lock); regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms); @@ -749,8 +799,9 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev) priv->triggers = triggers_table[index]; priv->valids = cfg->valids_table[index]; stm32_timer_detect_trgo2(priv); + mutex_init(&priv->lock); - ret = stm32_setup_iio_triggers(priv); + ret = stm32_register_iio_triggers(priv); if (ret) return ret; @@ -759,6 +810,77 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev) return 0; } +static int stm32_timer_trigger_remove(struct platform_device *pdev) +{ + struct stm32_timer_trigger *priv = platform_get_drvdata(pdev); + u32 val; + + /* Unregister triggers before everything can be safely turned off */ + stm32_unregister_iio_triggers(priv); + + /* Check if nobody else use the timer, then disable it */ + regmap_read(priv->regmap, TIM_CCER, &val); + if (!(val & TIM_CCER_CCXE)) + regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); + + if (priv->enabled) + clk_disable(priv->clk); + + return 0; +} + +static int __maybe_unused stm32_timer_trigger_suspend(struct device *dev) +{ + struct stm32_timer_trigger *priv = dev_get_drvdata(dev); + + /* Only take care of enabled timer: don't disturb other MFD child */ + if (priv->enabled) { + /* Backup registers that may get lost in low power mode */ + regmap_read(priv->regmap, TIM_CR1, &priv->bak.cr1); + regmap_read(priv->regmap, TIM_CR2, &priv->bak.cr2); + regmap_read(priv->regmap, TIM_PSC, &priv->bak.psc); + regmap_read(priv->regmap, TIM_ARR, &priv->bak.arr); + regmap_read(priv->regmap, TIM_CNT, &priv->bak.cnt); + regmap_read(priv->regmap, TIM_SMCR, &priv->bak.smcr); + + /* Disable the timer */ + regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); + clk_disable(priv->clk); + } + + return 0; +} + +static int __maybe_unused stm32_timer_trigger_resume(struct device *dev) +{ + struct stm32_timer_trigger *priv = dev_get_drvdata(dev); + int ret; + + if (priv->enabled) { + ret = clk_enable(priv->clk); + if (ret) + return ret; + + /* restore master/slave modes */ + regmap_write(priv->regmap, TIM_SMCR, priv->bak.smcr); + regmap_write(priv->regmap, TIM_CR2, priv->bak.cr2); + + /* restore sampling_frequency (trgo / trgo2 triggers) */ + regmap_write(priv->regmap, TIM_PSC, priv->bak.psc); + regmap_write(priv->regmap, TIM_ARR, priv->bak.arr); + regmap_write(priv->regmap, TIM_CNT, priv->bak.cnt); + + /* Also re-enables the timer */ + regmap_write(priv->regmap, TIM_CR1, priv->bak.cr1); + } + + return 0; +} + +static SIMPLE_DEV_PM_OPS(stm32_timer_trigger_pm_ops, + stm32_timer_trigger_suspend, + stm32_timer_trigger_resume); + static const struct stm32_timer_trigger_cfg stm32_timer_trg_cfg = { .valids_table = valids_table, .num_valids_table = ARRAY_SIZE(valids_table), @@ -783,9 +905,11 @@ MODULE_DEVICE_TABLE(of, stm32_trig_of_match); static struct platform_driver stm32_timer_trigger_driver = { .probe = stm32_timer_trigger_probe, + .remove = stm32_timer_trigger_remove, .driver = { .name = "stm32-timer-trigger", .of_match_table = stm32_trig_of_match, + .pm = &stm32_timer_trigger_pm_ops, }, }; module_platform_driver(stm32_timer_trigger_driver); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 68cc1b2d6824..15e99a888427 100644 --- a/drivers/infiniband/core/cm.c +++ 
b/drivers/infiniband/core/cm.c @@ -1191,6 +1191,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device, /* Sharing an ib_cm_id with different handlers is not * supported */ spin_unlock_irqrestore(&cm.lock, flags); + ib_destroy_cm_id(cm_id); return ERR_PTR(-EINVAL); } refcount_inc(&cm_id_priv->refcount); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 72f032160c4b..2dec3a02ab9f 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -3212,19 +3212,26 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, int ret; id_priv = container_of(id, struct rdma_id_private, id); + memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); if (id_priv->state == RDMA_CM_IDLE) { ret = cma_bind_addr(id, src_addr, dst_addr); - if (ret) + if (ret) { + memset(cma_dst_addr(id_priv), 0, + rdma_addr_size(dst_addr)); return ret; + } } - if (cma_family(id_priv) != dst_addr->sa_family) + if (cma_family(id_priv) != dst_addr->sa_family) { + memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr)); return -EINVAL; + } - if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) { + memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr)); return -EINVAL; + } - memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); if (cma_any_addr(dst_addr)) { ret = cma_resolve_loopback(id_priv); } else { diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index b1457b3464d3..cf42acca4a3a 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -338,6 +338,20 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, qp->pd = pd; qp->uobject = uobj; qp->real_qp = qp; + + qp->qp_type = attr->qp_type; + qp->rwq_ind_tbl = attr->rwq_ind_tbl; + qp->send_cq = attr->send_cq; + qp->recv_cq = attr->recv_cq; + qp->srq = attr->srq; + qp->rwq_ind_tbl = attr->rwq_ind_tbl; + qp->event_handler = attr->event_handler; + + atomic_set(&qp->usecnt, 0); + spin_lock_init(&qp->mr_lock); + INIT_LIST_HEAD(&qp->rdma_mrs); + INIT_LIST_HEAD(&qp->sig_mrs); + /* * We don't track XRC QPs for now, because they don't have PD * and more importantly they are created internaly by driver, diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index f6c255202d7f..d0b3d35ad3e4 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -896,7 +896,9 @@ static int add_one_compat_dev(struct ib_device *device, cdev->dev.parent = device->dev.parent; rdma_init_coredev(cdev, device, read_pnet(&rnet->net)); cdev->dev.release = compatdev_release; - dev_set_name(&cdev->dev, "%s", dev_name(&device->dev)); + ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev)); + if (ret) + goto add_err; ret = device_add(&cdev->dev); if (ret) diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index ade71823370f..da8adadf4755 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -159,8 +159,10 @@ static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv) { struct list_head *e, *tmp; - list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) + list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) { + list_del(e); kfree(list_entry(e, struct iwcm_work, free_list)); + } } static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count) diff --git a/drivers/infiniband/core/nldev.c 
b/drivers/infiniband/core/nldev.c index 37b433aa7306..9eec26d10d7b 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -918,6 +918,10 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], IB_DEVICE_NAME_MAX); + if (strlen(name) == 0) { + err = -EINVAL; + goto done; + } err = ib_device_rename(device, name); goto done; } @@ -1514,7 +1518,7 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME], sizeof(ibdev_name)); - if (strchr(ibdev_name, '%')) + if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0) return -EINVAL; nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type)); @@ -1757,6 +1761,8 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, if (ret) goto err_msg; } else { + if (!tb[RDMA_NLDEV_ATTR_RES_LQPN]) + goto err_msg; qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) { cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index 4fad732f9b3c..06e5b6787443 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -273,6 +273,23 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp, return 1; } +static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg, + u32 sg_cnt, enum dma_data_direction dir) +{ + if (is_pci_p2pdma_page(sg_page(sg))) + pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir); + else + ib_dma_unmap_sg(dev, sg, sg_cnt, dir); +} + +static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg, + u32 sg_cnt, enum dma_data_direction dir) +{ + if (is_pci_p2pdma_page(sg_page(sg))) + return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir); + return ib_dma_map_sg(dev, sg, sg_cnt, dir); +} + /** * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context * @ctx: context to initialize @@ -295,11 +312,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, struct ib_device *dev = qp->pd->device; int ret; - if (is_pci_p2pdma_page(sg_page(sg))) - ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir); - else - ret = ib_dma_map_sg(dev, sg, sg_cnt, dir); - + ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir); if (!ret) return -ENOMEM; sg_cnt = ret; @@ -338,7 +351,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, return ret; out_unmap_sg: - ib_dma_unmap_sg(dev, sg, sg_cnt, dir); + rdma_rw_unmap_sg(dev, sg, sg_cnt, dir); return ret; } EXPORT_SYMBOL(rdma_rw_ctx_init); @@ -588,11 +601,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, break; } - if (is_pci_p2pdma_page(sg_page(sg))) - pci_p2pdma_unmap_sg(qp->pd->device->dma_device, sg, - sg_cnt, dir); - else - ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir); + rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir); } EXPORT_SYMBOL(rdma_rw_ctx_destroy); diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 2b4d80393bd0..75e7ec017836 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -340,20 +340,19 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp, return NULL; if (qp_attr_mask & IB_QP_PORT) - new_pps->main.port_num = - (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num; - if (qp_attr_mask & IB_QP_PKEY_INDEX) - new_pps->main.pkey_index = (qp_pps) ? 
qp_pps->main.pkey_index : - qp_attr->pkey_index; - if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT)) - new_pps->main.state = IB_PORT_PKEY_VALID; - - if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) { + new_pps->main.port_num = qp_attr->port_num; + else if (qp_pps) new_pps->main.port_num = qp_pps->main.port_num; + + if (qp_attr_mask & IB_QP_PKEY_INDEX) + new_pps->main.pkey_index = qp_attr->pkey_index; + else if (qp_pps) new_pps->main.pkey_index = qp_pps->main.pkey_index; - if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID) - new_pps->main.state = IB_PORT_PKEY_VALID; - } + + if (((qp_attr_mask & IB_QP_PKEY_INDEX) && + (qp_attr_mask & IB_QP_PORT)) || + (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)) + new_pps->main.state = IB_PORT_PKEY_VALID; if (qp_attr_mask & IB_QP_ALT_PATH) { new_pps->alt.port_num = qp_attr->alt_port_num; diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index b8c657b28380..3b1e627d9a8d 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -181,14 +181,28 @@ ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr, odp_data->page_shift = PAGE_SHIFT; odp_data->notifier.ops = ops; + /* + * A mmget must be held when registering a notifier, the owming_mm only + * has a mm_grab at this point. + */ + if (!mmget_not_zero(umem->owning_mm)) { + ret = -EFAULT; + goto out_free; + } + odp_data->tgid = get_pid(root->tgid); ret = ib_init_umem_odp(odp_data, ops); - if (ret) { - put_pid(odp_data->tgid); - kfree(odp_data); - return ERR_PTR(ret); - } + if (ret) + goto out_tgid; + mmput(umem->owning_mm); return odp_data; + +out_tgid: + put_pid(odp_data->tgid); + mmput(umem->owning_mm); +out_free: + kfree(odp_data); + return ERR_PTR(ret); } EXPORT_SYMBOL(ib_umem_odp_alloc_child); @@ -261,8 +275,8 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp) mmu_interval_notifier_remove(&umem_odp->notifier); kvfree(umem_odp->dma_list); kvfree(umem_odp->page_list); - put_pid(umem_odp->tgid); } + put_pid(umem_odp->tgid); kfree(umem_odp); } EXPORT_SYMBOL(ib_umem_odp_release); diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 1235ffb2389b..da229eab5903 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1129,17 +1129,30 @@ static const struct file_operations umad_sm_fops = { .llseek = no_llseek, }; +static struct ib_umad_port *get_port(struct ib_device *ibdev, + struct ib_umad_device *umad_dev, + unsigned int port) +{ + if (!umad_dev) + return ERR_PTR(-EOPNOTSUPP); + if (!rdma_is_port_valid(ibdev, port)) + return ERR_PTR(-EINVAL); + if (!rdma_cap_ib_mad(ibdev, port)) + return ERR_PTR(-EOPNOTSUPP); + + return &umad_dev->ports[port - rdma_start_port(ibdev)]; +} + static int ib_umad_get_nl_info(struct ib_device *ibdev, void *client_data, struct ib_client_nl_info *res) { - struct ib_umad_device *umad_dev = client_data; + struct ib_umad_port *port = get_port(ibdev, client_data, res->port); - if (!rdma_is_port_valid(ibdev, res->port)) - return -EINVAL; + if (IS_ERR(port)) + return PTR_ERR(port); res->abi = IB_USER_MAD_ABI_VERSION; - res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].dev; - + res->cdev = &port->dev; return 0; } @@ -1154,15 +1167,13 @@ MODULE_ALIAS_RDMA_CLIENT("umad"); static int ib_issm_get_nl_info(struct ib_device *ibdev, void *client_data, struct ib_client_nl_info *res) { - struct ib_umad_device *umad_dev = - ib_get_client_data(ibdev, &umad_client); + struct 
ib_umad_port *port = get_port(ibdev, client_data, res->port); - if (!rdma_is_port_valid(ibdev, res->port)) - return -EINVAL; + if (IS_ERR(port)) + return PTR_ERR(port); res->abi = IB_USER_MAD_ABI_VERSION; - res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].sm_dev; - + res->cdev = &port->sm_dev; return 0; } diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 025933752e1d..060b4ebbd2ba 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1445,16 +1445,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs, if (ret) goto err_cb; - qp->pd = pd; - qp->send_cq = attr.send_cq; - qp->recv_cq = attr.recv_cq; - qp->srq = attr.srq; - qp->rwq_ind_tbl = ind_tbl; - qp->event_handler = attr.event_handler; - qp->qp_type = attr.qp_type; - atomic_set(&qp->usecnt, 0); atomic_inc(&pd->usecnt); - qp->port = 0; if (attr.send_cq) atomic_inc(&attr.send_cq->usecnt); if (attr.recv_cq) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 3ebae3b65c28..e62c9dfc7837 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1185,16 +1185,6 @@ struct ib_qp *ib_create_qp_user(struct ib_pd *pd, if (ret) goto err; - qp->qp_type = qp_init_attr->qp_type; - qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; - - atomic_set(&qp->usecnt, 0); - qp->mrs_used = 0; - spin_lock_init(&qp->mr_lock); - INIT_LIST_HEAD(&qp->rdma_mrs); - INIT_LIST_HEAD(&qp->sig_mrs); - qp->port = 0; - if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { struct ib_qp *xrc_qp = create_xrc_qp_user(qp, qp_init_attr, udata); diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c index d106d23016ba..c22ab7b5163b 100644 --- a/drivers/infiniband/hw/hfi1/efivar.c +++ b/drivers/infiniband/hw/hfi1/efivar.c @@ -78,7 +78,7 @@ static int read_efi_var(const char *name, unsigned long *size, *size = 0; *return_data = NULL; - if (!efi_enabled(EFI_RUNTIME_SERVICES)) + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) return -EOPNOTSUPP; uni_name = kcalloc(strlen(name) + 1, sizeof(efi_char16_t), GFP_KERNEL); diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index c2f0d9ba93de..13e4203497b3 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -141,6 +141,7 @@ static int defer_packet_queue( */ xchg(&pq->state, SDMA_PKT_Q_DEFERRED); if (list_empty(&pq->busy.list)) { + pq->busy.lock = &sde->waitlock; iowait_get_priority(&pq->busy); iowait_queue(pkts_sent, &pq->busy, &sde->dmawait); } @@ -155,6 +156,7 @@ static void activate_packet_queue(struct iowait *wait, int reason) { struct hfi1_user_sdma_pkt_q *pq = container_of(wait, struct hfi1_user_sdma_pkt_q, busy); + pq->busy.lock = NULL; xchg(&pq->state, SDMA_PKT_Q_ACTIVE); wake_up(&wait->wait_dma); }; @@ -256,6 +258,21 @@ pq_reqs_nomem: return ret; } +static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq) +{ + unsigned long flags; + seqlock_t *lock = pq->busy.lock; + + if (!lock) + return; + write_seqlock_irqsave(lock, flags); + if (!list_empty(&pq->busy.list)) { + list_del_init(&pq->busy.list); + pq->busy.lock = NULL; + } + write_sequnlock_irqrestore(lock, flags); +} + int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, struct hfi1_ctxtdata *uctxt) { @@ -281,6 +298,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, kfree(pq->reqs); kfree(pq->req_in_use); kmem_cache_destroy(pq->txreq_cache); + flush_pq_iowait(pq); kfree(pq); } 
else { spin_unlock(&fd->pq_rcu_lock); @@ -587,11 +605,12 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, if (ret < 0) { if (ret != -EBUSY) goto free_req; - wait_event_interruptible_timeout( + if (wait_event_interruptible_timeout( pq->busy.wait_dma, - (pq->state == SDMA_PKT_Q_ACTIVE), + pq->state == SDMA_PKT_Q_ACTIVE, msecs_to_jiffies( - SDMA_IOWAIT_TIMEOUT)); + SDMA_IOWAIT_TIMEOUT)) <= 0) + flush_pq_iowait(pq); } } *count += idx; diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 089e201d7550..2f6323ad9c59 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -515,10 +515,11 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet, opa_get_lid(packet->dlid, 9B)); if (!mcast) goto drop; + rcu_read_lock(); list_for_each_entry_rcu(p, &mcast->qp_list, list) { packet->qp = p->qp; if (hfi1_do_pkey_check(packet)) - goto drop; + goto unlock_drop; spin_lock_irqsave(&packet->qp->r_lock, flags); packet_handler = qp_ok(packet); if (likely(packet_handler)) @@ -527,6 +528,7 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet, ibp->rvp.n_pkt_drops++; spin_unlock_irqrestore(&packet->qp->r_lock, flags); } + rcu_read_unlock(); /* * Notify rvt_multicast_detach() if it is waiting for us * to finish. diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 367a71bc5f4b..3dec3de903b7 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -330,6 +330,22 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, dump_cqe(dev, cqe); } +static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, + u16 tail, u16 head) +{ + u16 idx; + + do { + idx = tail & (qp->sq.wqe_cnt - 1); + if (idx == head) + break; + + tail = qp->sq.w_list[idx].next; + } while (1); + tail = qp->sq.w_list[idx].next; + qp->sq.last_poll = tail; +} + static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) { mlx5_frag_buf_free(dev->mdev, &buf->frag_buf); @@ -368,7 +384,7 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe, } static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc, - int *npolled, int is_send) + int *npolled, bool is_send) { struct mlx5_ib_wq *wq; unsigned int cur; @@ -383,10 +399,16 @@ static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc, return; for (i = 0; i < cur && np < num_entries; i++) { - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; + unsigned int idx; + + idx = (is_send) ? 
wq->last_poll : wq->tail; + idx &= (wq->wqe_cnt - 1); + wc->wr_id = wq->wrid[idx]; wc->status = IB_WC_WR_FLUSH_ERR; wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR; wq->tail++; + if (is_send) + wq->last_poll = wq->w_list[idx].next; np++; wc->qp = &qp->ibqp; wc++; @@ -473,6 +495,7 @@ repoll: wqe_ctr = be16_to_cpu(cqe64->wqe_counter); idx = wqe_ctr & (wq->wqe_cnt - 1); handle_good_req(wc, cqe64, wq, idx); + handle_atomics(*cur_qp, cqe64, wq->last_poll, idx); wc->wr_id = wq->wrid[idx]; wq->tail = wq->wqe_head[idx] + 1; wc->status = IB_WC_SUCCESS; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index e4bcfa81b70a..ffa7c2100edb 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -5722,9 +5722,10 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port - 1); - /* Q counters are in the beginning of all counters */ return rdma_alloc_hw_stats_struct(cnts->names, - cnts->num_q_counters, + cnts->num_q_counters + + cnts->num_cong_counters + + cnts->num_ext_ppcnt_counters, RDMA_HW_STATS_DEFAULT_LIFESPAN); } diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index d9bffcc93587..f3bdbd5e5096 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -288,6 +288,7 @@ struct mlx5_ib_wq { unsigned head; unsigned tail; u16 cur_post; + u16 last_poll; void *cur_edge; }; @@ -636,6 +637,7 @@ struct mlx5_ib_mr { /* For ODP and implicit */ atomic_t num_deferred_work; + wait_queue_head_t q_deferred_work; struct xarray implicit_children; union { struct rcu_head rcu; diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 4216814ba871..bf50cd91f472 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -235,7 +235,8 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt) mr->parent = NULL; mlx5_mr_cache_free(mr->dev, mr); ib_umem_odp_release(odp); - atomic_dec(&imr->num_deferred_work); + if (atomic_dec_and_test(&imr->num_deferred_work)) + wake_up(&imr->q_deferred_work); } static void free_implicit_child_mr_work(struct work_struct *work) @@ -554,6 +555,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd, imr->umem = &umem_odp->umem; imr->is_odp_implicit = true; atomic_set(&imr->num_deferred_work, 0); + init_waitqueue_head(&imr->q_deferred_work); xa_init(&imr->implicit_children); err = mlx5_ib_update_xlt(imr, 0, @@ -611,10 +613,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr) * under xa_lock while the child is in the xarray. Thus at this point * it is only decreasing, and all work holding it is now on the wq. */ - if (atomic_read(&imr->num_deferred_work)) { - flush_workqueue(system_unbound_wq); - WARN_ON(atomic_read(&imr->num_deferred_work)); - } + wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work)); /* * Fence the imr before we destroy the children. This allows us to @@ -645,10 +644,7 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr) /* Wait for all running page-fault handlers to finish. 
*/ synchronize_srcu(&mr->dev->odp_srcu); - if (atomic_read(&mr->num_deferred_work)) { - flush_workqueue(system_unbound_wq); - WARN_ON(atomic_read(&mr->num_deferred_work)); - } + wait_event(mr->q_deferred_work, !atomic_read(&mr->num_deferred_work)); dma_fence_odp_mr(mr); } @@ -1720,7 +1716,8 @@ static void destroy_prefetch_work(struct prefetch_mr_work *work) u32 i; for (i = 0; i < work->num_sge; ++i) - atomic_dec(&work->frags[i].mr->num_deferred_work); + if (atomic_dec_and_test(&work->frags[i].mr->num_deferred_work)) + wake_up(&work->frags[i].mr->q_deferred_work); kvfree(work); } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 957f3a52589b..8fe149e808af 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3775,6 +3775,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, qp->sq.cur_post = 0; if (qp->sq.wqe_cnt) qp->sq.cur_edge = get_sq_edge(&qp->sq, 0); + qp->sq.last_poll = 0; qp->db.db[MLX5_RCV_DBR] = 0; qp->db.db[MLX5_SND_DBR] = 0; } @@ -6204,6 +6205,10 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, if (udata->outlen && udata->outlen < min_resp_len) return ERR_PTR(-EINVAL); + if (!capable(CAP_SYS_RAWIO) && + init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) + return ERR_PTR(-EPERM); + dev = to_mdev(pd->device); switch (init_attr->wq_type) { case IB_WQT_RQ: diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 33778d451b82..5ef93f8f17a1 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -329,8 +329,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) if (mcast == NULL) goto drop; this_cpu_inc(ibp->pmastats->n_multicast_rcv); + rcu_read_lock(); list_for_each_entry_rcu(p, &mcast->qp_list, list) qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); + rcu_read_unlock(); /* * Notify rvt_multicast_detach() if it is waiting for us * to finish. 
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index 13d7f66eadab..5724cbbe38b1 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c @@ -327,7 +327,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) if (cq->ip) kref_put(&cq->ip->ref, rvt_release_mmap_info); else - vfree(cq->queue); + vfree(cq->kqueue); } /** diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index 96ed349c0939..5cd40fb9e20c 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -388,6 +388,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev) { .max_segment_size = SZ_2G }; base_dev->num_comp_vectors = num_possible_cpus(); + xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1); + xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1); + ib_set_device_ops(base_dev, &siw_device_ops); rv = ib_device_set_netdev(base_dev, netdev, 1); if (rv) @@ -415,9 +418,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev) sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR; sdev->attrs.max_srq_sge = SIW_MAX_SGE; - xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1); - xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1); - INIT_LIST_HEAD(&sdev->cep_list); INIT_LIST_HEAD(&sdev->qp_list); diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index b273e421e910..a1a035270cab 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2575,6 +2575,17 @@ isert_wait4logout(struct isert_conn *isert_conn) } } +static void +isert_wait4cmds(struct iscsi_conn *conn) +{ + isert_info("iscsi_conn %p\n", conn); + + if (conn->sess) { + target_sess_cmd_list_set_waiting(conn->sess->se_sess); + target_wait_for_sess_cmds(conn->sess->se_sess); + } +} + /** * isert_put_unsol_pending_cmds() - Drop commands waiting for * unsolicitate dataout @@ -2622,6 +2633,7 @@ static void isert_wait_conn(struct iscsi_conn *conn) ib_drain_qp(isert_conn->qp); isert_put_unsol_pending_cmds(conn); + isert_wait4cmds(conn); isert_wait4logout(isert_conn); queue_work(isert_release_wq, &isert_conn->release_work); diff --git a/drivers/input/input.c b/drivers/input/input.c index fce43e62dd45..3cfd2c18eebd 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -190,6 +190,7 @@ static void input_repeat_key(struct timer_list *t) input_value_sync }; + input_set_timestamp(dev, ktime_get()); input_pass_values(dev, vals, ARRAY_SIZE(vals)); if (dev->rep[REP_PERIOD]) diff --git a/drivers/input/keyboard/tm2-touchkey.c b/drivers/input/keyboard/tm2-touchkey.c index 14b55bacdd0f..fb078e049413 100644 --- a/drivers/input/keyboard/tm2-touchkey.c +++ b/drivers/input/keyboard/tm2-touchkey.c @@ -75,6 +75,14 @@ static struct touchkey_variant aries_touchkey_variant = { .cmd_led_off = ARIES_TOUCHKEY_CMD_LED_OFF, }; +static const struct touchkey_variant tc360_touchkey_variant = { + .keycode_reg = 0x00, + .base_reg = 0x00, + .fixed_regulator = true, + .cmd_led_on = TM2_TOUCHKEY_CMD_LED_ON, + .cmd_led_off = TM2_TOUCHKEY_CMD_LED_OFF, +}; + static int tm2_touchkey_led_brightness_set(struct led_classdev *led_dev, enum led_brightness brightness) { @@ -327,6 +335,9 @@ static const struct of_device_id tm2_touchkey_of_match[] = { }, { .compatible = "cypress,aries-touchkey", .data = &aries_touchkey_variant, + }, { + .compatible = "coreriver,tc360-touchkey", + .data = &tc360_touchkey_variant, }, { }, }; diff --git a/drivers/input/mouse/synaptics.c 
b/drivers/input/mouse/synaptics.c index 2c666fb34625..4d2036209b45 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -186,6 +186,7 @@ static const char * const smbus_pnp_ids[] = { "SYN3052", /* HP EliteBook 840 G4 */ "SYN3221", /* HP 15-ay000 */ "SYN323d", /* HP Spectre X360 13-w013dx */ + "SYN3257", /* HP Envy 13-ad105ng */ NULL }; diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c index 6adea8a3e8fb..ffa39ab153f2 100644 --- a/drivers/input/rmi4/rmi_f11.c +++ b/drivers/input/rmi4/rmi_f11.c @@ -1203,8 +1203,8 @@ static int rmi_f11_initialize(struct rmi_function *fn) * If distance threshold values are set, switch to reduced reporting * mode so they actually get used by the controller. */ - if (ctrl->ctrl0_11[RMI_F11_DELTA_X_THRESHOLD] || - ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD]) { + if (sensor->axis_align.delta_x_threshold || + sensor->axis_align.delta_y_threshold) { ctrl->ctrl0_11[0] &= ~RMI_F11_REPORT_MODE_MASK; ctrl->ctrl0_11[0] |= RMI_F11_REPORT_MODE_REDUCED; } diff --git a/drivers/input/touchscreen/chipone_icn8505.c b/drivers/input/touchscreen/chipone_icn8505.c index c768186ce856..f9ca5502ac8c 100644 --- a/drivers/input/touchscreen/chipone_icn8505.c +++ b/drivers/input/touchscreen/chipone_icn8505.c @@ -288,7 +288,7 @@ static int icn8505_upload_fw(struct icn8505_data *icn8505) * we may need it at resume. Having loaded it once will make the * firmware class code cache it at suspend/resume. */ - error = request_firmware(&fw, icn8505->firmware_name, dev); + error = firmware_request_platform(&fw, icn8505->firmware_name, dev); if (error) { dev_err(dev, "Firmware request error %d\n", error); return error; diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c index 6ed9f22e6401..fe245439adee 100644 --- a/drivers/input/touchscreen/raydium_i2c_ts.c +++ b/drivers/input/touchscreen/raydium_i2c_ts.c @@ -432,7 +432,7 @@ static int raydium_i2c_write_object(struct i2c_client *client, return 0; } -static bool raydium_i2c_boot_trigger(struct i2c_client *client) +static int raydium_i2c_boot_trigger(struct i2c_client *client) { static const u8 cmd[7][6] = { { 0x08, 0x0C, 0x09, 0x00, 0x50, 0xD7 }, @@ -457,10 +457,10 @@ static bool raydium_i2c_boot_trigger(struct i2c_client *client) } } - return false; + return 0; } -static bool raydium_i2c_fw_trigger(struct i2c_client *client) +static int raydium_i2c_fw_trigger(struct i2c_client *client) { static const u8 cmd[5][11] = { { 0, 0x09, 0x71, 0x0C, 0x09, 0x00, 0x50, 0xD7, 0, 0, 0 }, @@ -483,7 +483,7 @@ static bool raydium_i2c_fw_trigger(struct i2c_client *client) } } - return false; + return 0; } static int raydium_i2c_check_path(struct i2c_client *client) diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index ad8b6a2bfd36..8fa2f3b7cfd8 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c @@ -288,7 +288,7 @@ static int silead_ts_load_fw(struct i2c_client *client) dev_dbg(dev, "Firmware file name: %s", data->fw_name); - error = request_firmware(&fw, data->fw_name, dev); + error = firmware_request_platform(&fw, data->fw_name, dev); if (error) { dev_err(dev, "Firmware request error %d\n", error); return error; diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index f277e467156f..2c6515e3ecf1 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -445,6 +445,11 @@ struct icc_path *of_icc_get(struct device *dev, const char *name) path->name = 
kasprintf(GFP_KERNEL, "%s-%s", src_node->name, dst_node->name); + if (!path->name) { + kfree(path); + return ERR_PTR(-ENOMEM); + } + return path; } EXPORT_SYMBOL_GPL(of_icc_get); @@ -579,6 +584,10 @@ struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id) } path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name); + if (!path->name) { + kfree(path); + path = ERR_PTR(-ENOMEM); + } out: mutex_unlock(&icc_lock); return path; diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 2104fb8afc06..9f33fdb3bb05 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -14,8 +14,8 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o -obj-$(CONFIG_ARM_SMMU) += arm-smmu-mod.o -arm-smmu-mod-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o +obj-$(CONFIG_ARM_SMMU) += arm_smmu.o +arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o obj-$(CONFIG_DMAR_TABLE) += dmar.o obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index aac132bd1ef0..20cce366e951 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3826,7 +3826,7 @@ int amd_iommu_activate_guest_mode(void *data) entry->lo.fields_vapic.ga_tag = ir_data->ga_tag; return modify_irte_ga(ir_data->irq_2_irte.devid, - ir_data->irq_2_irte.index, entry, NULL); + ir_data->irq_2_irte.index, entry, ir_data); } EXPORT_SYMBOL(amd_iommu_activate_guest_mode); @@ -3852,7 +3852,7 @@ int amd_iommu_deactivate_guest_mode(void *data) APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); return modify_irte_ga(ir_data->irq_2_irte.devid, - ir_data->irq_2_irte.index, entry, NULL); + ir_data->irq_2_irte.index, entry, ir_data); } EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode); diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 2759a8d57b7f..6be3853a5d97 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2523,6 +2523,7 @@ static int __init early_amd_iommu_init(void) struct acpi_table_header *ivrs_base; acpi_status status; int i, remap_cache_sz, ret = 0; + u32 pci_id; if (!amd_iommu_detected) return -ENODEV; @@ -2610,6 +2611,16 @@ static int __init early_amd_iommu_init(void) if (ret) goto out; + /* Disable IOMMU if there's Stoney Ridge graphics */ + for (i = 0; i < 32; i++) { + pci_id = read_pci_config(0, i, 0, 0); + if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) { + pr_info("Disable IOMMU on Stoney Ridge\n"); + amd_iommu_disabled = true; + break; + } + } + /* Disable any previously enabled IOMMUs */ if (!is_kdump_kernel() || amd_iommu_disabled) disable_iommus(); @@ -2718,7 +2729,7 @@ static int __init state_next(void) ret = early_amd_iommu_init(); init_state = ret ? 
IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) { - pr_info("AMD IOMMU disabled on kernel command-line\n"); + pr_info("AMD IOMMU disabled\n"); init_state = IOMMU_CMDLINE_DISABLED; ret = -EINVAL; } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index a2e96a5fd9a7..ba128d1cdaee 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -177,15 +177,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, start -= iova_offset(iovad, start); num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); - msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL); - if (!msi_page) - return -ENOMEM; - for (i = 0; i < num_pages; i++) { - msi_page[i].phys = start; - msi_page[i].iova = start; - INIT_LIST_HEAD(&msi_page[i].list); - list_add(&msi_page[i].list, &cookie->msi_page_list); + msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL); + if (!msi_page) + return -ENOMEM; + + msi_page->phys = start; + msi_page->iova = start; + INIT_LIST_HEAD(&msi_page->list); + list_add(&msi_page->list, &cookie->msi_page_list); start += iovad->granule; } diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 071bb42bbbc5..f77dae7ba7d4 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -28,6 +28,7 @@ #include <linux/slab.h> #include <linux/iommu.h> #include <linux/numa.h> +#include <linux/limits.h> #include <asm/irq_remapping.h> #include <asm/iommu_table.h> @@ -128,6 +129,13 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event) BUG_ON(dev->is_virtfn); + /* + * Ignore devices that have a domain number higher than what can + * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000 + */ + if (pci_domain_nr(dev->bus) > U16_MAX) + return NULL; + /* Only generate path[] for device addition event */ if (event == BUS_NOTIFY_ADD_DEVICE) for (tmp = dev; tmp; tmp = tmp->bus->self) @@ -363,7 +371,8 @@ dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd) { struct dmar_drhd_unit *dmaru; - list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list) + list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list, + dmar_rcu_check()) if (dmaru->segment == drhd->segment && dmaru->reg_base_addr == drhd->address) return dmaru; @@ -440,12 +449,13 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header, /* Check for NUL termination within the designated length */ if (strnlen(andd->device_name, header->length - 8) == header->length - 8) { - WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND, + pr_warn(FW_BUG "Your BIOS is broken; ANDD object name is not NUL-terminated\n" "BIOS vendor: %s; Ver: %s; Product Version: %s\n", dmi_get_system_info(DMI_BIOS_VENDOR), dmi_get_system_info(DMI_BIOS_VERSION), dmi_get_system_info(DMI_PRODUCT_VERSION)); + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); return -EINVAL; } pr_info("ANDD device: %x name: %s\n", andd->device_number, @@ -471,14 +481,14 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg) return 0; } } - WARN_TAINT( - 1, TAINT_FIRMWARE_WORKAROUND, + pr_warn(FW_BUG "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n" "BIOS vendor: %s; Ver: %s; Product Version: %s\n", - drhd->reg_base_addr, + rhsa->base_address, dmi_get_system_info(DMI_BIOS_VENDOR), dmi_get_system_info(DMI_BIOS_VERSION), dmi_get_system_info(DMI_PRODUCT_VERSION)); + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); return 0; } @@ -827,14 +837,14 @@ int __init dmar_table_init(void) static void warn_invalid_dmar(u64 addr, const 
char *message) { - WARN_TAINT_ONCE( - 1, TAINT_FIRMWARE_WORKAROUND, + pr_warn_once(FW_BUG "Your BIOS is broken; DMAR reported at address %llx%s!\n" "BIOS vendor: %s; Ver: %s; Product Version: %s\n", addr, message, dmi_get_system_info(DMI_BIOS_VENDOR), dmi_get_system_info(DMI_BIOS_VERSION), dmi_get_system_info(DMI_PRODUCT_VERSION)); + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); } static int __ref diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c index c1257bef553c..3eb1fe240fb0 100644 --- a/drivers/iommu/intel-iommu-debugfs.c +++ b/drivers/iommu/intel-iommu-debugfs.c @@ -33,38 +33,42 @@ struct iommu_regset { #define IOMMU_REGSET_ENTRY(_reg_) \ { DMAR_##_reg_##_REG, __stringify(_reg_) } -static const struct iommu_regset iommu_regs[] = { + +static const struct iommu_regset iommu_regs_32[] = { IOMMU_REGSET_ENTRY(VER), - IOMMU_REGSET_ENTRY(CAP), - IOMMU_REGSET_ENTRY(ECAP), IOMMU_REGSET_ENTRY(GCMD), IOMMU_REGSET_ENTRY(GSTS), - IOMMU_REGSET_ENTRY(RTADDR), - IOMMU_REGSET_ENTRY(CCMD), IOMMU_REGSET_ENTRY(FSTS), IOMMU_REGSET_ENTRY(FECTL), IOMMU_REGSET_ENTRY(FEDATA), IOMMU_REGSET_ENTRY(FEADDR), IOMMU_REGSET_ENTRY(FEUADDR), - IOMMU_REGSET_ENTRY(AFLOG), IOMMU_REGSET_ENTRY(PMEN), IOMMU_REGSET_ENTRY(PLMBASE), IOMMU_REGSET_ENTRY(PLMLIMIT), + IOMMU_REGSET_ENTRY(ICS), + IOMMU_REGSET_ENTRY(PRS), + IOMMU_REGSET_ENTRY(PECTL), + IOMMU_REGSET_ENTRY(PEDATA), + IOMMU_REGSET_ENTRY(PEADDR), + IOMMU_REGSET_ENTRY(PEUADDR), +}; + +static const struct iommu_regset iommu_regs_64[] = { + IOMMU_REGSET_ENTRY(CAP), + IOMMU_REGSET_ENTRY(ECAP), + IOMMU_REGSET_ENTRY(RTADDR), + IOMMU_REGSET_ENTRY(CCMD), + IOMMU_REGSET_ENTRY(AFLOG), IOMMU_REGSET_ENTRY(PHMBASE), IOMMU_REGSET_ENTRY(PHMLIMIT), IOMMU_REGSET_ENTRY(IQH), IOMMU_REGSET_ENTRY(IQT), IOMMU_REGSET_ENTRY(IQA), - IOMMU_REGSET_ENTRY(ICS), IOMMU_REGSET_ENTRY(IRTA), IOMMU_REGSET_ENTRY(PQH), IOMMU_REGSET_ENTRY(PQT), IOMMU_REGSET_ENTRY(PQA), - IOMMU_REGSET_ENTRY(PRS), - IOMMU_REGSET_ENTRY(PECTL), - IOMMU_REGSET_ENTRY(PEDATA), - IOMMU_REGSET_ENTRY(PEADDR), - IOMMU_REGSET_ENTRY(PEUADDR), IOMMU_REGSET_ENTRY(MTRRCAP), IOMMU_REGSET_ENTRY(MTRRDEF), IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000), @@ -127,10 +131,16 @@ static int iommu_regset_show(struct seq_file *m, void *unused) * by adding the offset to the pointer (virtual address). 
*/ raw_spin_lock_irqsave(&iommu->register_lock, flag); - for (i = 0 ; i < ARRAY_SIZE(iommu_regs); i++) { - value = dmar_readq(iommu->reg + iommu_regs[i].offset); + for (i = 0 ; i < ARRAY_SIZE(iommu_regs_32); i++) { + value = dmar_readl(iommu->reg + iommu_regs_32[i].offset); + seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n", + iommu_regs_32[i].regs, iommu_regs_32[i].offset, + value); + } + for (i = 0 ; i < ARRAY_SIZE(iommu_regs_64); i++) { + value = dmar_readq(iommu->reg + iommu_regs_64[i].offset); seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n", - iommu_regs[i].regs, iommu_regs[i].offset, + iommu_regs_64[i].regs, iommu_regs_64[i].offset, value); } raw_spin_unlock_irqrestore(&iommu->register_lock, flag); @@ -272,9 +282,16 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; + u32 sts; rcu_read_lock(); for_each_active_iommu(iommu, drhd) { + sts = dmar_readl(iommu->reg + DMAR_GSTS_REG); + if (!(sts & DMA_GSTS_TES)) { + seq_printf(m, "DMA Remapping is not enabled on %s\n", + iommu->name); + continue; + } root_tbl_walk(m, iommu); seq_putc(m, '\n'); } @@ -415,6 +432,7 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused) struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; u64 irta; + u32 sts; rcu_read_lock(); for_each_active_iommu(iommu, drhd) { @@ -424,7 +442,8 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused) seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n", iommu->name); - if (iommu->ir_table) { + sts = dmar_readl(iommu->reg + DMAR_GSTS_REG); + if (iommu->ir_table && (sts & DMA_GSTS_IRES)) { irta = virt_to_phys(iommu->ir_table->base); seq_printf(m, " IR table address:%llx\n", irta); ir_tbl_remap_entry_show(m, iommu); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 9dc37672bf89..4be549478691 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -762,6 +762,11 @@ static int iommu_dummy(struct device *dev) return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; } +static bool attach_deferred(struct device *dev) +{ + return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO; +} + /** * is_downstream_to_pci_bridge - test if a device belongs to the PCI * sub-hierarchy of a candidate PCI-PCI bridge @@ -2510,8 +2515,7 @@ struct dmar_domain *find_domain(struct device *dev) { struct device_domain_info *info; - if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO || - dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)) + if (unlikely(attach_deferred(dev) || iommu_dummy(dev))) return NULL; if (dev_is_pci(dev)) @@ -2525,18 +2529,14 @@ struct dmar_domain *find_domain(struct device *dev) return NULL; } -static struct dmar_domain *deferred_attach_domain(struct device *dev) +static void do_deferred_attach(struct device *dev) { - if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) { - struct iommu_domain *domain; - - dev->archdata.iommu = NULL; - domain = iommu_get_domain_for_dev(dev); - if (domain) - intel_iommu_attach_device(domain, dev); - } + struct iommu_domain *domain; - return find_domain(dev); + dev->archdata.iommu = NULL; + domain = iommu_get_domain_for_dev(dev); + if (domain) + intel_iommu_attach_device(domain, dev); } static inline struct device_domain_info * @@ -2916,7 +2916,7 @@ static int identity_mapping(struct device *dev) struct device_domain_info *info; info = dev->archdata.iommu; - if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO) + if (info) return 
(info->domain == si_domain); return 0; @@ -3587,6 +3587,9 @@ static bool iommu_need_mapping(struct device *dev) if (iommu_dummy(dev)) return false; + if (unlikely(attach_deferred(dev))) + do_deferred_attach(dev); + ret = identity_mapping(dev); if (ret) { u64 dma_mask = *dev->dma_mask; @@ -3635,7 +3638,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, BUG_ON(dir == DMA_NONE); - domain = deferred_attach_domain(dev); + domain = find_domain(dev); if (!domain) return DMA_MAPPING_ERROR; @@ -3855,7 +3858,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele if (!iommu_need_mapping(dev)) return dma_direct_map_sg(dev, sglist, nelems, dir, attrs); - domain = deferred_attach_domain(dev); + domain = find_domain(dev); if (!domain) return 0; @@ -3950,7 +3953,11 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size, int prot = 0; int ret; - domain = deferred_attach_domain(dev); + if (unlikely(attach_deferred(dev))) + do_deferred_attach(dev); + + domain = find_domain(dev); + if (WARN_ON(dir == DMA_NONE || !domain)) return DMA_MAPPING_ERROR; @@ -4254,10 +4261,11 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) /* we know that the this iommu should be at offset 0xa000 from vtbar */ drhd = dmar_find_matched_drhd_unit(pdev); - if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000, - TAINT_FIRMWARE_WORKAROUND, - "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n")) + if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) { + pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"); + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; + } } DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu); @@ -4453,14 +4461,16 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) struct dmar_rmrr_unit *rmrru; rmrr = (struct acpi_dmar_reserved_memory *)header; - if (rmrr_sanity_check(rmrr)) - WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND, + if (rmrr_sanity_check(rmrr)) { + pr_warn(FW_BUG "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n" "BIOS vendor: %s; Ver: %s; Product Version: %s\n", rmrr->base_address, rmrr->end_address, dmi_get_system_info(DMI_BIOS_VENDOR), dmi_get_system_info(DMI_BIOS_VERSION), dmi_get_system_info(DMI_PRODUCT_VERSION)); + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); + } rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); if (!rmrru) @@ -5123,6 +5133,9 @@ int __init intel_iommu_init(void) down_write(&dmar_global_lock); + if (!no_iommu) + intel_iommu_debugfs_init(); + if (no_iommu || dmar_disabled) { /* * We exit the function here to ensure IOMMU's remapping and @@ -5186,6 +5199,7 @@ int __init intel_iommu_init(void) init_iommu_pm_ops(); + down_read(&dmar_global_lock); for_each_active_iommu(iommu, drhd) { iommu_device_sysfs_add(&iommu->iommu, NULL, intel_iommu_groups, @@ -5193,6 +5207,7 @@ int __init intel_iommu_init(void) iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops); iommu_device_register(&iommu->iommu); } + up_read(&dmar_global_lock); bus_set_iommu(&pci_bus_type, &intel_iommu_ops); if (si_domain && !hw_pass_through) @@ -5203,7 +5218,6 @@ int __init intel_iommu_init(void) down_read(&dmar_global_lock); if (probe_acpi_namespace_devices()) pr_warn("ACPI name space devices didn't probe correctly\n"); - up_read(&dmar_global_lock); /* Finally, we enable the DMA remapping hardware. 
*/ for_each_iommu(iommu, drhd) { @@ -5212,10 +5226,11 @@ int __init intel_iommu_init(void) iommu_disable_protect_mem_regions(iommu); } + up_read(&dmar_global_lock); + pr_info("Intel(R) Virtualization Technology for Directed I/O\n"); intel_iommu_enabled = 1; - intel_iommu_debugfs_init(); return 0; @@ -5693,8 +5708,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, u64 phys = 0; pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level); - if (pte) - phys = dma_pte_addr(pte); + if (pte && dma_pte_present(pte)) + phys = dma_pte_addr(pte) + + (iova & (BIT_MASK(level_to_offset_bits(level) + + VTD_PAGE_SHIFT) - 1)); return phys; } @@ -6133,7 +6150,7 @@ intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, struct device *dev) { - return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO; + return attach_deferred(dev); } static int diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 983b08477e64..04fbd4bf0ff9 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -468,7 +468,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, arm_lpae_iopte *ptep = data->pgd; int ret, lvl = data->start_level; arm_lpae_iopte prot; - long iaext = (long)iova >> cfg->ias; + long iaext = (s64)iova >> cfg->ias; /* If no access, then nothing to do */ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) @@ -645,7 +645,7 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); struct io_pgtable_cfg *cfg = &data->iop.cfg; arm_lpae_iopte *ptep = data->pgd; - long iaext = (long)iova >> cfg->ias; + long iaext = (s64)iova >> cfg->ias; if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size)) return 0; diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 39759db4f003..4328da0b0a9f 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -344,21 +344,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain) { struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); - if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */ - return; - iommu_put_dma_cookie(domain); - /* NOTE: unmap can be called after client device is powered off, - * for example, with GPUs or anything involving dma-buf. So we - * cannot rely on the device_link. Make sure the IOMMU is on to - * avoid unclocked accesses in the TLB inv path: - */ - pm_runtime_get_sync(qcom_domain->iommu->dev); - - free_io_pgtable_ops(qcom_domain->pgtbl_ops); - - pm_runtime_put_sync(qcom_domain->iommu->dev); + if (qcom_domain->iommu) { + /* + * NOTE: unmap can be called after client device is powered + * off, for example, with GPUs or anything involving dma-buf. + * So we cannot rely on the device_link. 
Make sure the IOMMU + * is on to avoid unclocked accesses in the TLB inv path: + */ + pm_runtime_get_sync(qcom_domain->iommu->dev); + free_io_pgtable_ops(qcom_domain->pgtbl_ops); + pm_runtime_put_sync(qcom_domain->iommu->dev); + } kfree(qcom_domain); } @@ -404,7 +402,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); unsigned i; - if (!qcom_domain->iommu) + if (WARN_ON(!qcom_domain->iommu)) return; pm_runtime_get_sync(qcom_iommu->dev); @@ -417,8 +415,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de ctx->domain = NULL; } pm_runtime_put_sync(qcom_iommu->dev); - - qcom_domain->iommu = NULL; } static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 6d397732138d..24fe08702ef7 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -458,7 +458,7 @@ config IMX_IRQSTEER Support for the i.MX IRQSTEER interrupt multiplexer/remapper. config IMX_INTMUX - def_bool y if ARCH_MXC + def_bool y if ARCH_MXC || COMPILE_TEST select IRQ_DOMAIN help Support for the i.MX INTMUX interrupt multiplexer. diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c index bb1ad451392f..2c999dc310c1 100644 --- a/drivers/irqchip/irq-atmel-aic.c +++ b/drivers/irqchip/irq-atmel-aic.c @@ -83,7 +83,7 @@ static int aic_retrigger(struct irq_data *d) irq_reg_writel(gc, d->mask, AT91_AIC_ISCR); irq_gc_unlock(gc); - return 0; + return 1; } static int aic_set_type(struct irq_data *d, unsigned type) diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index 29333497ba10..fc1b3a9cdafc 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -128,7 +128,7 @@ static int aic5_retrigger(struct irq_data *d) irq_reg_writel(bgc, 1, AT91_AIC5_ISCR); irq_gc_unlock(bgc); - return 0; + return 1; } static int aic5_set_type(struct irq_data *d, unsigned type) diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c index 418245d31921..a1e004af23e7 100644 --- a/drivers/irqchip/irq-bcm2835.c +++ b/drivers/irqchip/irq-bcm2835.c @@ -61,6 +61,7 @@ | SHORTCUT1_MASK | SHORTCUT2_MASK) #define REG_FIQ_CONTROL 0x0c +#define FIQ_CONTROL_ENABLE BIT(7) #define NR_BANKS 3 #define IRQS_PER_BANK 32 @@ -135,6 +136,7 @@ static int __init armctrl_of_init(struct device_node *node, { void __iomem *base; int irq, b, i; + u32 reg; base = of_iomap(node, 0); if (!base) @@ -157,6 +159,19 @@ static int __init armctrl_of_init(struct device_node *node, handle_level_irq); irq_set_probe(irq); } + + reg = readl_relaxed(intc.enable[b]); + if (reg) { + writel_relaxed(reg, intc.disable[b]); + pr_err(FW_BUG "Bootloader left irq enabled: " + "bank %d irq %*pbl\n", b, IRQS_PER_BANK, &reg); + } + } + + reg = readl_relaxed(base + REG_FIQ_CONTROL); + if (reg & FIQ_CONTROL_ENABLE) { + writel_relaxed(0, base + REG_FIQ_CONTROL); + pr_err(FW_BUG "Bootloader left fiq enabled\n"); } if (is_2836) { diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index cbf01afcd2a6..eb9bce93cd05 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -50,7 +50,7 @@ struct bcm7038_l1_chip { struct bcm7038_l1_cpu { void __iomem *map_base; - u32 mask_cache[0]; + u32 mask_cache[]; }; /* diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 83b1186ffcad..54d142ccc63a 100644 --- 
a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -96,6 +96,7 @@ struct its_node { struct mutex dev_alloc_lock; struct list_head entry; void __iomem *base; + void __iomem *sgir_base; phys_addr_t phys_base; struct its_cmd_block *cmd_base; struct its_cmd_block *cmd_write; @@ -188,6 +189,15 @@ static DEFINE_IDA(its_vpeid_ida); #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) +/* + * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we + * always have vSGIs mapped. + */ +static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its) +{ + return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]); +} + static u16 get_its_list(struct its_vm *vm) { struct its_node *its; @@ -197,7 +207,7 @@ static u16 get_its_list(struct its_vm *vm) if (!is_v4(its)) continue; - if (vm->vlpi_count[its->list_nr]) + if (require_its_list_vmovp(vm, its)) __set_bit(its->list_nr, &its_list); } @@ -239,15 +249,41 @@ static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) return NULL; } -static int irq_to_cpuid(struct irq_data *d) +static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags) +{ + raw_spin_lock_irqsave(&vpe->vpe_lock, *flags); + return vpe->col_idx; +} + +static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) +{ + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); +} + +static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) { - struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_vlpi_map *map = get_vlpi_map(d); + int cpu; - if (map) - return map->vpe->col_idx; + if (map) { + cpu = vpe_to_cpuid_lock(map->vpe, flags); + } else { + /* Physical LPIs are already locked via the irq_desc lock */ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + cpu = its_dev->event_map.col_map[its_get_event_id(d)]; + /* Keep GCC quiet... 
*/ + *flags = 0; + } + + return cpu; +} + +static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags) +{ + struct its_vlpi_map *map = get_vlpi_map(d); - return its_dev->event_map.col_map[its_get_event_id(d)]; + if (map) + vpe_to_cpuid_unlock(map->vpe, flags); } static struct its_collection *valid_col(struct its_collection *col) @@ -353,6 +389,15 @@ struct its_cmd_desc { struct { struct its_vpe *vpe; } its_invdb_cmd; + + struct { + struct its_vpe *vpe; + u8 sgi; + u8 priority; + bool enable; + bool group; + bool clear; + } its_vsgi_cmd; }; }; @@ -501,6 +546,31 @@ static void its_encode_db(struct its_cmd_block *cmd, bool db) its_mask_encode(&cmd->raw_cmd[2], db, 63, 63); } +static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi) +{ + its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32); +} + +static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio) +{ + its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20); +} + +static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp) +{ + its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10); +} + +static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr) +{ + its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9); +} + +static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en) +{ + its_mask_encode(&cmd->raw_cmd[0], en, 8, 8); +} + static inline void its_fixup_cmd(struct its_cmd_block *cmd) { /* Let's fixup BE commands */ @@ -866,6 +936,26 @@ static struct its_vpe *its_build_invdb_cmd(struct its_node *its, return valid_vpe(its, desc->its_invdb_cmd.vpe); } +static struct its_vpe *its_build_vsgi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_VSGI); + its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id); + its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi); + its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority); + its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group); + its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear); + its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vsgi_cmd.vpe); +} + static u64 its_cmd_ptr_to_offset(struct its_node *its, struct its_cmd_block *ptr) { @@ -1214,7 +1304,7 @@ static void its_send_vmovp(struct its_vpe *vpe) if (!is_v4(its)) continue; - if (!vpe->its_vm->vlpi_count[its->list_nr]) + if (!require_its_list_vmovp(vpe->its_vm, its)) continue; desc.its_vmovp_cmd.col = &its->collections[col_id]; @@ -1321,7 +1411,7 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) static void wait_for_syncr(void __iomem *rdbase) { - while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) + while (readl_relaxed(rdbase + GICR_SYNCR) & 1) cpu_relax(); } @@ -1329,7 +1419,9 @@ static void direct_lpi_inv(struct irq_data *d) { struct its_vlpi_map *map = get_vlpi_map(d); void __iomem *rdbase; + unsigned long flags; u64 val; + int cpu; if (map) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); @@ -1344,10 +1436,14 @@ static void direct_lpi_inv(struct irq_data *d) } /* Target the redistributor this LPI is currently routed to */ - rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base; + cpu = irq_to_cpuid_lock(d, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; gic_write_lpir(val, rdbase + GICR_INVLPIR); wait_for_syncr(rdbase); + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + irq_to_cpuid_unlock(d, flags); } static 
void lpi_update_config(struct irq_data *d, u8 clr, u8 set) @@ -1499,12 +1595,31 @@ static int its_irq_set_irqchip_state(struct irq_data *d, return 0; } +/* + * Two favourable cases: + * + * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times + * for vSGI delivery + * + * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough + * and we're better off mapping all VPEs always + * + * If neither (a) nor (b) is true, then we map vPEs on demand. + * + */ +static bool gic_requires_eager_mapping(void) +{ + if (!its_list_map || gic_rdists->has_rvpeid) + return true; + + return false; +} + static void its_map_vm(struct its_node *its, struct its_vm *vm) { unsigned long flags; - /* Not using the ITS list? Everything is always mapped. */ - if (!its_list_map) + if (gic_requires_eager_mapping()) return; raw_spin_lock_irqsave(&vmovp_lock, flags); @@ -1538,7 +1653,7 @@ static void its_unmap_vm(struct its_node *its, struct its_vm *vm) unsigned long flags; /* Not using the ITS list? Everything is always mapped. */ - if (!its_list_map) + if (gic_requires_eager_mapping()) return; raw_spin_lock_irqsave(&vmovp_lock, flags); @@ -2036,18 +2151,17 @@ static void its_write_baser(struct its_node *its, struct its_baser *baser, } static int its_setup_baser(struct its_node *its, struct its_baser *baser, - u64 cache, u64 shr, u32 psz, u32 order, - bool indirect) + u64 cache, u64 shr, u32 order, bool indirect) { u64 val = its_read_baser(its, baser); u64 esz = GITS_BASER_ENTRY_SIZE(val); u64 type = GITS_BASER_TYPE(val); u64 baser_phys, tmp; - u32 alloc_pages; + u32 alloc_pages, psz; struct page *page; void *base; -retry_alloc_baser: + psz = baser->psz; alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); if (alloc_pages > GITS_BASER_PAGES_MAX) { pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", @@ -2120,25 +2234,6 @@ retry_baser: goto retry_baser; } - if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { - /* - * Page size didn't stick. Let's try a smaller - * size and retry. If we reach 4K, then - * something is horribly wrong... 
- */ - free_pages((unsigned long)base, order); - baser->base = NULL; - - switch (psz) { - case SZ_16K: - psz = SZ_4K; - goto retry_alloc_baser; - case SZ_64K: - psz = SZ_16K; - goto retry_alloc_baser; - } - } - if (val != tmp) { pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", &its->phys_base, its_base_type_string[type], @@ -2164,13 +2259,14 @@ retry_baser: static bool its_parse_indirect_baser(struct its_node *its, struct its_baser *baser, - u32 psz, u32 *order, u32 ids) + u32 *order, u32 ids) { u64 tmp = its_read_baser(its, baser); u64 type = GITS_BASER_TYPE(tmp); u64 esz = GITS_BASER_ENTRY_SIZE(tmp); u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; u32 new_order = *order; + u32 psz = baser->psz; bool indirect = false; /* No need to enable Indirection if memory requirement < (psz*2)bytes */ @@ -2288,11 +2384,58 @@ static void its_free_tables(struct its_node *its) } } +static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser) +{ + u64 psz = SZ_64K; + + while (psz) { + u64 val, gpsz; + + val = its_read_baser(its, baser); + val &= ~GITS_BASER_PAGE_SIZE_MASK; + + switch (psz) { + case SZ_64K: + gpsz = GITS_BASER_PAGE_SIZE_64K; + break; + case SZ_16K: + gpsz = GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_4K: + default: + gpsz = GITS_BASER_PAGE_SIZE_4K; + break; + } + + gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT; + + val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz); + its_write_baser(its, baser, val); + + if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz) + break; + + switch (psz) { + case SZ_64K: + psz = SZ_16K; + break; + case SZ_16K: + psz = SZ_4K; + break; + case SZ_4K: + default: + return -1; + } + } + + baser->psz = psz; + return 0; +} + static int its_alloc_tables(struct its_node *its) { u64 shr = GITS_BASER_InnerShareable; u64 cache = GITS_BASER_RaWaWb; - u32 psz = SZ_64K; int err, i; if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) @@ -2303,16 +2446,22 @@ static int its_alloc_tables(struct its_node *its) struct its_baser *baser = its->tables + i; u64 val = its_read_baser(its, baser); u64 type = GITS_BASER_TYPE(val); - u32 order = get_order(psz); bool indirect = false; + u32 order; - switch (type) { - case GITS_BASER_TYPE_NONE: + if (type == GITS_BASER_TYPE_NONE) continue; + if (its_probe_baser_psz(its, baser)) { + its_free_tables(its); + return -ENXIO; + } + + order = get_order(baser->psz); + + switch (type) { case GITS_BASER_TYPE_DEVICE: - indirect = its_parse_indirect_baser(its, baser, - psz, &order, + indirect = its_parse_indirect_baser(its, baser, &order, device_ids(its)); break; @@ -2328,20 +2477,18 @@ static int its_alloc_tables(struct its_node *its) } } - indirect = its_parse_indirect_baser(its, baser, - psz, &order, + indirect = its_parse_indirect_baser(its, baser, &order, ITS_MAX_VPEID_BITS); break; } - err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); + err = its_setup_baser(its, baser, cache, shr, order, indirect); if (err < 0) { its_free_tables(its); return err; } /* Update settings which will be used for next BASERn */ - psz = baser->psz; cache = baser->val & GITS_BASER_CACHEABILITY_MASK; shr = baser->val & GITS_BASER_SHAREABILITY_MASK; } @@ -2452,6 +2599,10 @@ static bool allocate_vpe_l2_table(int cpu, u32 id) if (!gic_rdists->has_rvpeid) return true; + /* Skip non-present CPUs */ + if (!base) + return true; + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1; @@ -3482,17 +3633,25 @@ static int its_vpe_set_affinity(struct irq_data *d, { struct 
its_vpe *vpe = irq_data_get_irq_chip_data(d); int from, cpu = cpumask_first(mask_val); + unsigned long flags; /* * Changing affinity is mega expensive, so let's be as lazy as * we can and only do it if we really have to. Also, if mapped * into the proxy device, we need to move the doorbell * interrupt to its new location. + * + * Another thing is that changing the affinity of a vPE affects + * *other interrupts* such as all the vLPIs that are routed to + * this vPE. This means that the irq_desc lock is not enough to + * protect us, and that we must ensure nobody samples vpe->col_idx + * during the update, hence the lock below which must also be + * taken on any vLPI handling path that evaluates vpe->col_idx. */ - if (vpe->col_idx == cpu) + from = vpe_to_cpuid_lock(vpe, &flags); + if (from == cpu) goto out; - from = vpe->col_idx; vpe->col_idx = cpu; /* @@ -3508,6 +3667,7 @@ static int its_vpe_set_affinity(struct irq_data *d, out: irq_data_update_effective_affinity(d, cpumask_of(cpu)); + vpe_to_cpuid_unlock(vpe, flags); return IRQ_SET_MASK_OK_DONE; } @@ -3528,7 +3688,7 @@ static void its_vpe_schedule(struct its_vpe *vpe) val = virt_to_phys(page_address(vpe->vpt_page)) & GENMASK_ULL(51, 16); val |= GICR_VPENDBASER_RaWaWb; - val |= GICR_VPENDBASER_NonShareable; + val |= GICR_VPENDBASER_InnerShareable; /* * There is no good way of finding out if the pending table is * empty as we can race against the doorbell interrupt very @@ -3619,9 +3779,11 @@ static void its_vpe_send_inv(struct irq_data *d) void __iomem *rdbase; /* Target the redistributor this VPE is currently known on */ + raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR); wait_for_syncr(rdbase); + raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); } else { its_vpe_send_cmd(vpe, its_send_inv); } @@ -3675,12 +3837,18 @@ static int its_vpe_set_irqchip_state(struct irq_data *d, return 0; } +static int its_vpe_retrigger(struct irq_data *d) +{ + return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + static struct irq_chip its_vpe_irq_chip = { .name = "GICv4-vpe", .irq_mask = its_vpe_mask_irq, .irq_unmask = its_vpe_unmask_irq, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = its_vpe_set_affinity, + .irq_retrigger = its_vpe_retrigger, .irq_set_irqchip_state = its_vpe_set_irqchip_state, .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, }; @@ -3782,8 +3950,12 @@ static void its_vpe_4_1_invall(struct its_vpe *vpe) val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); /* Target the redistributor this vPE is currently known on */ + raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; gic_write_lpir(val, rdbase + GICR_INVALLR); + + wait_for_syncr(rdbase); + raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); } static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) @@ -3818,6 +3990,221 @@ static struct irq_chip its_vpe_4_1_irq_chip = { .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, }; +static void its_configure_sgi(struct irq_data *d, bool clear) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_desc desc; + + desc.its_vsgi_cmd.vpe = vpe; + desc.its_vsgi_cmd.sgi = d->hwirq; + desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; + desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled; + desc.its_vsgi_cmd.group = 
vpe->sgi_config[d->hwirq].group; + desc.its_vsgi_cmd.clear = clear; + + /* + * GICv4.1 allows us to send VSGI commands to any ITS as long as the + * destination VPE is mapped there. Since we map them eagerly at + * activation time, we're pretty sure the first GICv4.1 ITS will do. + */ + its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc); +} + +static void its_sgi_mask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); +} + +static void its_sgi_unmask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = true; + its_configure_sgi(d, false); +} + +static int its_sgi_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + /* + * There is no notion of affinity for virtual SGIs, at least + * not on the host (since they can only be targetting a vPE). + * Tell the kernel we've done whatever it asked for. + */ + return IRQ_SET_MASK_OK; +} + +static int its_sgi_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (state) { + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its = find_4_1_its(); + u64 val; + + val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id); + val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq); + writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K); + } else { + its_configure_sgi(d, true); + } + + return 0; +} + +static int its_sgi_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + void __iomem *base; + unsigned long flags; + u32 count = 1000000; /* 1s! */ + u32 status; + int cpu; + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + /* + * Locking galore! We can race against two different events: + * + * - Concurent vPE affinity change: we must make sure it cannot + * happen, or we'll talk to the wrong redistributor. This is + * identical to what happens with vLPIs. + * + * - Concurrent VSGIPENDR access: As it involves accessing two + * MMIO registers, this must be made atomic one way or another. 
+ */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K; + writel_relaxed(vpe->vpe_id, base + GICR_VSGIR); + do { + status = readl_relaxed(base + GICR_VSGIPENDR); + if (!(status & GICR_VSGIPENDR_BUSY)) + goto out; + + count--; + if (!count) { + pr_err_ratelimited("Unable to get SGI status\n"); + goto out; + } + cpu_relax(); + udelay(1); + } while (count); + +out: + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + vpe_to_cpuid_unlock(vpe, flags); + + if (!count) + return -ENXIO; + + *val = !!(status & (1 << d->hwirq)); + + return 0; +} + +static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case PROP_UPDATE_VSGI: + vpe->sgi_config[d->hwirq].priority = info->priority; + vpe->sgi_config[d->hwirq].group = info->group; + its_configure_sgi(d, false); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_sgi_irq_chip = { + .name = "GICv4.1-sgi", + .irq_mask = its_sgi_mask_irq, + .irq_unmask = its_sgi_unmask_irq, + .irq_set_affinity = its_sgi_set_affinity, + .irq_set_irqchip_state = its_sgi_set_irqchip_state, + .irq_get_irqchip_state = its_sgi_get_irqchip_state, + .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity, +}; + +static int its_sgi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + struct its_vpe *vpe = args; + int i; + + /* Yes, we do want 16 SGIs */ + WARN_ON(nr_irqs != 16); + + for (i = 0; i < 16; i++) { + vpe->sgi_config[i].priority = 0; + vpe->sgi_config[i].enabled = false; + vpe->sgi_config[i].group = false; + + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + &its_sgi_irq_chip, vpe); + irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY); + } + + return 0; +} + +static void its_sgi_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + /* Nothing to do */ +} + +static int its_sgi_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + /* Write out the initial SGI configuration */ + its_configure_sgi(d, false); + return 0; +} + +static void its_sgi_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + /* + * The VSGI command is awkward: + * + * - To change the configuration, CLEAR must be set to false, + * leaving the pending bit unchanged. + * - To clear the pending bit, CLEAR must be set to true, leaving + * the configuration unchanged. + * + * You just can't do both at once, hence the two commands below. 
+ */ + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); + its_configure_sgi(d, true); +} + +static const struct irq_domain_ops its_sgi_domain_ops = { + .alloc = its_sgi_irq_domain_alloc, + .free = its_sgi_irq_domain_free, + .activate = its_sgi_irq_domain_activate, + .deactivate = its_sgi_irq_domain_deactivate, +}; + static int its_vpe_id_alloc(void) { return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); @@ -3851,6 +4238,7 @@ static int its_vpe_init(struct its_vpe *vpe) return -ENOMEM; } + raw_spin_lock_init(&vpe->vpe_lock); vpe->vpe_id = vpe_id; vpe->vpt_page = vpt_page; if (gic_rdists->has_rvpeid) @@ -3960,8 +4348,12 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain, struct its_vpe *vpe = irq_data_get_irq_chip_data(d); struct its_node *its; - /* If we use the list map, we issue VMAPP on demand... */ - if (its_list_map) + /* + * If we use the list map, we issue VMAPP on demand... Unless + * we're on a GICv4.1 and we eagerly map the VPE on all ITSs + * so that VSGIs can work. + */ + if (!gic_requires_eager_mapping()) return 0; /* Map the VPE to the first possible CPU */ @@ -3987,10 +4379,10 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, struct its_node *its; /* - * If we use the list map, we unmap the VPE once no VLPIs are - * associated with the VM. + * If we use the list map on GICv4.0, we unmap the VPE once no + * VLPIs are associated with the VM. */ - if (its_list_map) + if (!gic_requires_eager_mapping()) return; list_for_each_entry(its, &its_nodes, entry) { @@ -4404,7 +4796,7 @@ static int __init its_probe_one(struct resource *res, struct page *page; int err; - its_base = ioremap(res->start, resource_size(res)); + its_base = ioremap(res->start, SZ_64K); if (!its_base) { pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); return -ENOMEM; @@ -4455,6 +4847,13 @@ static int __init its_probe_one(struct resource *res, if (is_v4_1(its)) { u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer); + + its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K); + if (!its->sgir_base) { + err = -ENOMEM; + goto out_free_its; + } + its->mpidr = readl_relaxed(its_base + GITS_MPIDR); pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", @@ -4468,7 +4867,7 @@ static int __init its_probe_one(struct resource *res, get_order(ITS_CMD_QUEUE_SZ)); if (!page) { err = -ENOMEM; - goto out_free_its; + goto out_unmap_sgir; } its->cmd_base = (void *)page_address(page); its->cmd_write = its->cmd_base; @@ -4535,6 +4934,9 @@ out_free_tables: its_free_tables(its); out_free_cmd: free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); +out_unmap_sgir: + if (its->sgir_base) + iounmap(its->sgir_base); out_free_its: kfree(its); out_unmap: @@ -4818,6 +5220,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, struct device_node *of_node; struct its_node *its; bool has_v4 = false; + bool has_v4_1 = false; int err; gic_rdists = rdists; @@ -4838,12 +5241,25 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, if (err) return err; - list_for_each_entry(its, &its_nodes, entry) + list_for_each_entry(its, &its_nodes, entry) { has_v4 |= is_v4(its); + has_v4_1 |= is_v4_1(its); + } + + /* Don't bother with inconsistent systems */ + if (WARN_ON(!has_v4_1 && rdists->has_rvpeid)) + rdists->has_rvpeid = false; if (has_v4 & rdists->has_vlpis) { + const struct irq_domain_ops *sgi_ops; + + if (has_v4_1) + sgi_ops = &its_sgi_domain_ops; + else + sgi_ops = NULL; + if (its_init_vpe_domain() || - 
its_init_v4(parent_domain, &its_vpe_domain_ops)) { + its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) { rdists->has_vlpis = false; pr_err("ITS: Disabling GICv4 support\n"); } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index c1f7af9d9ae7..9dbc81b6f62e 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -34,6 +34,7 @@ #define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80) #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) +#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) struct redist_region { void __iomem *redist_base; @@ -723,6 +724,7 @@ static void __init gic_dist_init(void) unsigned int i; u64 affinity; void __iomem *base = gic_data.dist_base; + u32 val; /* Disable the distributor */ writel_relaxed(0, base + GICD_CTLR); @@ -755,9 +757,14 @@ static void __init gic_dist_init(void) /* Now do the common stuff, and wait for the distributor to drain */ gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp); + val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1; + if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) { + pr_info("Enabling SGIs without active state\n"); + val |= GICD_CTLR_nASSGIreq; + } + /* Enable distributor with ARE, Group1 */ - writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, - base + GICD_CTLR); + writel_relaxed(val, base + GICD_CTLR); /* * Set all global interrupts to the boot CPU only. ARE must be @@ -828,6 +835,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) typer = gic_read_typer(ptr + GICR_TYPER); if ((typer >> 32) == aff) { u64 offset = ptr - region->redist_base; + raw_spin_lock_init(&gic_data_rdist()->rd_lock); gic_data_rdist_rd_base() = ptr; gic_data_rdist()->phys_base = region->phys_base + offset; @@ -1464,6 +1472,15 @@ static bool gic_enable_quirk_msm8996(void *data) return true; } +static bool gic_enable_quirk_cavium_38539(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539; + + return true; +} + static bool gic_enable_quirk_hip06_07(void *data) { struct gic_chip_data *d = data; @@ -1503,6 +1520,19 @@ static const struct gic_quirk gic_quirks[] = { .init = gic_enable_quirk_hip06_07, }, { + /* + * Reserved register accesses generate a Synchronous + * External Abort. This erratum applies to: + * - ThunderX: CN88xx + * - OCTEON TX: CN83xx, CN81xx + * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx* + */ + .desc = "GICv3: Cavium erratum 38539", + .iidr = 0xa000034c, + .mask = 0xe8f00fff, + .init = gic_enable_quirk_cavium_38539, + }, + { } }; @@ -1577,11 +1607,15 @@ static int __init gic_init_bases(void __iomem *dist_base, pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); - gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); + /* + * ThunderX1 explodes on reading GICD_TYPER2, in violation of the + * architecture spec (which says that reserved registers are RES0). 
+ */ + if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539)) + gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, &gic_data); - irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); gic_data.rdists.has_rvpeid = true; gic_data.rdists.has_vlpis = true; @@ -1592,6 +1626,8 @@ static int __init gic_init_bases(void __iomem *dist_base, goto out_free; } + irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); + gic_data.has_rss = !!(typer & GICD_TYPER_RSS); pr_info("Distributor has %sRange Selector support\n", gic_data.has_rss ? "" : "no "); @@ -1757,6 +1793,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node) gic_v3_kvm_info.vcpu = r; gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; gic_set_kvm_info(&gic_v3_kvm_info); } @@ -2072,6 +2109,7 @@ static void __init gic_acpi_setup_kvm_info(void) } gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; gic_set_kvm_info(&gic_v3_kvm_info); } diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index 45969927cc81..0c18714ae13e 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -85,6 +85,53 @@ static struct irq_domain *gic_domain; static const struct irq_domain_ops *vpe_domain_ops; +static const struct irq_domain_ops *sgi_domain_ops; + +static bool has_v4_1(void) +{ + return !!sgi_domain_ops; +} + +static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx) +{ + char *name; + int sgi_base; + + if (!has_v4_1()) + return 0; + + name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current)); + if (!name) + goto err; + + vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx); + if (!vpe->fwnode) + goto err; + + kfree(name); + name = NULL; + + vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16, + sgi_domain_ops, vpe); + if (!vpe->sgi_domain) + goto err; + + sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16, + NUMA_NO_NODE, vpe, + false, NULL); + if (sgi_base <= 0) + goto err; + + return 0; + +err: + if (vpe->sgi_domain) + irq_domain_remove(vpe->sgi_domain); + if (vpe->fwnode) + irq_domain_free_fwnode(vpe->fwnode); + kfree(name); + return -ENOMEM; +} int its_alloc_vcpu_irqs(struct its_vm *vm) { @@ -112,8 +159,13 @@ int its_alloc_vcpu_irqs(struct its_vm *vm) if (vpe_base_irq <= 0) goto err; - for (i = 0; i < vm->nr_vpes; i++) + for (i = 0; i < vm->nr_vpes; i++) { + int ret; vm->vpes[i]->irq = vpe_base_irq + i; + ret = its_alloc_vcpu_sgis(vm->vpes[i], i); + if (ret) + goto err; + } return 0; @@ -126,8 +178,28 @@ err: return -ENOMEM; } +static void its_free_sgi_irqs(struct its_vm *vm) +{ + int i; + + if (!has_v4_1()) + return; + + for (i = 0; i < vm->nr_vpes; i++) { + unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0); + + if (WARN_ON(!irq)) + continue; + + irq_domain_free_irqs(irq, 16); + irq_domain_remove(vm->vpes[i]->sgi_domain); + irq_domain_free_fwnode(vm->vpes[i]->fwnode); + } +} + void its_free_vcpu_irqs(struct its_vm *vm) { + its_free_sgi_irqs(vm); irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes); irq_domain_remove(vm->domain); irq_domain_free_fwnode(vm->fwnode); @@ -138,18 +210,50 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info) return irq_set_vcpu_affinity(vpe->irq, info); } -int its_schedule_vpe(struct its_vpe 
*vpe, bool on) +int its_make_vpe_non_resident(struct its_vpe *vpe, bool db) +{ + struct irq_desc *desc = irq_to_desc(vpe->irq); + struct its_cmd_info info = { }; + int ret; + + WARN_ON(preemptible()); + + info.cmd_type = DESCHEDULE_VPE; + if (has_v4_1()) { + /* GICv4.1 can directly deal with doorbells */ + info.req_db = db; + } else { + /* Undo the nested disable_irq() calls... */ + while (db && irqd_irq_disabled(&desc->irq_data)) + enable_irq(vpe->irq); + } + + ret = its_send_vpe_cmd(vpe, &info); + if (!ret) + vpe->resident = false; + + return ret; +} + +int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en) { - struct its_cmd_info info; + struct its_cmd_info info = { }; int ret; WARN_ON(preemptible()); - info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE; + info.cmd_type = SCHEDULE_VPE; + if (has_v4_1()) { + info.g0en = g0en; + info.g1en = g1en; + } else { + /* Disabled the doorbell, as we're about to enter the guest */ + disable_irq_nosync(vpe->irq); + } ret = its_send_vpe_cmd(vpe, &info); if (!ret) - vpe->resident = on; + vpe->resident = true; return ret; } @@ -216,12 +320,28 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv) return irq_set_vcpu_affinity(irq, &info); } -int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops) +int its_prop_update_vsgi(int irq, u8 priority, bool group) +{ + struct its_cmd_info info = { + .cmd_type = PROP_UPDATE_VSGI, + { + .priority = priority, + .group = group, + }, + }; + + return irq_set_vcpu_affinity(irq, &info); +} + +int its_init_v4(struct irq_domain *domain, + const struct irq_domain_ops *vpe_ops, + const struct irq_domain_ops *sgi_ops) { if (domain) { pr_info("ITS: Enabling GICv4 support\n"); gic_domain = domain; - vpe_domain_ops = ops; + vpe_domain_ops = vpe_ops; + sgi_domain_ops = sgi_ops; return 0; } diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index d000870d9b6b..b6f6aa7b2862 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c @@ -268,15 +268,6 @@ static void init_8259A(int auto_eoi) raw_spin_unlock_irqrestore(&i8259A_lock, flags); } -/* - * IRQ2 is cascade interrupt to second interrupt controller - */ -static struct irqaction irq2 = { - .handler = no_action, - .name = "cascade", - .flags = IRQF_NO_THREAD, -}; - static struct resource pic1_io_resource = { .name = "pic1", .start = PIC_MASTER_CMD, @@ -311,6 +302,10 @@ static const struct irq_domain_ops i8259A_ops = { */ struct irq_domain * __init __init_i8259_irqs(struct device_node *node) { + /* + * PIC_CASCADE_IR is cascade interrupt to second interrupt controller + */ + int irq = I8259A_IRQ_BASE + PIC_CASCADE_IR; struct irq_domain *domain; insert_resource(&ioport_resource, &pic1_io_resource); @@ -323,7 +318,8 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node) if (!domain) panic("Failed to add i8259 IRQ domain"); - setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); + if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL)) + pr_err("Failed to register cascade interrupt\n"); register_syscore_ops(&i8259_syscore_ops); return domain; } diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c index 6d05cefe9d79..7a7222d4c19c 100644 --- a/drivers/irqchip/irq-ingenic-tcu.c +++ b/drivers/irqchip/irq-ingenic-tcu.c @@ -180,3 +180,4 @@ err_free_tcu: IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", 
ingenic_tcu_irq_init); +IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init); diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index c5589ee0dfb3..9f3da4260ca6 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -58,11 +58,6 @@ static irqreturn_t intc_cascade(int irq, void *data) return IRQ_HANDLED; } -static struct irqaction intc_cascade_action = { - .handler = intc_cascade, - .name = "SoC intc cascade interrupt", -}; - static int __init ingenic_intc_of_init(struct device_node *node, unsigned num_chips) { @@ -130,7 +125,9 @@ static int __init ingenic_intc_of_init(struct device_node *node, irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK); } - setup_irq(parent_irq, &intc_cascade_action); + if (request_irq(parent_irq, intc_cascade, 0, + "SoC intc cascade interrupt", NULL)) + pr_err("Failed to register SoC intc cascade interrupt\n"); return 0; out_domain_remove: diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 6e5e3172796b..3819185bfd02 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -461,7 +461,7 @@ static int intc_irqpin_probe(struct platform_device *pdev) } i->iomem = devm_ioremap(dev, io[k]->start, - resource_size(io[k])); + resource_size(io[k])); if (!i->iomem) { dev_err(dev, "failed to remap IOMEM\n"); ret = -ENXIO; diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index aa4af886e43a..c34fb3ae0ff8 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -4,6 +4,7 @@ * Copyright (C) 2018 Christoph Hellwig */ #define pr_fmt(fmt) "plic: " fmt +#include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> @@ -55,7 +56,14 @@ #define CONTEXT_THRESHOLD 0x00 #define CONTEXT_CLAIM 0x04 -static void __iomem *plic_regs; +#define PLIC_DISABLE_THRESHOLD 0xf +#define PLIC_ENABLE_THRESHOLD 0 + +struct plic_priv { + struct cpumask lmask; + struct irq_domain *irqdomain; + void __iomem *regs; +}; struct plic_handler { bool present; @@ -66,6 +74,7 @@ struct plic_handler { */ raw_spinlock_t enable_lock; void __iomem *enable_base; + struct plic_priv *priv; }; static DEFINE_PER_CPU(struct plic_handler, plic_handlers); @@ -84,31 +93,40 @@ static inline void plic_toggle(struct plic_handler *handler, } static inline void plic_irq_toggle(const struct cpumask *mask, - int hwirq, int enable) + struct irq_data *d, int enable) { int cpu; + struct plic_priv *priv = irq_get_chip_data(d->irq); - writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID); + writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); for_each_cpu(cpu, mask) { struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); - if (handler->present) - plic_toggle(handler, hwirq, enable); + if (handler->present && + cpumask_test_cpu(cpu, &handler->priv->lmask)) + plic_toggle(handler, d->hwirq, enable); } } static void plic_irq_unmask(struct irq_data *d) { - unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d), - cpu_online_mask); + struct cpumask amask; + unsigned int cpu; + struct plic_priv *priv = irq_get_chip_data(d->irq); + + cpumask_and(&amask, &priv->lmask, cpu_online_mask); + cpu = cpumask_any_and(irq_data_get_affinity_mask(d), + &amask); if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) return; - plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1); + plic_irq_toggle(cpumask_of(cpu), d, 1); } static void plic_irq_mask(struct 
irq_data *d) { - plic_irq_toggle(cpu_possible_mask, d->hwirq, 0); + struct plic_priv *priv = irq_get_chip_data(d->irq); + + plic_irq_toggle(&priv->lmask, d, 0); } #ifdef CONFIG_SMP @@ -116,17 +134,21 @@ static int plic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { unsigned int cpu; + struct cpumask amask; + struct plic_priv *priv = irq_get_chip_data(d->irq); + + cpumask_and(&amask, &priv->lmask, mask_val); if (force) - cpu = cpumask_first(mask_val); + cpu = cpumask_first(&amask); else - cpu = cpumask_any_and(mask_val, cpu_online_mask); + cpu = cpumask_any_and(&amask, cpu_online_mask); if (cpu >= nr_cpu_ids) return -EINVAL; - plic_irq_toggle(cpu_possible_mask, d->hwirq, 0); - plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1); + plic_irq_toggle(&priv->lmask, d, 0); + plic_irq_toggle(cpumask_of(cpu), d, 1); irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -187,8 +209,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = { .free = irq_domain_free_irqs_top, }; -static struct irq_domain *plic_irqdomain; - /* * Handling an interrupt is a two-step process: first you claim the interrupt * by reading the claim register, then you complete the interrupt by writing @@ -205,7 +225,7 @@ static void plic_handle_irq(struct pt_regs *regs) csr_clear(CSR_IE, IE_EIE); while ((hwirq = readl(claim))) { - int irq = irq_find_mapping(plic_irqdomain, hwirq); + int irq = irq_find_mapping(handler->priv->irqdomain, hwirq); if (unlikely(irq <= 0)) pr_warn_ratelimited("can't find mapping for hwirq %lu\n", @@ -230,20 +250,48 @@ static int plic_find_hart_id(struct device_node *node) return -1; } +static void plic_set_threshold(struct plic_handler *handler, u32 threshold) +{ + /* priority must be > threshold to trigger an interrupt */ + writel(threshold, handler->hart_base + CONTEXT_THRESHOLD); +} + +static int plic_dying_cpu(unsigned int cpu) +{ + struct plic_handler *handler = this_cpu_ptr(&plic_handlers); + + csr_clear(CSR_IE, IE_EIE); + plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD); + + return 0; +} + +static int plic_starting_cpu(unsigned int cpu) +{ + struct plic_handler *handler = this_cpu_ptr(&plic_handlers); + + csr_set(CSR_IE, IE_EIE); + plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD); + + return 0; +} + static int __init plic_init(struct device_node *node, struct device_node *parent) { int error = 0, nr_contexts, nr_handlers = 0, i; u32 nr_irqs; + struct plic_priv *priv; - if (plic_regs) { - pr_warn("PLIC already present.\n"); - return -ENXIO; - } + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; - plic_regs = of_iomap(node, 0); - if (WARN_ON(!plic_regs)) - return -EIO; + priv->regs = of_iomap(node, 0); + if (WARN_ON(!priv->regs)) { + error = -EIO; + goto out_free_priv; + } error = -EINVAL; of_property_read_u32(node, "riscv,ndev", &nr_irqs); @@ -257,9 +305,9 @@ static int __init plic_init(struct device_node *node, goto out_iounmap; error = -ENOMEM; - plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1, - &plic_irqdomain_ops, NULL); - if (WARN_ON(!plic_irqdomain)) + priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1, + &plic_irqdomain_ops, priv); + if (WARN_ON(!priv->irqdomain)) goto out_iounmap; for (i = 0; i < nr_contexts; i++) { @@ -267,7 +315,6 @@ static int __init plic_init(struct device_node *node, struct plic_handler *handler; irq_hw_number_t hwirq; int cpu, hartid; - u32 threshold = 0; if (of_irq_parse_one(node, i, &parent)) { pr_err("failed to parse parent for context %d.\n", i); @@ -301,32 +348,36 @@ static int 
__init plic_init(struct device_node *node, handler = per_cpu_ptr(&plic_handlers, cpu); if (handler->present) { pr_warn("handler already present for context %d.\n", i); - threshold = 0xffffffff; + plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD); goto done; } + cpumask_set_cpu(cpu, &priv->lmask); handler->present = true; handler->hart_base = - plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART; + priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART; raw_spin_lock_init(&handler->enable_lock); handler->enable_base = - plic_regs + ENABLE_BASE + i * ENABLE_PER_HART; - + priv->regs + ENABLE_BASE + i * ENABLE_PER_HART; + handler->priv = priv; done: - /* priority must be > threshold to trigger an interrupt */ - writel(threshold, handler->hart_base + CONTEXT_THRESHOLD); for (hwirq = 1; hwirq <= nr_irqs; hwirq++) plic_toggle(handler, hwirq, 0); nr_handlers++; } + cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, + "irqchip/sifive/plic:starting", + plic_starting_cpu, plic_dying_cpu); pr_info("mapped %d interrupts with %d handlers for %d contexts.\n", nr_irqs, nr_handlers, nr_contexts); set_handle_irq(plic_handle_irq); return 0; out_iounmap: - iounmap(plic_regs); + iounmap(priv->regs); +out_free_priv: + kfree(priv); return error; } diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index e00f2fa27f00..faa8482c8246 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -604,12 +604,24 @@ static void stm32_exti_h_syscore_deinit(void) unregister_syscore_ops(&stm32_exti_h_syscore_ops); } +static int stm32_exti_h_retrigger(struct irq_data *d) +{ + struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank; + void __iomem *base = chip_data->host_data->base; + u32 mask = BIT(d->hwirq % IRQS_PER_BANK); + + writel_relaxed(mask, base + stm32_bank->swier_ofst); + + return 0; +} + static struct irq_chip stm32_exti_h_chip = { .name = "stm32-exti-h", .irq_eoi = stm32_exti_h_eoi, .irq_mask = stm32_exti_h_mask, .irq_unmask = stm32_exti_h_unmask, - .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_retrigger = stm32_exti_h_retrigger, .irq_set_type = stm32_exti_h_set_type, .irq_set_wake = stm32_exti_h_set_wake, .flags = IRQCHIP_MASK_ON_SUSPEND, diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index 928858dada75..f1386733d3bc 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -6,6 +6,7 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> #include <linux/irqchip/versatile-fpga.h> #include <linux/irqdomain.h> #include <linux/module.h> @@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d) static void fpga_irq_handle(struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); struct fpga_irq_data *f = irq_desc_get_handler_data(desc); - u32 status = readl(f->base + IRQ_STATUS); + u32 status; + + chained_irq_enter(chip, desc); + status = readl(f->base + IRQ_STATUS); if (status == 0) { do_bad_IRQ(desc); - return; + goto out; } do { @@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc) status &= ~(1 << irq); generic_handle_irq(irq_find_mapping(f->domain, irq)); } while (status); + +out: + chained_irq_exit(chip, desc); } /* @@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node, if (of_property_read_u32(node, "valid-mask", &valid_mask)) valid_mask = 0; + writel(clear_mask, base + 
IRQ_ENABLE_CLEAR); + writel(clear_mask, base + FIQ_ENABLE_CLEAR); + /* Some chips are cascaded from a parent IRQ */ parent_irq = irq_of_parse_and_map(node, 0); if (!parent_irq) { @@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node, fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); - writel(clear_mask, base + IRQ_ENABLE_CLEAR); - writel(clear_mask, base + FIQ_ENABLE_CLEAR); - /* * On Versatile AB/PB, some secondary interrupts have a direct * pass-thru to the primary controller for IRQs 20 and 22-31 which need diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index f3f20a3cff50..3c87d925f74c 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -509,9 +509,7 @@ static int __init vic_of_init(struct device_node *node, void __iomem *regs; u32 interrupt_mask = ~0; u32 wakeup_mask = ~0; - - if (WARN(parent, "non-root VICs are not supported")) - return -EINVAL; + int parent_irq; regs = of_iomap(node, 0); if (WARN_ON(!regs)) @@ -519,11 +517,14 @@ static int __init vic_of_init(struct device_node *node, of_property_read_u32(node, "valid-mask", &interrupt_mask); of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask); + parent_irq = of_irq_get(node, 0); + if (parent_irq < 0) + parent_irq = 0; /* * Passing 0 as first IRQ makes the simple domain allocate descriptors */ - __vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node); + __vic_init(regs, parent_irq, 0, interrupt_mask, wakeup_mask, node); return 0; } diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c index e3043ded8973..7f811fe5bf69 100644 --- a/drivers/irqchip/irq-xilinx-intc.c +++ b/drivers/irqchip/irq-xilinx-intc.c @@ -38,29 +38,31 @@ struct xintc_irq_chip { void __iomem *base; struct irq_domain *root_domain; u32 intr_mask; + u32 nr_irq; }; -static struct xintc_irq_chip *xintc_irqc; +static struct xintc_irq_chip *primary_intc; -static void xintc_write(int reg, u32 data) +static void xintc_write(struct xintc_irq_chip *irqc, int reg, u32 data) { if (static_branch_unlikely(&xintc_is_be)) - iowrite32be(data, xintc_irqc->base + reg); + iowrite32be(data, irqc->base + reg); else - iowrite32(data, xintc_irqc->base + reg); + iowrite32(data, irqc->base + reg); } -static unsigned int xintc_read(int reg) +static u32 xintc_read(struct xintc_irq_chip *irqc, int reg) { if (static_branch_unlikely(&xintc_is_be)) - return ioread32be(xintc_irqc->base + reg); + return ioread32be(irqc->base + reg); else - return ioread32(xintc_irqc->base + reg); + return ioread32(irqc->base + reg); } static void intc_enable_or_unmask(struct irq_data *d) { - unsigned long mask = 1 << d->hwirq; + struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d); + unsigned long mask = BIT(d->hwirq); pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq); @@ -69,30 +71,35 @@ static void intc_enable_or_unmask(struct irq_data *d) * acks the irq before calling the interrupt handler */ if (irqd_is_level_type(d)) - xintc_write(IAR, mask); + xintc_write(irqc, IAR, mask); - xintc_write(SIE, mask); + xintc_write(irqc, SIE, mask); } static void intc_disable_or_mask(struct irq_data *d) { + struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d); + pr_debug("irq-xilinx: disable: %ld\n", d->hwirq); - xintc_write(CIE, 1 << d->hwirq); + xintc_write(irqc, CIE, BIT(d->hwirq)); } static void intc_ack(struct irq_data *d) { + struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d); + pr_debug("irq-xilinx: ack: %ld\n", d->hwirq); - xintc_write(IAR, 1 << d->hwirq); + xintc_write(irqc, 
IAR, BIT(d->hwirq)); } static void intc_mask_ack(struct irq_data *d) { - unsigned long mask = 1 << d->hwirq; + struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d); + unsigned long mask = BIT(d->hwirq); pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq); - xintc_write(CIE, mask); - xintc_write(IAR, mask); + xintc_write(irqc, CIE, mask); + xintc_write(irqc, IAR, mask); } static struct irq_chip intc_dev = { @@ -103,13 +110,14 @@ static struct irq_chip intc_dev = { .irq_mask_ack = intc_mask_ack, }; -unsigned int xintc_get_irq(void) +static unsigned int xintc_get_irq_local(struct xintc_irq_chip *irqc) { - unsigned int hwirq, irq = -1; + unsigned int irq = 0; + u32 hwirq; - hwirq = xintc_read(IVR); + hwirq = xintc_read(irqc, IVR); if (hwirq != -1U) - irq = irq_find_mapping(xintc_irqc->root_domain, hwirq); + irq = irq_find_mapping(irqc->root_domain, hwirq); pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq); @@ -118,15 +126,18 @@ unsigned int xintc_get_irq(void) static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { - if (xintc_irqc->intr_mask & (1 << hw)) { + struct xintc_irq_chip *irqc = d->host_data; + + if (irqc->intr_mask & BIT(hw)) { irq_set_chip_and_handler_name(irq, &intc_dev, - handle_edge_irq, "edge"); + handle_edge_irq, "edge"); irq_clear_status_flags(irq, IRQ_LEVEL); } else { irq_set_chip_and_handler_name(irq, &intc_dev, - handle_level_irq, "level"); + handle_level_irq, "level"); irq_set_status_flags(irq, IRQ_LEVEL); } + irq_set_chip_data(irq, irqc); return 0; } @@ -138,43 +149,55 @@ static const struct irq_domain_ops xintc_irq_domain_ops = { static void xil_intc_irq_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); + struct xintc_irq_chip *irqc; u32 pending; + irqc = irq_data_get_irq_handler_data(&desc->irq_data); chained_irq_enter(chip, desc); do { - pending = xintc_get_irq(); - if (pending == -1U) + pending = xintc_get_irq_local(irqc); + if (pending == 0) break; generic_handle_irq(pending); } while (true); chained_irq_exit(chip, desc); } +static void xil_intc_handle_irq(struct pt_regs *regs) +{ + u32 hwirq; + struct xintc_irq_chip *irqc = primary_intc; + + do { + hwirq = xintc_read(irqc, IVR); + if (likely(hwirq != -1U)) { + int ret; + + ret = handle_domain_irq(irqc->root_domain, hwirq, regs); + WARN_ONCE(ret, "Unhandled HWIRQ %d\n", hwirq); + continue; + } + + break; + } while (1); +} + static int __init xilinx_intc_of_init(struct device_node *intc, struct device_node *parent) { - u32 nr_irq; - int ret, irq; struct xintc_irq_chip *irqc; - - if (xintc_irqc) { - pr_err("irq-xilinx: Multiple instances aren't supported\n"); - return -EINVAL; - } + int ret, irq; irqc = kzalloc(sizeof(*irqc), GFP_KERNEL); if (!irqc) return -ENOMEM; - - xintc_irqc = irqc; - irqc->base = of_iomap(intc, 0); BUG_ON(!irqc->base); - ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq); + ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &irqc->nr_irq); if (ret < 0) { pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n"); - goto err_alloc; + goto error; } ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask); @@ -183,34 +206,35 @@ static int __init xilinx_intc_of_init(struct device_node *intc, irqc->intr_mask = 0; } - if (irqc->intr_mask >> nr_irq) + if (irqc->intr_mask >> irqc->nr_irq) pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n", - intc, nr_irq, irqc->intr_mask); + intc, irqc->nr_irq, irqc->intr_mask); /* * Disable all 
external interrupts until they are * explicity requested. */ - xintc_write(IER, 0); + xintc_write(irqc, IER, 0); /* Acknowledge any pending interrupts just in case. */ - xintc_write(IAR, 0xffffffff); + xintc_write(irqc, IAR, 0xffffffff); /* Turn on the Master Enable. */ - xintc_write(MER, MER_HIE | MER_ME); - if (!(xintc_read(MER) & (MER_HIE | MER_ME))) { + xintc_write(irqc, MER, MER_HIE | MER_ME); + if (xintc_read(irqc, MER) != (MER_HIE | MER_ME)) { static_branch_enable(&xintc_is_be); - xintc_write(MER, MER_HIE | MER_ME); + xintc_write(irqc, MER, MER_HIE | MER_ME); } - irqc->root_domain = irq_domain_add_linear(intc, nr_irq, + irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq, &xintc_irq_domain_ops, irqc); if (!irqc->root_domain) { pr_err("irq-xilinx: Unable to create IRQ domain\n"); - goto err_alloc; + ret = -EINVAL; + goto error; } if (parent) { @@ -222,16 +246,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc, } else { pr_err("irq-xilinx: interrupts property not in DT\n"); ret = -EINVAL; - goto err_alloc; + goto error; } } else { - irq_set_default_host(irqc->root_domain); + primary_intc = irqc; + set_handle_irq(xil_intc_handle_irq); } return 0; -err_alloc: - xintc_irqc = NULL; +error: + iounmap(irqc->base); kfree(irqc); return ret; diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c index abfe59284ff2..aa54bfcb0433 100644 --- a/drivers/irqchip/qcom-irq-combiner.c +++ b/drivers/irqchip/qcom-irq-combiner.c @@ -33,7 +33,7 @@ struct combiner { int parent_irq; u32 nirqs; u32 nregs; - struct combiner_reg regs[0]; + struct combiner_reg regs[]; }; static inline int irq_nr(u32 reg, u32 bit) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 7543e395a2c6..db38a68abb6c 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -380,12 +380,11 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) goto err_dev; } - tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node); + tqueue = blk_alloc_queue(tt->make_rq, dev->q->node); if (!tqueue) { ret = -ENOMEM; goto err_disk; } - blk_queue_make_request(tqueue, tt->make_rq); strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name)); tdisk->flags = GENHD_FL_EXT_DEVT; diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c index 7d8958df9472..6387302b03f2 100644 --- a/drivers/lightnvm/pblk-sysfs.c +++ b/drivers/lightnvm/pblk-sysfs.c @@ -37,7 +37,7 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page) active = 0; up(&rlun->wr_sem); } - sz += snprintf(page + sz, PAGE_SIZE - sz, + sz += scnprintf(page + sz, PAGE_SIZE - sz, "pblk: pos:%d, ch:%d, lun:%d - %d\n", i, rlun->bppa.a.ch, @@ -120,7 +120,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page) struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; struct nvm_addrf_12 *gppaf = (struct nvm_addrf_12 *)&geo->addrf; - sz = snprintf(page, PAGE_SIZE, + sz = scnprintf(page, PAGE_SIZE, "g:(b:%d)blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n", pblk->addrf_len, ppaf->blk_offset, ppaf->blk_len, @@ -130,7 +130,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page) ppaf->pln_offset, ppaf->pln_len, ppaf->sec_offset, ppaf->sec_len); - sz += snprintf(page + sz, PAGE_SIZE - sz, + sz += scnprintf(page + sz, PAGE_SIZE - sz, "d:blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n", gppaf->blk_offset, gppaf->blk_len, gppaf->pg_offset, gppaf->pg_len, @@ -142,7 +142,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk 
*pblk, char *page) struct nvm_addrf *ppaf = &pblk->addrf; struct nvm_addrf *gppaf = &geo->addrf; - sz = snprintf(page, PAGE_SIZE, + sz = scnprintf(page, PAGE_SIZE, "pblk:(s:%d)ch:%d/%d,lun:%d/%d,chk:%d/%d/sec:%d/%d\n", pblk->addrf_len, ppaf->ch_offset, ppaf->ch_len, @@ -150,7 +150,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page) ppaf->chk_offset, ppaf->chk_len, ppaf->sec_offset, ppaf->sec_len); - sz += snprintf(page + sz, PAGE_SIZE - sz, + sz += scnprintf(page + sz, PAGE_SIZE - sz, "device:ch:%d/%d,lun:%d/%d,chk:%d/%d,sec:%d/%d\n", gppaf->ch_offset, gppaf->ch_len, gppaf->lun_offset, gppaf->lun_len, @@ -278,11 +278,11 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page) pblk_err(pblk, "corrupted free line list:%d/%d\n", nr_free_lines, free_line_cnt); - sz = snprintf(page, PAGE_SIZE - sz, + sz = scnprintf(page, PAGE_SIZE - sz, "line: nluns:%d, nblks:%d, nsecs:%d\n", geo->all_luns, lm->blk_per_line, lm->sec_per_line); - sz += snprintf(page + sz, PAGE_SIZE - sz, + sz += scnprintf(page + sz, PAGE_SIZE - sz, "lines:d:%d,l:%d-f:%d,m:%d/%d,c:%d,b:%d,co:%d(d:%d,l:%d)t:%d\n", cur_data, cur_log, nr_free_lines, @@ -292,12 +292,12 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page) d_line_cnt, l_line_cnt, l_mg->nr_lines); - sz += snprintf(page + sz, PAGE_SIZE - sz, + sz += scnprintf(page + sz, PAGE_SIZE - sz, "GC: full:%d, high:%d, mid:%d, low:%d, empty:%d, werr: %d, queue:%d\n", gc_full, gc_high, gc_mid, gc_low, gc_empty, gc_werr, atomic_read(&pblk->gc.read_inflight_gc)); - sz += snprintf(page + sz, PAGE_SIZE - sz, + sz += scnprintf(page + sz, PAGE_SIZE - sz, "data (%d) cur:%d, left:%d, vsc:%d, s:%d, map:%d/%d (%d)\n", cur_data, cur_sec, msecs, vsc, sec_in_line, map_weight, lm->sec_per_line, @@ -313,19 +313,19 @@ static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page) struct pblk_line_meta *lm = &pblk->lm; ssize_t sz = 0; - sz = snprintf(page, PAGE_SIZE - sz, + sz = scnprintf(page, PAGE_SIZE - sz, "smeta - len:%d, secs:%d\n", lm->smeta_len, lm->smeta_sec); - sz += snprintf(page + sz, PAGE_SIZE - sz, + sz += scnprintf(page + sz, PAGE_SIZE - sz, "emeta - len:%d, sec:%d, bb_start:%d\n", lm->emeta_len[0], lm->emeta_sec[0], lm->emeta_bb); - sz += snprintf(page + sz, PAGE_SIZE - sz, + sz += scnprintf(page + sz, PAGE_SIZE - sz, "bitmap lengths: sec:%d, blk:%d, lun:%d\n", lm->sec_bitmap_len, lm->blk_bitmap_len, lm->lun_bitmap_len); - sz += snprintf(page + sz, PAGE_SIZE - sz, + sz += scnprintf(page + sz, PAGE_SIZE - sz, "blk_line:%d, sec_line:%d, sec_blk:%d\n", lm->blk_per_line, lm->sec_per_line, @@ -344,12 +344,12 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad, { int sz; - sz = snprintf(page, PAGE_SIZE, + sz = scnprintf(page, PAGE_SIZE, "user:%lld gc:%lld pad:%lld WA:", user, gc, pad); if (!user) { - sz += snprintf(page + sz, PAGE_SIZE - sz, "NaN\n"); + sz += scnprintf(page + sz, PAGE_SIZE - sz, "NaN\n"); } else { u64 wa_int; u32 wa_frac; @@ -358,7 +358,7 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad, wa_int = div64_u64(wa_int, user); wa_int = div_u64_rem(wa_int, 100000, &wa_frac); - sz += snprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n", + sz += scnprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n", wa_int, wa_frac); } @@ -401,9 +401,9 @@ static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page) total = atomic64_read(&pblk->nr_flush) - pblk->nr_flush_rst; if (!total) { for (i = 0; i < (buckets + 1); i++) - sz += snprintf(page + sz, PAGE_SIZE - sz, + sz += scnprintf(page + sz, PAGE_SIZE - sz, "%d:0 ", i); - 
sz += snprintf(page + sz, PAGE_SIZE - sz, "\n"); + sz += scnprintf(page + sz, PAGE_SIZE - sz, "\n"); return sz; } @@ -411,7 +411,7 @@ static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page) for (i = 0; i < buckets; i++) total_buckets += atomic64_read(&pblk->pad_dist[i]); - sz += snprintf(page + sz, PAGE_SIZE - sz, "0:%lld%% ", + sz += scnprintf(page + sz, PAGE_SIZE - sz, "0:%lld%% ", bucket_percentage(total - total_buckets, total)); for (i = 0; i < buckets; i++) { @@ -419,10 +419,10 @@ static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page) p = bucket_percentage(atomic64_read(&pblk->pad_dist[i]), total); - sz += snprintf(page + sz, PAGE_SIZE - sz, "%d:%lld%% ", + sz += scnprintf(page + sz, PAGE_SIZE - sz, "%d:%lld%% ", i + 1, p); } - sz += snprintf(page + sz, PAGE_SIZE - sz, "\n"); + sz += scnprintf(page + sz, PAGE_SIZE - sz, "\n"); return sz; } diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index 8c744578122a..a0d87ed9da69 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c @@ -300,9 +300,11 @@ static int control_loop(void *dummy) /* i2c probing and setup */ /************************************************************************/ -static int -do_attach( struct i2c_adapter *adapter ) +static void do_attach(struct i2c_adapter *adapter) { + struct i2c_board_info info = { }; + struct device_node *np; + /* scan 0x48-0x4f (DS1775) and 0x2c-2x2f (ADM1030) */ static const unsigned short scan_ds1775[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, @@ -313,25 +315,24 @@ do_attach( struct i2c_adapter *adapter ) I2C_CLIENT_END }; - if( strncmp(adapter->name, "uni-n", 5) ) - return 0; - - if( !x.running ) { - struct i2c_board_info info; + if (x.running || strncmp(adapter->name, "uni-n", 5)) + return; - memset(&info, 0, sizeof(struct i2c_board_info)); - strlcpy(info.type, "therm_ds1775", I2C_NAME_SIZE); + np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,ds1775"); + if (np) { + of_node_put(np); + } else { + strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE); i2c_new_probed_device(adapter, &info, scan_ds1775, NULL); + } - strlcpy(info.type, "therm_adm1030", I2C_NAME_SIZE); + np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030"); + if (np) { + of_node_put(np); + } else { + strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE); i2c_new_probed_device(adapter, &info, scan_adm1030, NULL); - - if( x.thermostat && x.fan ) { - x.running = 1; - x.poll_task = kthread_run(control_loop, NULL, "g4fand"); - } } - return 0; } static int @@ -404,8 +405,8 @@ out: enum chip { ds1775, adm1030 }; static const struct i2c_device_id therm_windtunnel_id[] = { - { "therm_ds1775", ds1775 }, - { "therm_adm1030", adm1030 }, + { "MAC,ds1775", ds1775 }, + { "MAC,adm1030", adm1030 }, { } }; MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id); @@ -414,6 +415,7 @@ static int do_probe(struct i2c_client *cl, const struct i2c_device_id *id) { struct i2c_adapter *adapter = cl->adapter; + int ret = 0; if( !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_WRITE_BYTE) ) @@ -421,11 +423,19 @@ do_probe(struct i2c_client *cl, const struct i2c_device_id *id) switch (id->driver_data) { case adm1030: - return attach_fan( cl ); + ret = attach_fan(cl); + break; case ds1775: - return attach_thermostat(cl); + ret = attach_thermostat(cl); + break; } - return 0; + + if (!x.running && x.thermostat && x.fan) { + x.running = 1; + x.poll_task = kthread_run(control_loop, NULL, "g4fand"); + } + 
+ return ret; } static struct i2c_driver g4fan_driver = { diff --git a/drivers/macintosh/windfarm_ad7417_sensor.c b/drivers/macintosh/windfarm_ad7417_sensor.c index 125605987b44..e7dec328c7cf 100644 --- a/drivers/macintosh/windfarm_ad7417_sensor.c +++ b/drivers/macintosh/windfarm_ad7417_sensor.c @@ -312,9 +312,16 @@ static const struct i2c_device_id wf_ad7417_id[] = { }; MODULE_DEVICE_TABLE(i2c, wf_ad7417_id); +static const struct of_device_id wf_ad7417_of_id[] = { + { .compatible = "ad7417", }, + { } +}; +MODULE_DEVICE_TABLE(of, wf_ad7417_of_id); + static struct i2c_driver wf_ad7417_driver = { .driver = { .name = "wf_ad7417", + .of_match_table = wf_ad7417_of_id, }, .probe = wf_ad7417_probe, .remove = wf_ad7417_remove, diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c index 67daeec94b44..2470e5a725c8 100644 --- a/drivers/macintosh/windfarm_fcu_controls.c +++ b/drivers/macintosh/windfarm_fcu_controls.c @@ -580,9 +580,16 @@ static const struct i2c_device_id wf_fcu_id[] = { }; MODULE_DEVICE_TABLE(i2c, wf_fcu_id); +static const struct of_device_id wf_fcu_of_id[] = { + { .compatible = "fcu", }, + { } +}; +MODULE_DEVICE_TABLE(of, wf_fcu_of_id); + static struct i2c_driver wf_fcu_driver = { .driver = { .name = "wf_fcu", + .of_match_table = wf_fcu_of_id, }, .probe = wf_fcu_probe, .remove = wf_fcu_remove, diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index 282c28a17ea1..1e5fa09845e7 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/wait.h> #include <linux/i2c.h> +#include <linux/of_device.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> @@ -91,9 +92,14 @@ static int wf_lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct wf_lm75_sensor *lm; - int rc, ds1775 = id->driver_data; + int rc, ds1775; const char *name, *loc; + if (id) + ds1775 = id->driver_data; + else + ds1775 = !!of_device_get_match_data(&client->dev); + DBG("wf_lm75: creating %s device at address 0x%02x\n", ds1775 ? 
"ds1775" : "lm75", client->addr); @@ -164,9 +170,17 @@ static const struct i2c_device_id wf_lm75_id[] = { }; MODULE_DEVICE_TABLE(i2c, wf_lm75_id); +static const struct of_device_id wf_lm75_of_id[] = { + { .compatible = "lm75", .data = (void *)0}, + { .compatible = "ds1775", .data = (void *)1 }, + { } +}; +MODULE_DEVICE_TABLE(of, wf_lm75_of_id); + static struct i2c_driver wf_lm75_driver = { .driver = { .name = "wf_lm75", + .of_match_table = wf_lm75_of_id, }, .probe = wf_lm75_probe, .remove = wf_lm75_remove, diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c index b03a33b803b7..d011899c0a8a 100644 --- a/drivers/macintosh/windfarm_lm87_sensor.c +++ b/drivers/macintosh/windfarm_lm87_sensor.c @@ -166,9 +166,16 @@ static const struct i2c_device_id wf_lm87_id[] = { }; MODULE_DEVICE_TABLE(i2c, wf_lm87_id); +static const struct of_device_id wf_lm87_of_id[] = { + { .compatible = "lm87cimt", }, + { } +}; +MODULE_DEVICE_TABLE(of, wf_lm87_of_id); + static struct i2c_driver wf_lm87_driver = { .driver = { .name = "wf_lm87", + .of_match_table = wf_lm87_of_id, }, .probe = wf_lm87_probe, .remove = wf_lm87_remove, diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c index e666cc020683..1e7b03d44ad9 100644 --- a/drivers/macintosh/windfarm_max6690_sensor.c +++ b/drivers/macintosh/windfarm_max6690_sensor.c @@ -120,9 +120,16 @@ static const struct i2c_device_id wf_max6690_id[] = { }; MODULE_DEVICE_TABLE(i2c, wf_max6690_id); +static const struct of_device_id wf_max6690_of_id[] = { + { .compatible = "max6690", }, + { } +}; +MODULE_DEVICE_TABLE(of, wf_max6690_of_id); + static struct i2c_driver wf_max6690_driver = { .driver = { .name = "wf_max6690", + .of_match_table = wf_max6690_of_id, }, .probe = wf_max6690_probe, .remove = wf_max6690_remove, diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c index c84ec49c3741..cb75dc035616 100644 --- a/drivers/macintosh/windfarm_smu_sat.c +++ b/drivers/macintosh/windfarm_smu_sat.c @@ -341,9 +341,16 @@ static const struct i2c_device_id wf_sat_id[] = { }; MODULE_DEVICE_TABLE(i2c, wf_sat_id); +static const struct of_device_id wf_sat_of_id[] = { + { .compatible = "smu-sat", }, + { } +}; +MODULE_DEVICE_TABLE(of, wf_sat_of_id); + static struct i2c_driver wf_sat_driver = { .driver = { .name = "wf_smu_sat", + .of_match_table = wf_sat_of_id, }, .probe = wf_sat_probe, .remove = wf_sat_remove, diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 8bc1faf71ff2..a1df0d95151c 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -67,7 +67,6 @@ #include <linux/blkdev.h> #include <linux/kthread.h> #include <linux/random.h> -#include <linux/sched/signal.h> #include <trace/events/bcache.h> #define MAX_OPEN_BUCKETS 128 @@ -734,21 +733,8 @@ int bch_open_buckets_alloc(struct cache_set *c) int bch_cache_allocator_start(struct cache *ca) { - struct task_struct *k; - - /* - * In case previous btree check operation occupies too many - * system memory for bcache btree node cache, and the - * registering process is selected by OOM killer. Here just - * ignore the SIGKILL sent by OOM killer if there is, to - * avoid kthread_run() being failed by pending signals. The - * bcache registering process will exit after the registration - * done. 
- */ - if (signal_pending(current)) - flush_signals(current); - - k = kthread_run(bch_allocator_thread, ca, "bcache_allocator"); + struct task_struct *k = kthread_run(bch_allocator_thread, + ca, "bcache_allocator"); if (IS_ERR(k)) return PTR_ERR(k); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index b12186c87f52..72856e5f23a3 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -34,7 +34,6 @@ #include <linux/random.h> #include <linux/rcupdate.h> #include <linux/sched/clock.h> -#include <linux/sched/signal.h> #include <linux/rculist.h> #include <linux/delay.h> #include <trace/events/bcache.h> @@ -102,64 +101,6 @@ #define insert_lock(s, b) ((b)->level <= (s)->lock) -/* - * These macros are for recursing down the btree - they handle the details of - * locking and looking up nodes in the cache for you. They're best treated as - * mere syntax when reading code that uses them. - * - * op->lock determines whether we take a read or a write lock at a given depth. - * If you've got a read lock and find that you need a write lock (i.e. you're - * going to have to split), set op->lock and return -EINTR; btree_root() will - * call you again and you'll have the correct lock. - */ - -/** - * btree - recurse down the btree on a specified key - * @fn: function to call, which will be passed the child node - * @key: key to recurse on - * @b: parent btree node - * @op: pointer to struct btree_op - */ -#define btree(fn, key, b, op, ...) \ -({ \ - int _r, l = (b)->level - 1; \ - bool _w = l <= (op)->lock; \ - struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \ - _w, b); \ - if (!IS_ERR(_child)) { \ - _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \ - rw_unlock(_w, _child); \ - } else \ - _r = PTR_ERR(_child); \ - _r; \ -}) - -/** - * btree_root - call a function on the root of the btree - * @fn: function to call, which will be passed the child node - * @c: cache set - * @op: pointer to struct btree_op - */ -#define btree_root(fn, c, op, ...) \ -({ \ - int _r = -EINTR; \ - do { \ - struct btree *_b = (c)->root; \ - bool _w = insert_lock(op, _b); \ - rw_lock(_w, _b, _b->level); \ - if (_b == (c)->root && \ - _w == insert_lock(op, _b)) { \ - _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \ - } \ - rw_unlock(_w, _b); \ - bch_cannibalize_unlock(c); \ - if (_r == -EINTR) \ - schedule(); \ - } while (_r == -EINTR); \ - \ - finish_wait(&(c)->btree_cache_wait, &(op)->wait); \ - _r; \ -}) static inline struct bset *write_block(struct btree *b) { @@ -1849,7 +1790,7 @@ static void bch_btree_gc(struct cache_set *c) /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */ do { - ret = btree_root(gc_root, c, &op, &writes, &stats); + ret = bcache_btree_root(gc_root, c, &op, &writes, &stats); closure_sync(&writes); cond_resched(); @@ -1914,18 +1855,6 @@ static int bch_gc_thread(void *arg) int bch_gc_thread_start(struct cache_set *c) { - /* - * In case previous btree check operation occupies too many - * system memory for bcache btree node cache, and the - * registering process is selected by OOM killer. Here just - * ignore the SIGKILL sent by OOM killer if there is, to - * avoid kthread_run() being failed by pending signals. The - * bcache registering process will exit after the registration - * done. 
- */ - if (signal_pending(current)) - flush_signals(current); - c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc"); return PTR_ERR_OR_ZERO(c->gc_thread); } @@ -1959,7 +1888,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) } if (p) - ret = btree(check_recurse, p, b, op); + ret = bcache_btree(check_recurse, p, b, op); p = k; } while (p && !ret); @@ -1968,13 +1897,176 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) return ret; } + +static int bch_btree_check_thread(void *arg) +{ + int ret; + struct btree_check_info *info = arg; + struct btree_check_state *check_state = info->state; + struct cache_set *c = check_state->c; + struct btree_iter iter; + struct bkey *k, *p; + int cur_idx, prev_idx, skip_nr; + int i, n; + + k = p = NULL; + i = n = 0; + cur_idx = prev_idx = 0; + ret = 0; + + /* root node keys are checked before thread created */ + bch_btree_iter_init(&c->root->keys, &iter, NULL); + k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); + BUG_ON(!k); + + p = k; + while (k) { + /* + * Fetch a root node key index, skip the keys which + * should be fetched by other threads, then check the + * sub-tree indexed by the fetched key. + */ + spin_lock(&check_state->idx_lock); + cur_idx = check_state->key_idx; + check_state->key_idx++; + spin_unlock(&check_state->idx_lock); + + skip_nr = cur_idx - prev_idx; + + while (skip_nr) { + k = bch_btree_iter_next_filter(&iter, + &c->root->keys, + bch_ptr_bad); + if (k) + p = k; + else { + /* + * No more keys to check in root node, + * current checking threads are enough, + * stop creating more. + */ + atomic_set(&check_state->enough, 1); + /* Update check_state->enough earlier */ + smp_mb__after_atomic(); + goto out; + } + skip_nr--; + cond_resched(); + } + + if (p) { + struct btree_op op; + + btree_node_prefetch(c->root, p); + c->gc_stats.nodes++; + bch_btree_op_init(&op, 0); + ret = bcache_btree(check_recurse, p, c->root, &op); + if (ret) + goto out; + } + p = NULL; + prev_idx = cur_idx; + cond_resched(); + } + +out: + info->result = ret; + /* update check_state->started among all CPUs */ + smp_mb__before_atomic(); + if (atomic_dec_and_test(&check_state->started)) + wake_up(&check_state->wait); + + return ret; +} + + + +static int bch_btree_chkthread_nr(void) +{ + int n = num_online_cpus()/2; + + if (n == 0) + n = 1; + else if (n > BCH_BTR_CHKTHREAD_MAX) + n = BCH_BTR_CHKTHREAD_MAX; + + return n; +} + int bch_btree_check(struct cache_set *c) { - struct btree_op op; + int ret = 0; + int i; + struct bkey *k = NULL; + struct btree_iter iter; + struct btree_check_state *check_state; + char name[32]; - bch_btree_op_init(&op, SHRT_MAX); + /* check and mark root node keys */ + for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) + bch_initial_mark_key(c, c->root->level, k); + + bch_initial_mark_key(c, c->root->level + 1, &c->root->key); + + if (c->root->level == 0) + return 0; + + check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL); + if (!check_state) + return -ENOMEM; + + check_state->c = c; + check_state->total_threads = bch_btree_chkthread_nr(); + check_state->key_idx = 0; + spin_lock_init(&check_state->idx_lock); + atomic_set(&check_state->started, 0); + atomic_set(&check_state->enough, 0); + init_waitqueue_head(&check_state->wait); - return btree_root(check_recurse, c, &op); + /* + * Run multiple threads to check btree nodes in parallel, + * if check_state->enough is non-zero, it means current + * running check threads are enough, unncessary to create 
+ * more. + */ + for (i = 0; i < check_state->total_threads; i++) { + /* fetch latest check_state->enough earlier */ + smp_mb__before_atomic(); + if (atomic_read(&check_state->enough)) + break; + + check_state->infos[i].result = 0; + check_state->infos[i].state = check_state; + snprintf(name, sizeof(name), "bch_btrchk[%u]", i); + atomic_inc(&check_state->started); + + check_state->infos[i].thread = + kthread_run(bch_btree_check_thread, + &check_state->infos[i], + name); + if (IS_ERR(check_state->infos[i].thread)) { + pr_err("fails to run thread bch_btrchk[%d]", i); + for (--i; i >= 0; i--) + kthread_stop(check_state->infos[i].thread); + ret = -ENOMEM; + goto out; + } + } + + wait_event_interruptible(check_state->wait, + atomic_read(&check_state->started) == 0 || + test_bit(CACHE_SET_IO_DISABLE, &c->flags)); + + for (i = 0; i < check_state->total_threads; i++) { + if (check_state->infos[i].result) { + ret = check_state->infos[i].result; + goto out; + } + } + +out: + kfree(check_state); + return ret; } void bch_initial_gc_finish(struct cache_set *c) @@ -2414,7 +2506,7 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { - ret = btree(map_nodes_recurse, k, b, + ret = bcache_btree(map_nodes_recurse, k, b, op, from, fn, flags); from = NULL; @@ -2432,10 +2524,10 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, struct bkey *from, btree_map_nodes_fn *fn, int flags) { - return btree_root(map_nodes_recurse, c, op, from, fn, flags); + return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags); } -static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, +int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, struct bkey *from, btree_map_keys_fn *fn, int flags) { @@ -2448,7 +2540,8 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { ret = !b->level ? 
fn(op, b, k) - : btree(map_keys_recurse, k, b, op, from, fn, flags); + : bcache_btree(map_keys_recurse, k, + b, op, from, fn, flags); from = NULL; if (ret != MAP_CONTINUE) @@ -2465,7 +2558,7 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, struct bkey *from, btree_map_keys_fn *fn, int flags) { - return btree_root(map_keys_recurse, c, op, from, fn, flags); + return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags); } /* Keybuf code */ diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index f4dcca449391..257969980c49 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -145,6 +145,9 @@ struct btree { struct bio *bio; }; + + + #define BTREE_FLAG(flag) \ static inline bool btree_node_ ## flag(struct btree *b) \ { return test_bit(BTREE_NODE_ ## flag, &b->flags); } \ @@ -216,6 +219,25 @@ struct btree_op { unsigned int insert_collision:1; }; +struct btree_check_state; +struct btree_check_info { + struct btree_check_state *state; + struct task_struct *thread; + int result; +}; + +#define BCH_BTR_CHKTHREAD_MAX 64 +struct btree_check_state { + struct cache_set *c; + int total_threads; + int key_idx; + spinlock_t idx_lock; + atomic_t started; + atomic_t enough; + wait_queue_head_t wait; + struct btree_check_info infos[BCH_BTR_CHKTHREAD_MAX]; +}; + static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level) { memset(op, 0, sizeof(struct btree_op)); @@ -284,6 +306,65 @@ static inline void force_wake_up_gc(struct cache_set *c) wake_up_gc(c); } +/* + * These macros are for recursing down the btree - they handle the details of + * locking and looking up nodes in the cache for you. They're best treated as + * mere syntax when reading code that uses them. + * + * op->lock determines whether we take a read or a write lock at a given depth. + * If you've got a read lock and find that you need a write lock (i.e. you're + * going to have to split), set op->lock and return -EINTR; btree_root() will + * call you again and you'll have the correct lock. + */ + +/** + * btree - recurse down the btree on a specified key + * @fn: function to call, which will be passed the child node + * @key: key to recurse on + * @b: parent btree node + * @op: pointer to struct btree_op + */ +#define bcache_btree(fn, key, b, op, ...) \ +({ \ + int _r, l = (b)->level - 1; \ + bool _w = l <= (op)->lock; \ + struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \ + _w, b); \ + if (!IS_ERR(_child)) { \ + _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \ + rw_unlock(_w, _child); \ + } else \ + _r = PTR_ERR(_child); \ + _r; \ +}) + +/** + * btree_root - call a function on the root of the btree + * @fn: function to call, which will be passed the child node + * @c: cache set + * @op: pointer to struct btree_op + */ +#define bcache_btree_root(fn, c, op, ...) 
\ +({ \ + int _r = -EINTR; \ + do { \ + struct btree *_b = (c)->root; \ + bool _w = insert_lock(op, _b); \ + rw_lock(_w, _b, _b->level); \ + if (_b == (c)->root && \ + _w == insert_lock(op, _b)) { \ + _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \ + } \ + rw_unlock(_w, _b); \ + bch_cannibalize_unlock(c); \ + if (_r == -EINTR) \ + schedule(); \ + } while (_r == -EINTR); \ + \ + finish_wait(&(c)->btree_cache_wait, &(op)->wait); \ + _r; \ +}) + #define MAP_DONE 0 #define MAP_CONTINUE 1 @@ -314,6 +395,9 @@ typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b, struct bkey *k); int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, struct bkey *from, btree_map_keys_fn *fn, int flags); +int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, + struct bkey *from, btree_map_keys_fn *fn, + int flags); typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 820d8402a1dc..71a90fbec314 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1161,8 +1161,7 @@ static void quit_max_writeback_rate(struct cache_set *c, /* Cached devices - read & write stuff */ -static blk_qc_t cached_dev_make_request(struct request_queue *q, - struct bio *bio) +blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio) { struct search *s; struct bcache_device *d = bio->bi_disk->private_data; @@ -1266,7 +1265,6 @@ void bch_cached_dev_request_init(struct cached_dev *dc) { struct gendisk *g = dc->disk.disk; - g->queue->make_request_fn = cached_dev_make_request; g->queue->backing_dev_info->congested_fn = cached_dev_congested; dc->disk.cache_miss = cached_dev_cache_miss; dc->disk.ioctl = cached_dev_ioctl; @@ -1301,8 +1299,7 @@ static void flash_dev_nodata(struct closure *cl) continue_at(cl, search_free, NULL); } -static blk_qc_t flash_dev_make_request(struct request_queue *q, - struct bio *bio) +blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio) { struct search *s; struct closure *cl; diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index c64dbd7a91aa..bb005c93dd72 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -37,7 +37,10 @@ unsigned int bch_get_congested(const struct cache_set *c); void bch_data_insert(struct closure *cl); void bch_cached_dev_request_init(struct cached_dev *dc); +blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio); + void bch_flash_dev_request_init(struct bcache_device *d); +blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio); extern struct kmem_cache *bch_search_cache; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 0c3c5419c52b..d98354fa28e3 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -816,7 +816,7 @@ static void bcache_device_free(struct bcache_device *d) } static int bcache_device_init(struct bcache_device *d, unsigned int block_size, - sector_t sectors) + sector_t sectors, make_request_fn make_request_fn) { struct request_queue *q; const size_t max_stripes = min_t(size_t, INT_MAX, @@ -866,11 +866,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, d->disk->fops = &bcache_ops; d->disk->private_data = d; - q = blk_alloc_queue(GFP_KERNEL); + q = blk_alloc_queue(make_request_fn, NUMA_NO_NODE); if (!q) return -ENOMEM; - blk_queue_make_request(q, NULL); d->disk->queue = q; q->queuedata = d; q->backing_dev_info->congested_data = d; @@ 
-1339,7 +1338,8 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) q->limits.raid_partial_stripes_expensive; ret = bcache_device_init(&dc->disk, block_size, - dc->bdev->bd_part->nr_sects - dc->sb.data_offset); + dc->bdev->bd_part->nr_sects - dc->sb.data_offset, + cached_dev_make_request); if (ret) return ret; @@ -1451,7 +1451,8 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) kobject_init(&d->kobj, &bch_flash_dev_ktype); - if (bcache_device_init(d, block_bytes(c), u->sectors)) + if (bcache_device_init(d, block_bytes(c), u->sectors, + flash_dev_make_request)) goto err; bcache_device_attach(d, c, u - c->uuids); diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 3470fae4eabc..323276994aab 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -154,7 +154,7 @@ static ssize_t bch_snprint_string_list(char *buf, size_t i; for (i = 0; list[i]; i++) - out += snprintf(out, buf + size - out, + out += scnprintf(out, buf + size - out, i == selected ? "[%s] " : "%s ", list[i]); out[-1] = '\n'; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 4a40f9eadeaf..3f7641fb28d5 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -183,7 +183,7 @@ static void update_writeback_rate(struct work_struct *work) */ set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */ - smp_mb(); + smp_mb__after_atomic(); /* * CACHE_SET_IO_DISABLE might be set via sysfs interface, @@ -193,7 +193,7 @@ static void update_writeback_rate(struct work_struct *work) test_bit(CACHE_SET_IO_DISABLE, &c->flags)) { clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */ - smp_mb(); + smp_mb__after_atomic(); return; } @@ -229,7 +229,7 @@ static void update_writeback_rate(struct work_struct *work) */ clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */ - smp_mb(); + smp_mb__after_atomic(); } static unsigned int writeback_delay(struct cached_dev *dc, @@ -785,7 +785,9 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, return MAP_CONTINUE; } -void bch_sectors_dirty_init(struct bcache_device *d) +static int bch_root_node_dirty_init(struct cache_set *c, + struct bcache_device *d, + struct bkey *k) { struct sectors_dirty_init op; int ret; @@ -796,8 +798,13 @@ void bch_sectors_dirty_init(struct bcache_device *d) op.start = KEY(op.inode, 0, 0); do { - ret = bch_btree_map_keys(&op.op, d->c, &op.start, - sectors_dirty_init_fn, 0); + ret = bcache_btree(map_keys_recurse, + k, + c->root, + &op.op, + &op.start, + sectors_dirty_init_fn, + 0); if (ret == -EAGAIN) schedule_timeout_interruptible( msecs_to_jiffies(INIT_KEYS_SLEEP_MS)); @@ -806,6 +813,151 @@ void bch_sectors_dirty_init(struct bcache_device *d) break; } } while (ret == -EAGAIN); + + return ret; +} + +static int bch_dirty_init_thread(void *arg) +{ + struct dirty_init_thrd_info *info = arg; + struct bch_dirty_init_state *state = info->state; + struct cache_set *c = state->c; + struct btree_iter iter; + struct bkey *k, *p; + int cur_idx, prev_idx, skip_nr; + int i; + + k = p = NULL; + i = 0; + cur_idx = prev_idx = 0; + + bch_btree_iter_init(&c->root->keys, &iter, NULL); + k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); + BUG_ON(!k); + + p = k; + + while (k) { + spin_lock(&state->idx_lock); + cur_idx = state->key_idx; + 
state->key_idx++; + spin_unlock(&state->idx_lock); + + skip_nr = cur_idx - prev_idx; + + while (skip_nr) { + k = bch_btree_iter_next_filter(&iter, + &c->root->keys, + bch_ptr_bad); + if (k) + p = k; + else { + atomic_set(&state->enough, 1); + /* Update state->enough earlier */ + smp_mb__after_atomic(); + goto out; + } + skip_nr--; + cond_resched(); + } + + if (p) { + if (bch_root_node_dirty_init(c, state->d, p) < 0) + goto out; + } + + p = NULL; + prev_idx = cur_idx; + cond_resched(); + } + +out: + /* In order to wake up state->wait in time */ + smp_mb__before_atomic(); + if (atomic_dec_and_test(&state->started)) + wake_up(&state->wait); + + return 0; +} + +static int bch_btre_dirty_init_thread_nr(void) +{ + int n = num_online_cpus()/2; + + if (n == 0) + n = 1; + else if (n > BCH_DIRTY_INIT_THRD_MAX) + n = BCH_DIRTY_INIT_THRD_MAX; + + return n; +} + +void bch_sectors_dirty_init(struct bcache_device *d) +{ + int i; + struct bkey *k = NULL; + struct btree_iter iter; + struct sectors_dirty_init op; + struct cache_set *c = d->c; + struct bch_dirty_init_state *state; + char name[32]; + + /* Just count root keys if no leaf node */ + if (c->root->level == 0) { + bch_btree_op_init(&op.op, -1); + op.inode = d->id; + op.count = 0; + op.start = KEY(op.inode, 0, 0); + + for_each_key_filter(&c->root->keys, + k, &iter, bch_ptr_invalid) + sectors_dirty_init_fn(&op.op, c->root, k); + return; + } + + state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL); + if (!state) { + pr_warn("sectors dirty init failed: cannot allocate memory"); + return; + } + + state->c = c; + state->d = d; + state->total_threads = bch_btre_dirty_init_thread_nr(); + state->key_idx = 0; + spin_lock_init(&state->idx_lock); + atomic_set(&state->started, 0); + atomic_set(&state->enough, 0); + init_waitqueue_head(&state->wait); + + for (i = 0; i < state->total_threads; i++) { + /* Fetch latest state->enough earlier */ + smp_mb__before_atomic(); + if (atomic_read(&state->enough)) + break; + + state->infos[i].state = state; + atomic_inc(&state->started); + snprintf(name, sizeof(name), "bch_dirty_init[%d]", i); + + state->infos[i].thread = + kthread_run(bch_dirty_init_thread, + &state->infos[i], + name); + if (IS_ERR(state->infos[i].thread)) { + pr_err("fails to run thread bch_dirty_init[%d]", i); + for (--i; i >= 0; i--) + kthread_stop(state->infos[i].thread); + goto out; + } + } + + wait_event_interruptible(state->wait, + atomic_read(&state->started) == 0 || + test_bit(CACHE_SET_IO_DISABLE, &c->flags)); + +out: + kfree(state); } void bch_cached_dev_writeback_init(struct cached_dev *dc) diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 4e4c6810dc3c..b029843ce5b6 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -16,6 +16,7 @@ #define BCH_AUTO_GC_DIRTY_THRESHOLD 50 +#define BCH_DIRTY_INIT_THRD_MAX 64 /* * 14 (16384ths) is chosen here as something that each backing device * should be a reasonable fraction of the share, and not to blow up @@ -23,6 +24,24 @@ */ #define WRITEBACK_SHARE_SHIFT 14 +struct bch_dirty_init_state; +struct dirty_init_thrd_info { + struct bch_dirty_init_state *state; + struct task_struct *thread; +}; + +struct bch_dirty_init_state { + struct cache_set *c; + struct bcache_device *d; + int total_threads; + int key_idx; + spinlock_t idx_lock; + atomic_t started; + atomic_t enough; + wait_queue_head_t wait; + struct dirty_init_thrd_info infos[BCH_DIRTY_INIT_THRD_MAX]; +}; + static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) { 
uint64_t i, ret = 0; diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h index c82578af56a5..2ea0360108e1 100644 --- a/drivers/md/dm-bio-record.h +++ b/drivers/md/dm-bio-record.h @@ -20,8 +20,13 @@ struct dm_bio_details { struct gendisk *bi_disk; u8 bi_partno; + int __bi_remaining; unsigned long bi_flags; struct bvec_iter bi_iter; + bio_end_io_t *bi_end_io; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + struct bio_integrity_payload *bi_integrity; +#endif }; static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) @@ -30,6 +35,11 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) bd->bi_partno = bio->bi_partno; bd->bi_flags = bio->bi_flags; bd->bi_iter = bio->bi_iter; + bd->__bi_remaining = atomic_read(&bio->__bi_remaining); + bd->bi_end_io = bio->bi_end_io; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + bd->bi_integrity = bio_integrity(bio); +#endif } static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) @@ -38,6 +48,11 @@ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) bio->bi_partno = bd->bi_partno; bio->bi_flags = bd->bi_flags; bio->bi_iter = bd->bi_iter; + atomic_set(&bio->__bi_remaining, bd->__bi_remaining); + bio->bi_end_io = bd->bi_end_io; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + bio->bi_integrity = bd->bi_integrity; +#endif } #endif diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 2d32821b3a5b..d3bb355819a4 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2846,8 +2846,8 @@ static void cache_postsuspend(struct dm_target *ti) prevent_background_work(cache); BUG_ON(atomic_read(&cache->nr_io_migrations)); - cancel_delayed_work(&cache->waker); - flush_workqueue(cache->wq); + cancel_delayed_work_sync(&cache->waker); + drain_workqueue(cache->wq); WARN_ON(cache->tracker.in_flight); /* @@ -3492,7 +3492,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type cache_target = { .name = "cache", - .version = {2, 1, 0}, + .version = {2, 2, 0}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index b225b3e445fa..2f03fecd312d 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -6,6 +6,8 @@ * This file is released under the GPL. 
*/ +#include "dm-bio-record.h" + #include <linux/compiler.h> #include <linux/module.h> #include <linux/device-mapper.h> @@ -201,17 +203,19 @@ struct dm_integrity_c { __u8 log2_blocks_per_bitmap_bit; unsigned char mode; - int suspending; int failed; struct crypto_shash *internal_hash; + struct dm_target *ti; + /* these variables are locked with endio_wait.lock */ struct rb_root in_progress; struct list_head wait_list; wait_queue_head_t endio_wait; struct workqueue_struct *wait_wq; + struct workqueue_struct *offload_wq; unsigned char commit_seq; commit_id_t commit_ids[N_COMMIT_IDS]; @@ -293,11 +297,7 @@ struct dm_integrity_io { struct completion *completion; - struct gendisk *orig_bi_disk; - u8 orig_bi_partno; - bio_end_io_t *orig_bi_end_io; - struct bio_integrity_payload *orig_bi_integrity; - struct bvec_iter orig_bi_iter; + struct dm_bio_details bio_details; }; struct journal_completion { @@ -1439,7 +1439,7 @@ static void dec_in_flight(struct dm_integrity_io *dio) dio->range.logical_sector += dio->range.n_sectors; bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); INIT_WORK(&dio->work, integrity_bio_wait); - queue_work(ic->wait_wq, &dio->work); + queue_work(ic->offload_wq, &dio->work); return; } do_endio_flush(ic, dio); @@ -1450,14 +1450,9 @@ static void integrity_end_io(struct bio *bio) { struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); - bio->bi_iter = dio->orig_bi_iter; - bio->bi_disk = dio->orig_bi_disk; - bio->bi_partno = dio->orig_bi_partno; - if (dio->orig_bi_integrity) { - bio->bi_integrity = dio->orig_bi_integrity; + dm_bio_restore(&dio->bio_details, bio); + if (bio->bi_integrity) bio->bi_opf |= REQ_INTEGRITY; - } - bio->bi_end_io = dio->orig_bi_end_io; if (dio->completion) complete(dio->completion); @@ -1542,7 +1537,7 @@ static void integrity_metadata(struct work_struct *w) } } - __bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) { + __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { unsigned pos; char *mem, *checksums_ptr; @@ -1586,7 +1581,7 @@ again: if (likely(checksums != checksums_onstack)) kfree(checksums); } else { - struct bio_integrity_payload *bip = dio->orig_bi_integrity; + struct bio_integrity_payload *bip = dio->bio_details.bi_integrity; if (bip) { struct bio_vec biv; @@ -1865,7 +1860,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map if (need_sync_io && from_map) { INIT_WORK(&dio->work, integrity_bio_wait); - queue_work(ic->metadata_wq, &dio->work); + queue_work(ic->offload_wq, &dio->work); return; } @@ -2005,20 +2000,13 @@ offload_to_thread: } else dio->completion = NULL; - dio->orig_bi_iter = bio->bi_iter; - - dio->orig_bi_disk = bio->bi_disk; - dio->orig_bi_partno = bio->bi_partno; + dm_bio_record(&dio->bio_details, bio); bio_set_dev(bio, ic->dev->bdev); - - dio->orig_bi_integrity = bio_integrity(bio); bio->bi_integrity = NULL; bio->bi_opf &= ~REQ_INTEGRITY; - - dio->orig_bi_end_io = bio->bi_end_io; bio->bi_end_io = integrity_end_io; - bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT; + generic_make_request(bio); if (need_sync_io) { @@ -2315,7 +2303,7 @@ static void integrity_writer(struct work_struct *w) unsigned prev_free_sectors; /* the following test is not needed, but it tests the replay code */ - if (READ_ONCE(ic->suspending) && !ic->meta_dev) + if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev) return; spin_lock_irq(&ic->endio_wait.lock); @@ -2376,7 +2364,7 @@ static void integrity_recalc(struct work_struct *w) next_chunk: - if 
(unlikely(READ_ONCE(ic->suspending))) + if (unlikely(dm_suspended(ic->ti))) goto unlock_ret; range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); @@ -2501,7 +2489,7 @@ static void bitmap_block_work(struct work_struct *w) dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) { remove_range(ic, &dio->range); INIT_WORK(&dio->work, integrity_bio_wait); - queue_work(ic->wait_wq, &dio->work); + queue_work(ic->offload_wq, &dio->work); } else { block_bitmap_op(ic, ic->journal, dio->range.logical_sector, dio->range.n_sectors, BITMAP_OP_SET); @@ -2524,7 +2512,7 @@ static void bitmap_block_work(struct work_struct *w) remove_range(ic, &dio->range); INIT_WORK(&dio->work, integrity_bio_wait); - queue_work(ic->wait_wq, &dio->work); + queue_work(ic->offload_wq, &dio->work); } queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); @@ -2804,8 +2792,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti) del_timer_sync(&ic->autocommit_timer); - WRITE_ONCE(ic->suspending, 1); - if (ic->recalc_wq) drain_workqueue(ic->recalc_wq); @@ -2834,8 +2820,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti) #endif } - WRITE_ONCE(ic->suspending, 0); - BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); ic->journal_uptodate = true; @@ -2888,17 +2872,24 @@ static void dm_integrity_resume(struct dm_target *ti) } else { replay_journal(ic); if (ic->mode == 'B') { - int mode; ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP); ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); if (unlikely(r)) dm_integrity_io_error(ic, "writing superblock", r); - mode = ic->recalculate_flag ? BITMAP_OP_SET : BITMAP_OP_CLEAR; - block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, mode); - block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, mode); - block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, mode); + block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); + block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); + block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && + le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) { + block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector), + ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); + block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector), + ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); + block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector), + ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); + } rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); } @@ -2967,7 +2958,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, DMEMIT(" meta_device:%s", ic->meta_dev->name); if (ic->sectors_per_block != 1) DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT); - if (ic->recalculate_flag) + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) DMEMIT(" recalculate"); DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); @@ -3623,6 +3614,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) } ti->private = ic; 
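
The dm-integrity rework above replaces the hand-rolled orig_bi_* fields with the shared dm_bio_details helpers from dm-bio-record.h. A condensed, generic save/redirect/restore sketch of that pattern is shown below; names such as example_io are hypothetical, dm-bio-record.h is the local drivers/md header changed earlier in this diff, and ti->per_io_data_size is assumed to have been set to sizeof(struct example_io) in the target's ctr.

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/device-mapper.h>
    #include "dm-bio-record.h"      /* local drivers/md header */

    struct example_io {
            struct dm_bio_details bio_details;
    };

    static void example_end_io(struct bio *bio)
    {
            struct example_io *io = dm_per_bio_data(bio, sizeof(struct example_io));

            /*
             * Put back everything dm_bio_record() captured (device, iter,
             * bi_end_io, integrity payload), then complete the bio through
             * its original end_io.
             */
            dm_bio_restore(&io->bio_details, bio);
            bio_endio(bio);
    }

    static void example_redirect(struct example_io *io, struct bio *bio,
                                 struct dm_dev *data_dev)
    {
            dm_bio_record(&io->bio_details, bio);   /* snapshot the original bio */
            bio_set_dev(bio, data_dev->bdev);       /* send it to the data device */
            bio->bi_end_io = example_end_io;
            generic_make_request(bio);
    }
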
ti->per_io_data_size = sizeof(struct dm_integrity_io); + ic->ti = ti; ic->in_progress = RB_ROOT; INIT_LIST_HEAD(&ic->wait_list); @@ -3836,6 +3828,14 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } + ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM, + METADATA_WORKQUEUE_MAX_ACTIVE); + if (!ic->offload_wq) { + ti->error = "Cannot allocate workqueue"; + r = -ENOMEM; + goto bad; + } + ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1); if (!ic->commit_wq) { ti->error = "Cannot allocate workqueue"; @@ -4140,6 +4140,8 @@ static void dm_integrity_dtr(struct dm_target *ti) destroy_workqueue(ic->metadata_wq); if (ic->wait_wq) destroy_workqueue(ic->wait_wq); + if (ic->offload_wq) + destroy_workqueue(ic->offload_wq); if (ic->commit_wq) destroy_workqueue(ic->commit_wq); if (ic->writer_wq) @@ -4200,7 +4202,7 @@ static void dm_integrity_dtr(struct dm_target *ti) static struct target_type integrity_target = { .name = "integrity", - .version = {1, 4, 0}, + .version = {1, 5, 0}, .module = THIS_MODULE, .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, .ctr = dm_integrity_ctr, diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 2bc18c9c3abc..58fd137b6ae1 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -2053,7 +2053,7 @@ static int multipath_busy(struct dm_target *ti) *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 13, 0}, + .version = {1, 14, 0}, .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE | DM_TARGET_PASSES_INTEGRITY, .module = THIS_MODULE, diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index fc9947d6210c..76b6b323bf4b 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -960,9 +960,9 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd) DMWARN("%s: __commit_transaction() failed, error = %d", __func__, r); } + pmd_write_unlock(pmd); if (!pmd->fail_io) __destroy_persistent_data_objects(pmd); - pmd_write_unlock(pmd); kfree(pmd); return 0; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 0d61e9c67986..eec9f252e935 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1221,7 +1221,7 @@ bad: static struct target_type verity_target = { .name = "verity", - .version = {1, 5, 0}, + .version = {1, 6, 0}, .module = THIS_MODULE, .ctr = verity_ctr, .dtr = verity_dtr, diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index b9e27e37a943..a09bdc000e64 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -625,6 +625,12 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry wc->freelist_size++; } +static inline void writecache_verify_watermark(struct dm_writecache *wc) +{ + if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark)) + queue_work(wc->writeback_wq, &wc->writeback_work); +} + static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector) { struct wc_entry *e; @@ -650,8 +656,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, s list_del(&e->lru); } wc->freelist_size--; - if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark)) - queue_work(wc->writeback_wq, &wc->writeback_work); + + writecache_verify_watermark(wc); return e; } @@ -842,7 +848,7 @@ static void 
writecache_suspend(struct dm_target *ti) } wc_unlock(wc); - flush_workqueue(wc->writeback_wq); + drain_workqueue(wc->writeback_wq); wc_lock(wc); if (flush_on_suspend) @@ -965,6 +971,8 @@ erase_this: writecache_commit_flushed(wc, false); } + writecache_verify_watermark(wc); + wc_unlock(wc); } @@ -2312,7 +2320,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type, static struct target_type writecache_target = { .name = "writecache", - .version = {1, 1, 1}, + .version = {1, 2, 0}, .module = THIS_MODULE, .ctr = writecache_ctr, .dtr = writecache_dtr, diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 70a1063161c0..f4f83d39b3dc 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -533,8 +533,9 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) /* Get the BIO chunk work. If one is not active yet, create one */ cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk); - if (!cw) { - + if (cw) { + dmz_get_chunk_work(cw); + } else { /* Create a new chunk work */ cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); if (unlikely(!cw)) { @@ -543,7 +544,7 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) } INIT_WORK(&cw->work, dmz_chunk_work); - refcount_set(&cw->refcount, 0); + refcount_set(&cw->refcount, 1); cw->target = dmz; cw->chunk = chunk; bio_list_init(&cw->bio_list); @@ -556,7 +557,6 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) } bio_list_add(&cw->bio_list, bio); - dmz_get_chunk_work(cw); dmz_reclaim_bio_acc(dmz->reclaim); if (queue_work(dmz->chunk_wq, &cw->work)) @@ -967,7 +967,7 @@ static int dmz_iterate_devices(struct dm_target *ti, static struct target_type dmz_type = { .name = "zoned", - .version = {1, 0, 0}, + .version = {1, 1, 0}, .features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM, .module = THIS_MODULE, .ctr = dmz_ctr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index b89f07ee2eff..753302e83910 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -25,6 +25,7 @@ #include <linux/wait.h> #include <linux/pr.h> #include <linux/refcount.h> +#include <linux/part_stat.h> #define DM_MSG_PREFIX "core" @@ -1788,7 +1789,8 @@ static int dm_any_congested(void *congested_data, int bdi_bits) * With request-based DM we only need to check the * top-level queue for congestion. */ - r = md->queue->backing_dev_info->wb.state & bdi_bits; + struct backing_dev_info *bdi = md->queue->backing_dev_info; + r = bdi->wb.congested->state & bdi_bits; } else { map = dm_get_live_table_fast(md); if (map) @@ -1854,15 +1856,6 @@ static const struct dax_operations dm_dax_ops; static void dm_wq_work(struct work_struct *work); -static void dm_init_normal_md_queue(struct mapped_device *md) -{ - /* - * Initialize aspects of queue that aren't relevant for blk-mq - */ - md->queue->backing_dev_info->congested_data = md; - md->queue->backing_dev_info->congested_fn = dm_any_congested; -} - static void cleanup_mapped_device(struct mapped_device *md) { if (md->wq) @@ -1946,16 +1939,15 @@ static struct mapped_device *alloc_dev(int minor) INIT_LIST_HEAD(&md->table_devices); spin_lock_init(&md->uevent_lock); - md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); - if (!md->queue) - goto bad; - md->queue->queuedata = md; /* * default to bio-based required ->make_request_fn until DM * table is loaded and md->type established. If request-based * table is loaded: blk-mq will override accordingly. 
*/ - blk_queue_make_request(md->queue, dm_make_request); + md->queue = blk_alloc_queue(dm_make_request, numa_node_id); + if (!md->queue) + goto bad; + md->queue->queuedata = md; md->disk = alloc_disk_node(1, md->numa_node_id); if (!md->disk) @@ -2249,6 +2241,12 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md) } EXPORT_SYMBOL_GPL(dm_get_queue_limits); +static void dm_init_congested_fn(struct mapped_device *md) +{ + md->queue->backing_dev_info->congested_data = md; + md->queue->backing_dev_info->congested_fn = dm_any_congested; +} + /* * Setup the DM device's queue based on md's type */ @@ -2265,11 +2263,12 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) DMERR("Cannot initialize queue for request-based dm-mq mapped device"); return r; } + dm_init_congested_fn(md); break; case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: case DM_TYPE_NVME_BIO_BASED: - dm_init_normal_md_queue(md); + dm_init_congested_fn(md); break; case DM_TYPE_NONE: WARN_ON_ONCE(true); @@ -2368,6 +2367,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) map = dm_get_live_table(md, &srcu_idx); if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); + set_bit(DMF_SUSPENDED, &md->flags); dm_table_postsuspend_targets(map); } /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ diff --git a/drivers/md/md.c b/drivers/md/md.c index 469f551863be..271e8a587354 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -58,8 +58,10 @@ #include <linux/delay.h> #include <linux/raid/md_p.h> #include <linux/raid/md_u.h> +#include <linux/raid/detect.h> #include <linux/slab.h> #include <linux/percpu-refcount.h> +#include <linux/part_stat.h> #include <trace/events/block.h> #include "md.h" @@ -2491,12 +2493,12 @@ static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) { int err = 0; struct block_device *bdev; - char b[BDEVNAME_SIZE]; bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, shared ? (struct md_rdev *)lock_rdev : rdev); if (IS_ERR(bdev)) { - pr_warn("md: could not open %s.\n", __bdevname(dev, b)); + pr_warn("md: could not open device unknown-block(%u,%u).\n", + MAJOR(dev), MINOR(dev)); return PTR_ERR(bdev); } rdev->bdev = bdev; @@ -5621,12 +5623,11 @@ static int md_alloc(dev_t dev, char *name) mddev->hold_active = UNTIL_STOP; error = -ENOMEM; - mddev->queue = blk_alloc_queue(GFP_KERNEL); + mddev->queue = blk_alloc_queue(md_make_request, NUMA_NO_NODE); if (!mddev->queue) goto abort; mddev->queue->queuedata = mddev; - blk_queue_make_request(mddev->queue, md_make_request); blk_set_stacking_limits(&mddev->queue->limits); disk = alloc_disk(1 << shift); @@ -6184,7 +6185,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes); static void mddev_detach(struct mddev *mddev) { md_bitmap_wait_behind_writes(mddev); - if (mddev->pers && mddev->pers->quiesce) { + if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) { mddev->pers->quiesce(mddev, 1); mddev->pers->quiesce(mddev, 0); } diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index b36a41332867..9dfea5c4b6ab 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -208,9 +208,9 @@ config MEDIA_SUBDRV_AUTOSELECT If unsure say Y. 
config MEDIA_HIDE_ANCILLARY_SUBDRV - bool - depends on MEDIA_SUBDRV_AUTOSELECT && !COMPILE_TEST && !EXPERT - default y + bool + depends on MEDIA_SUBDRV_AUTOSELECT && !COMPILE_TEST && !EXPERT + default y config MEDIA_ATTACH bool diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c index 4a841bee5cc2..e748cd54b45d 100644 --- a/drivers/media/cec/cec-notifier.c +++ b/drivers/media/cec/cec-notifier.c @@ -23,7 +23,7 @@ struct cec_notifier { struct kref kref; struct device *hdmi_dev; struct cec_connector_info conn_info; - const char *conn_name; + const char *port_name; struct cec_adapter *cec_adap; u16 phys_addr; @@ -32,16 +32,30 @@ struct cec_notifier { static LIST_HEAD(cec_notifiers); static DEFINE_MUTEX(cec_notifiers_lock); -struct cec_notifier * -cec_notifier_get_conn(struct device *hdmi_dev, const char *conn_name) +/** + * cec_notifier_get_conn - find or create a new cec_notifier for the given + * device and connector tuple. + * @hdmi_dev: device that sends the events. + * @port_name: the connector name from which the event occurs + * + * If a notifier for device @dev already exists, then increase the refcount + * and return that notifier. + * + * If it doesn't exist, then allocate a new notifier struct and return a + * pointer to that new struct. + * + * Return NULL if the memory could not be allocated. + */ +static struct cec_notifier * +cec_notifier_get_conn(struct device *hdmi_dev, const char *port_name) { struct cec_notifier *n; mutex_lock(&cec_notifiers_lock); list_for_each_entry(n, &cec_notifiers, head) { if (n->hdmi_dev == hdmi_dev && - (!conn_name || - (n->conn_name && !strcmp(n->conn_name, conn_name)))) { + (!port_name || + (n->port_name && !strcmp(n->port_name, port_name)))) { kref_get(&n->kref); mutex_unlock(&cec_notifiers_lock); return n; @@ -51,9 +65,9 @@ cec_notifier_get_conn(struct device *hdmi_dev, const char *conn_name) if (!n) goto unlock; n->hdmi_dev = hdmi_dev; - if (conn_name) { - n->conn_name = kstrdup(conn_name, GFP_KERNEL); - if (!n->conn_name) { + if (port_name) { + n->port_name = kstrdup(port_name, GFP_KERNEL); + if (!n->port_name) { kfree(n); n = NULL; goto unlock; @@ -68,7 +82,6 @@ unlock: mutex_unlock(&cec_notifiers_lock); return n; } -EXPORT_SYMBOL_GPL(cec_notifier_get_conn); static void cec_notifier_release(struct kref *kref) { @@ -76,7 +89,7 @@ static void cec_notifier_release(struct kref *kref) container_of(kref, struct cec_notifier, kref); list_del(&n->head); - kfree(n->conn_name); + kfree(n->port_name); kfree(n); } @@ -88,10 +101,10 @@ static void cec_notifier_put(struct cec_notifier *n) } struct cec_notifier * -cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name, +cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name, const struct cec_connector_info *conn_info) { - struct cec_notifier *n = cec_notifier_get_conn(hdmi_dev, conn_name); + struct cec_notifier *n = cec_notifier_get_conn(hdmi_dev, port_name); if (!n) return n; @@ -129,7 +142,7 @@ void cec_notifier_conn_unregister(struct cec_notifier *n) EXPORT_SYMBOL_GPL(cec_notifier_conn_unregister); struct cec_notifier * -cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name, +cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name, struct cec_adapter *adap) { struct cec_notifier *n; @@ -137,7 +150,7 @@ cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name, if (WARN_ON(!adap)) return NULL; - n = cec_notifier_get_conn(hdmi_dev, conn_name); + n = 
cec_notifier_get_conn(hdmi_dev, port_name); if (!n) return n; diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c index aabb830e7468..d6531874faa6 100644 --- a/drivers/media/common/saa7146/saa7146_fops.c +++ b/drivers/media/common/saa7146/saa7146_fops.c @@ -97,8 +97,6 @@ void saa7146_buffer_finish(struct saa7146_dev *dev, DEB_EE("dev:%p, dmaq:%p, state:%d\n", dev, q, state); DEB_EE("q->curr:%p\n", q->curr); - BUG_ON(!q->curr); - /* finish current buffer */ if (NULL == q->curr) { DEB_D("aiii. no current buffer\n"); @@ -296,7 +294,7 @@ static int fops_mmap(struct file *file, struct vm_area_struct * vma) int res; switch (vdev->vfl_type) { - case VFL_TYPE_GRABBER: { + case VFL_TYPE_VIDEO: { DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, vma:%p\n", file, vma); q = &fh->video_q; @@ -378,7 +376,7 @@ static ssize_t fops_read(struct file *file, char __user *data, size_t count, lof int ret; switch (vdev->vfl_type) { - case VFL_TYPE_GRABBER: + case VFL_TYPE_VIDEO: /* DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, data:%p, count:%lun", file, data, (unsigned long)count); @@ -409,7 +407,7 @@ static ssize_t fops_write(struct file *file, const char __user *data, size_t cou int ret; switch (vdev->vfl_type) { - case VFL_TYPE_GRABBER: + case VFL_TYPE_VIDEO: return -EINVAL; case VFL_TYPE_VBI: if (fh->dev->ext_vv_data->vbi_fops.write) { @@ -597,7 +595,7 @@ int saa7146_register_device(struct video_device *vfd, struct saa7146_dev *dev, DEB_EE("dev:%p, name:'%s', type:%d\n", dev, name, type); vfd->fops = &video_fops; - if (type == VFL_TYPE_GRABBER) + if (type == VFL_TYPE_VIDEO) vfd->ioctl_ops = &dev->ext_vv_data->vid_ops; else vfd->ioctl_ops = &dev->ext_vv_data->vbi_ops; @@ -611,7 +609,7 @@ int saa7146_register_device(struct video_device *vfd, struct saa7146_dev *dev, vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; vfd->device_caps |= dev->ext_vv_data->capabilities; - if (type == VFL_TYPE_GRABBER) + if (type == VFL_TYPE_VIDEO) vfd->device_caps &= ~(V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_OUTPUT); else diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c index c95d4583498e..8916bb644756 100644 --- a/drivers/media/common/siano/smsdvb-debugfs.c +++ b/drivers/media/common/siano/smsdvb-debugfs.c @@ -45,88 +45,88 @@ static void smsdvb_print_dvb_stats(struct smsdvb_debugfs *debug_data, buf = debug_data->stats_data; - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "is_rf_locked = %d\n", p->is_rf_locked); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "is_demod_locked = %d\n", p->is_demod_locked); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "is_external_lna_on = %d\n", p->is_external_lna_on); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "SNR = %d\n", p->SNR); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "ber = %d\n", p->ber); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "FIB_CRC = %d\n", p->FIB_CRC); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "ts_per = %d\n", p->ts_per); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "MFER = %d\n", p->MFER); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "RSSI = %d\n", p->RSSI); - n += snprintf(&buf[n], PAGE_SIZE - 
n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "in_band_pwr = %d\n", p->in_band_pwr); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "carrier_offset = %d\n", p->carrier_offset); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "modem_state = %d\n", p->modem_state); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "frequency = %d\n", p->frequency); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "bandwidth = %d\n", p->bandwidth); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "transmission_mode = %d\n", p->transmission_mode); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "modem_state = %d\n", p->modem_state); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "guard_interval = %d\n", p->guard_interval); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "code_rate = %d\n", p->code_rate); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "lp_code_rate = %d\n", p->lp_code_rate); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "hierarchy = %d\n", p->hierarchy); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n", p->constellation); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "burst_size = %d\n", p->burst_size); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "burst_duration = %d\n", p->burst_duration); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "burst_cycle_time = %d\n", p->burst_cycle_time); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "calc_burst_cycle_time = %d\n", p->calc_burst_cycle_time); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "num_of_rows = %d\n", p->num_of_rows); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "num_of_padd_cols = %d\n", p->num_of_padd_cols); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "num_of_punct_cols = %d\n", p->num_of_punct_cols); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "error_ts_packets = %d\n", p->error_ts_packets); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "total_ts_packets = %d\n", p->total_ts_packets); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "num_of_valid_mpe_tlbs = %d\n", p->num_of_valid_mpe_tlbs); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "num_of_invalid_mpe_tlbs = %d\n", p->num_of_invalid_mpe_tlbs); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "num_of_corrected_mpe_tlbs = %d\n", p->num_of_corrected_mpe_tlbs); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "ber_error_count = %d\n", p->ber_error_count); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %d\n", p->ber_bit_count); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "pre_ber = %d\n", p->pre_ber); - n += snprintf(&buf[n], PAGE_SIZE - n, + n 
+= scnprintf(&buf[n], PAGE_SIZE - n, "cell_id = %d\n", p->cell_id); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "dvbh_srv_ind_hp = %d\n", p->dvbh_srv_ind_hp); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "dvbh_srv_ind_lp = %d\n", p->dvbh_srv_ind_lp); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "num_mpe_received = %d\n", p->num_mpe_received); debug_data->stats_count = n; @@ -148,42 +148,42 @@ static void smsdvb_print_isdb_stats(struct smsdvb_debugfs *debug_data, buf = debug_data->stats_data; - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "statistics_type = %d\t", p->statistics_type); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "full_size = %d\n", p->full_size); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "is_rf_locked = %d\t\t", p->is_rf_locked); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "is_demod_locked = %d\t", p->is_demod_locked); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "is_external_lna_on = %d\n", p->is_external_lna_on); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "SNR = %d dB\t\t", p->SNR); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "RSSI = %d dBm\t\t", p->RSSI); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "in_band_pwr = %d dBm\n", p->in_band_pwr); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "carrier_offset = %d\t", p->carrier_offset); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "bandwidth = %d\t\t", p->bandwidth); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "frequency = %d Hz\n", p->frequency); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "transmission_mode = %d\t", p->transmission_mode); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "modem_state = %d\t\t", p->modem_state); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "guard_interval = %d\n", p->guard_interval); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "system_type = %d\t\t", p->system_type); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "partial_reception = %d\t", p->partial_reception); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "num_of_layers = %d\n", p->num_of_layers); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors); for (i = 0; i < 3; i++) { @@ -191,31 +191,34 @@ static void smsdvb_print_isdb_stats(struct smsdvb_debugfs *debug_data, p->layer_info[i].number_of_segments > 13) continue; - n += snprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i); - n += snprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t", + n += scnprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i); + n += scnprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t", p->layer_info[i].code_rate); - n += snprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n", + n += scnprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n", p->layer_info[i].constellation); - n += snprintf(&buf[n], PAGE_SIZE - n, "\tber = %-5d\t", + n += scnprintf(&buf[n], PAGE_SIZE - n, 
"\tber = %-5d\t", p->layer_info[i].ber); - n += snprintf(&buf[n], PAGE_SIZE - n, "\tber_error_count = %-5d\t", + n += scnprintf(&buf[n], PAGE_SIZE - n, + "\tber_error_count = %-5d\t", p->layer_info[i].ber_error_count); - n += snprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n", + n += scnprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n", p->layer_info[i].ber_bit_count); - n += snprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t", + n += scnprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t", p->layer_info[i].pre_ber); - n += snprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n", + n += scnprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n", p->layer_info[i].ts_per); - n += snprintf(&buf[n], PAGE_SIZE - n, "\terror_ts_packets = %-5d\t", + n += scnprintf(&buf[n], PAGE_SIZE - n, + "\terror_ts_packets = %-5d\t", p->layer_info[i].error_ts_packets); - n += snprintf(&buf[n], PAGE_SIZE - n, "total_ts_packets = %-5d\t", + n += scnprintf(&buf[n], PAGE_SIZE - n, + "total_ts_packets = %-5d\t", p->layer_info[i].total_ts_packets); - n += snprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n", + n += scnprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n", p->layer_info[i].ti_ldepth_i); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "\tnumber_of_segments = %d\t", p->layer_info[i].number_of_segments); - n += snprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n", + n += scnprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n", p->layer_info[i].tmcc_errors); } @@ -238,44 +241,44 @@ static void smsdvb_print_isdb_stats_ex(struct smsdvb_debugfs *debug_data, buf = debug_data->stats_data; - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "statistics_type = %d\t", p->statistics_type); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "full_size = %d\n", p->full_size); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "is_rf_locked = %d\t\t", p->is_rf_locked); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "is_demod_locked = %d\t", p->is_demod_locked); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "is_external_lna_on = %d\n", p->is_external_lna_on); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "SNR = %d dB\t\t", p->SNR); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "RSSI = %d dBm\t\t", p->RSSI); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "in_band_pwr = %d dBm\n", p->in_band_pwr); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "carrier_offset = %d\t", p->carrier_offset); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "bandwidth = %d\t\t", p->bandwidth); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "frequency = %d Hz\n", p->frequency); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "transmission_mode = %d\t", p->transmission_mode); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "modem_state = %d\t\t", p->modem_state); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "guard_interval = %d\n", p->guard_interval); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "system_type = %d\t\t", p->system_type); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, 
"partial_reception = %d\t", p->partial_reception); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "num_of_layers = %d\n", p->num_of_layers); - n += snprintf(&buf[n], PAGE_SIZE - n, "segment_number = %d\t", + n += scnprintf(&buf[n], PAGE_SIZE - n, "segment_number = %d\t", p->segment_number); - n += snprintf(&buf[n], PAGE_SIZE - n, "tune_bw = %d\n", + n += scnprintf(&buf[n], PAGE_SIZE - n, "tune_bw = %d\n", p->tune_bw); for (i = 0; i < 3; i++) { @@ -283,31 +286,34 @@ static void smsdvb_print_isdb_stats_ex(struct smsdvb_debugfs *debug_data, p->layer_info[i].number_of_segments > 13) continue; - n += snprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i); - n += snprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t", + n += scnprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i); + n += scnprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t", p->layer_info[i].code_rate); - n += snprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n", + n += scnprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n", p->layer_info[i].constellation); - n += snprintf(&buf[n], PAGE_SIZE - n, "\tber = %-5d\t", + n += scnprintf(&buf[n], PAGE_SIZE - n, "\tber = %-5d\t", p->layer_info[i].ber); - n += snprintf(&buf[n], PAGE_SIZE - n, "\tber_error_count = %-5d\t", + n += scnprintf(&buf[n], PAGE_SIZE - n, + "\tber_error_count = %-5d\t", p->layer_info[i].ber_error_count); - n += snprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n", + n += scnprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n", p->layer_info[i].ber_bit_count); - n += snprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t", + n += scnprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t", p->layer_info[i].pre_ber); - n += snprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n", + n += scnprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n", p->layer_info[i].ts_per); - n += snprintf(&buf[n], PAGE_SIZE - n, "\terror_ts_packets = %-5d\t", + n += scnprintf(&buf[n], PAGE_SIZE - n, + "\terror_ts_packets = %-5d\t", p->layer_info[i].error_ts_packets); - n += snprintf(&buf[n], PAGE_SIZE - n, "total_ts_packets = %-5d\t", + n += scnprintf(&buf[n], PAGE_SIZE - n, + "total_ts_packets = %-5d\t", p->layer_info[i].total_ts_packets); - n += snprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n", + n += scnprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n", p->layer_info[i].ti_ldepth_i); - n += snprintf(&buf[n], PAGE_SIZE - n, + n += scnprintf(&buf[n], PAGE_SIZE - n, "\tnumber_of_segments = %d\t", p->layer_info[i].number_of_segments); - n += snprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n", + n += scnprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n", p->layer_info[i].tmcc_errors); } diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 4489744fbbd9..44d65f5be845 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -393,8 +393,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, } } - dprintk(1, "allocated %d buffers, %d plane(s) each\n", - buffer, num_planes); + dprintk(3, "allocated %d buffers, %d plane(s) each\n", + buffer, num_planes); return buffer; } diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c index d0c9dffe49e5..d3a3ee5b597b 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c @@ -593,8 +593,8 @@ static int vb2_dc_map_dmabuf(void 
*mem_priv) /* checking if dmabuf is big enough to store contiguous chunk */ contig_size = vb2_dc_get_contiguous_size(sgt); if (contig_size < buf->size) { - pr_err("contiguous chunk is too small %lu/%lu b\n", - contig_size, buf->size); + pr_err("contiguous chunk is too small %lu/%lu\n", + contig_size, buf->size); dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir); return -EFAULT; } diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c index ac7be872f460..5de016412c42 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drxj.c +++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c @@ -2182,7 +2182,7 @@ int drxj_dap_atomic_read_reg32(struct i2c_device_addr *dev_addr, u32 *data, u32 flags) { u8 buf[sizeof(*data)] = { 0 }; - int rc = -EIO; + int rc; u32 word = 0; if (!data) @@ -4229,7 +4229,7 @@ int drxj_dap_scu_atomic_write_reg16(struct i2c_device_addr *dev_addr, u16 data, u32 flags) { u8 buf[2]; - int rc = -EIO; + int rc; buf[0] = (u8) (data & 0xff); buf[1] = (u8) ((data >> 8) & 0xff); diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index c96f05ff5f2f..d2c28dcf6b42 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@ -65,6 +65,92 @@ err: } /* + * m88ds3103b demod has an internal device related to clocking. First the i2c + * gate must be opened, for one transaction, then writes will be allowed. + */ +static int m88ds3103b_dt_write(struct m88ds3103_dev *dev, int reg, int data) +{ + struct i2c_client *client = dev->client; + u8 buf[] = {reg, data}; + u8 val; + int ret; + struct i2c_msg msg = { + .addr = dev->dt_addr, .flags = 0, .buf = buf, .len = 2 + }; + + m88ds3103_update_bits(dev, 0x11, 0x01, 0x00); + + val = 0x11; + ret = regmap_write(dev->regmap, 0x03, val); + if (ret) + dev_dbg(&client->dev, "fail=%d\n", ret); + + ret = i2c_transfer(dev->dt_client->adapter, &msg, 1); + if (ret != 1) { + dev_err(&client->dev, "0x%02x (ret=%i, reg=0x%02x, value=0x%02x)\n", + dev->dt_addr, ret, reg, data); + + m88ds3103_update_bits(dev, 0x11, 0x01, 0x01); + return -EREMOTEIO; + } + m88ds3103_update_bits(dev, 0x11, 0x01, 0x01); + + dev_dbg(&client->dev, "0x%02x reg 0x%02x, value 0x%02x\n", + dev->dt_addr, reg, data); + + return 0; +} + +/* + * m88ds3103b demod has an internal device related to clocking. First the i2c + * gate must be opened, for two transactions, then reads will be allowed. + */ +static int m88ds3103b_dt_read(struct m88ds3103_dev *dev, u8 reg) +{ + struct i2c_client *client = dev->client; + int ret; + u8 val; + u8 b0[] = { reg }; + u8 b1[] = { 0 }; + struct i2c_msg msg[] = { + { + .addr = dev->dt_addr, + .flags = 0, + .buf = b0, + .len = 1 + }, + { + .addr = dev->dt_addr, + .flags = I2C_M_RD, + .buf = b1, + .len = 1 + } + }; + + m88ds3103_update_bits(dev, 0x11, 0x01, 0x00); + + val = 0x12; + ret = regmap_write(dev->regmap, 0x03, val); + if (ret) + dev_dbg(&client->dev, "fail=%d\n", ret); + + ret = i2c_transfer(dev->dt_client->adapter, msg, 2); + if (ret != 2) { + dev_err(&client->dev, "0x%02x (ret=%d, reg=0x%02x)\n", + dev->dt_addr, ret, reg); + + m88ds3103_update_bits(dev, 0x11, 0x01, 0x01); + return -EREMOTEIO; + } + m88ds3103_update_bits(dev, 0x11, 0x01, 0x01); + + dev_dbg(&client->dev, "0x%02x reg 0x%02x, value 0x%02x\n", + dev->dt_addr, reg, b1[0]); + + return b1[0]; +} + +/* * Get the demodulator AGC PWM voltage setting supplied to the tuner. 
*/ int m88ds3103_get_agc_pwm(struct dvb_frontend *fe, u8 *_agc_pwm) @@ -288,6 +374,251 @@ err: return ret; } +static int m88ds3103b_select_mclk(struct m88ds3103_dev *dev) +{ + struct i2c_client *client = dev->client; + struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache; + u32 adc_Freq_MHz[3] = {96, 93, 99}; + u8 reg16_list[3] = {96, 92, 100}, reg16, reg15; + u32 offset_MHz[3]; + u32 max_offset = 0; + u32 old_setting = dev->mclk; + u32 tuner_freq_MHz = c->frequency / 1000; + u8 i; + char big_symbol = 0; + + big_symbol = (c->symbol_rate > 45010000) ? 1 : 0; + + if (big_symbol) { + reg16 = 115; + } else { + reg16 = 96; + + /* TODO: IS THIS NECESSARY ? */ + for (i = 0; i < 3; i++) { + offset_MHz[i] = tuner_freq_MHz % adc_Freq_MHz[i]; + + if (offset_MHz[i] > (adc_Freq_MHz[i] / 2)) + offset_MHz[i] = adc_Freq_MHz[i] - offset_MHz[i]; + + if (offset_MHz[i] > max_offset) { + max_offset = offset_MHz[i]; + reg16 = reg16_list[i]; + dev->mclk = adc_Freq_MHz[i] * 1000 * 1000; + + if (big_symbol) + dev->mclk /= 2; + + dev_dbg(&client->dev, "modifying mclk %u -> %u\n", + old_setting, dev->mclk); + } + } + } + + if (dev->mclk == 93000000) + regmap_write(dev->regmap, 0xA0, 0x42); + else if (dev->mclk == 96000000) + regmap_write(dev->regmap, 0xA0, 0x44); + else if (dev->mclk == 99000000) + regmap_write(dev->regmap, 0xA0, 0x46); + else if (dev->mclk == 110250000) + regmap_write(dev->regmap, 0xA0, 0x4E); + else + regmap_write(dev->regmap, 0xA0, 0x44); + + reg15 = m88ds3103b_dt_read(dev, 0x15); + + m88ds3103b_dt_write(dev, 0x05, 0x40); + m88ds3103b_dt_write(dev, 0x11, 0x08); + + if (big_symbol) + reg15 |= 0x02; + else + reg15 &= ~0x02; + + m88ds3103b_dt_write(dev, 0x15, reg15); + m88ds3103b_dt_write(dev, 0x16, reg16); + + usleep_range(5000, 5500); + + m88ds3103b_dt_write(dev, 0x05, 0x00); + m88ds3103b_dt_write(dev, 0x11, (u8)(big_symbol ? 
0x0E : 0x0A)); + + usleep_range(5000, 5500); + + return 0; +} + +static int m88ds3103b_set_mclk(struct m88ds3103_dev *dev, u32 mclk_khz) +{ + u8 reg11 = 0x0A, reg15, reg16, reg1D, reg1E, reg1F, tmp; + u8 sm, f0 = 0, f1 = 0, f2 = 0, f3 = 0; + u16 pll_div_fb, N; + u32 div; + + reg15 = m88ds3103b_dt_read(dev, 0x15); + reg16 = m88ds3103b_dt_read(dev, 0x16); + reg1D = m88ds3103b_dt_read(dev, 0x1D); + + if (dev->cfg->ts_mode != M88DS3103_TS_SERIAL) { + if (reg16 == 92) + tmp = 93; + else if (reg16 == 100) + tmp = 99; + else + tmp = 96; + + mclk_khz *= tmp; + mclk_khz /= 96; + } + + pll_div_fb = (reg15 & 0x01) << 8; + pll_div_fb += reg16; + pll_div_fb += 32; + + div = 9000 * pll_div_fb * 4; + div /= mclk_khz; + + if (dev->cfg->ts_mode == M88DS3103_TS_SERIAL) { + reg11 |= 0x02; + + if (div <= 32) { + N = 2; + + f0 = 0; + f1 = div / N; + f2 = div - f1; + f3 = 0; + } else if (div <= 34) { + N = 3; + + f0 = div / N; + f1 = (div - f0) / (N - 1); + f2 = div - f0 - f1; + f3 = 0; + } else if (div <= 64) { + N = 4; + + f0 = div / N; + f1 = (div - f0) / (N - 1); + f2 = (div - f0 - f1) / (N - 2); + f3 = div - f0 - f1 - f2; + } else { + N = 4; + + f0 = 16; + f1 = 16; + f2 = 16; + f3 = 16; + } + + if (f0 == 16) + f0 = 0; + else if ((f0 < 8) && (f0 != 0)) + f0 = 8; + + if (f1 == 16) + f1 = 0; + else if ((f1 < 8) && (f1 != 0)) + f1 = 8; + + if (f2 == 16) + f2 = 0; + else if ((f2 < 8) && (f2 != 0)) + f2 = 8; + + if (f3 == 16) + f3 = 0; + else if ((f3 < 8) && (f3 != 0)) + f3 = 8; + } else { + reg11 &= ~0x02; + + if (div <= 32) { + N = 2; + + f0 = 0; + f1 = div / N; + f2 = div - f1; + f3 = 0; + } else if (div <= 48) { + N = 3; + + f0 = div / N; + f1 = (div - f0) / (N - 1); + f2 = div - f0 - f1; + f3 = 0; + } else if (div <= 64) { + N = 4; + + f0 = div / N; + f1 = (div - f0) / (N - 1); + f2 = (div - f0 - f1) / (N - 2); + f3 = div - f0 - f1 - f2; + } else { + N = 4; + + f0 = 16; + f1 = 16; + f2 = 16; + f3 = 16; + } + + if (f0 == 16) + f0 = 0; + else if ((f0 < 9) && (f0 != 0)) + f0 = 9; + + if (f1 == 16) + f1 = 0; + else if ((f1 < 9) && (f1 != 0)) + f1 = 9; + + if (f2 == 16) + f2 = 0; + else if ((f2 < 9) && (f2 != 0)) + f2 = 9; + + if (f3 == 16) + f3 = 0; + else if ((f3 < 9) && (f3 != 0)) + f3 = 9; + } + + sm = N - 1; + + /* Write to registers */ + //reg15 &= 0x01; + //reg15 |= (pll_div_fb >> 8) & 0x01; + + //reg16 = pll_div_fb & 0xFF; + + reg1D &= ~0x03; + reg1D |= sm; + reg1D |= 0x80; + + reg1E = ((f3 << 4) + f2) & 0xFF; + reg1F = ((f1 << 4) + f0) & 0xFF; + + m88ds3103b_dt_write(dev, 0x05, 0x40); + m88ds3103b_dt_write(dev, 0x11, 0x08); + m88ds3103b_dt_write(dev, 0x1D, reg1D); + m88ds3103b_dt_write(dev, 0x1E, reg1E); + m88ds3103b_dt_write(dev, 0x1F, reg1F); + + m88ds3103b_dt_write(dev, 0x17, 0xc1); + m88ds3103b_dt_write(dev, 0x17, 0x81); + + usleep_range(5000, 5500); + + m88ds3103b_dt_write(dev, 0x05, 0x00); + m88ds3103b_dt_write(dev, 0x11, 0x0A); + + usleep_range(5000, 5500); + + return 0; +} + static int m88ds3103_set_frontend(struct dvb_frontend *fe) { struct m88ds3103_dev *dev = fe->demodulator_priv; @@ -298,7 +629,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) u8 u8tmp, u8tmp1 = 0, u8tmp2 = 0; /* silence compiler warning */ u8 buf[3]; u16 u16tmp; - u32 tuner_frequency_khz, target_mclk; + u32 tuner_frequency_khz, target_mclk, u32tmp; s32 s32tmp; static const struct reg_sequence reset_buf[] = { {0x07, 0x80}, {0x07, 0x00} @@ -321,6 +652,20 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) /* Disable demod clock path */ if (dev->chip_id == M88RS6000_CHIP_ID) { + if (dev->chiptype == 
M88DS3103_CHIPTYPE_3103B) { + ret = regmap_read(dev->regmap, 0xb2, &u32tmp); + if (ret) + goto err; + if (u32tmp == 0x01) { + ret = regmap_write(dev->regmap, 0x00, 0x00); + if (ret) + goto err; + ret = regmap_write(dev->regmap, 0xb2, 0x00); + if (ret) + goto err; + } + } + ret = regmap_write(dev->regmap, 0x06, 0xe0); if (ret) goto err; @@ -346,7 +691,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) tuner_frequency_khz = c->frequency; } - /* select M88RS6000 demod main mclk and ts mclk from tuner die. */ + /* set M88RS6000/DS3103B demod main mclk and ts mclk from tuner die */ if (dev->chip_id == M88RS6000_CHIP_ID) { if (c->symbol_rate > 45010000) dev->mclk = 110250000; @@ -358,6 +703,11 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) else target_mclk = 144000000; + if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) { + m88ds3103b_select_mclk(dev); + m88ds3103b_set_mclk(dev, target_mclk / 1000); + } + /* Enable demod clock path */ ret = regmap_write(dev->regmap, 0x06, 0x00); if (ret) @@ -469,12 +819,42 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) ret = m88ds3103_update_bits(dev, 0x9d, 0x08, 0x08); if (ret) goto err; + + if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) { + buf[0] = m88ds3103b_dt_read(dev, 0x15); + buf[1] = m88ds3103b_dt_read(dev, 0x16); + + if (c->symbol_rate > 45010000) { + buf[0] &= ~0x03; + buf[0] |= 0x02; + buf[0] |= ((147 - 32) >> 8) & 0x01; + buf[1] = (147 - 32) & 0xFF; + + dev->mclk = 110250 * 1000; + } else { + buf[0] &= ~0x03; + buf[0] |= ((128 - 32) >> 8) & 0x01; + buf[1] = (128 - 32) & 0xFF; + + dev->mclk = 96000 * 1000; + } + m88ds3103b_dt_write(dev, 0x15, buf[0]); + m88ds3103b_dt_write(dev, 0x16, buf[1]); + + regmap_read(dev->regmap, 0x30, &u32tmp); + u32tmp &= ~0x80; + regmap_write(dev->regmap, 0x30, u32tmp & 0xff); + } + ret = regmap_write(dev->regmap, 0xf1, 0x01); if (ret) goto err; - ret = m88ds3103_update_bits(dev, 0x30, 0x80, 0x80); - if (ret) - goto err; + + if (dev->chiptype != M88DS3103_CHIPTYPE_3103B) { + ret = m88ds3103_update_bits(dev, 0x30, 0x80, 0x80); + if (ret) + goto err; + } } switch (dev->cfg->ts_mode) { @@ -488,6 +868,10 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) break; case M88DS3103_TS_PARALLEL: u8tmp = 0x02; + if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) { + u8tmp = 0x01; + u8tmp1 = 0x01; + } break; case M88DS3103_TS_CI: u8tmp = 0x03; @@ -516,6 +900,13 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) u8tmp1 = 0x3f; u8tmp2 = 0x3f; break; + case M88DS3103_TS_PARALLEL: + if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) { + ret = m88ds3103_update_bits(dev, 0x29, 0x01, u8tmp1); + if (ret) + goto err; + } + /* fall through */ default: u16tmp = DIV_ROUND_UP(target_mclk, dev->cfg->ts_clk); u8tmp1 = u16tmp / 2 - 1; @@ -543,6 +934,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) else u8tmp = 0x06; + if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) + m88ds3103b_set_mclk(dev, target_mclk / 1000); + ret = regmap_write(dev->regmap, 0xc3, 0x08); if (ret) goto err; @@ -578,6 +972,16 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) if (ret) goto err; + if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) { + /* enable/disable 192M LDPC clock */ + ret = m88ds3103_update_bits(dev, 0x29, 0x10, + (c->delivery_system == SYS_DVBS) ? 
0x10 : 0x0); + if (ret) + goto err; + + ret = m88ds3103_update_bits(dev, 0xc9, 0x08, 0x08); + } + dev_dbg(&client->dev, "carrier offset=%d\n", (tuner_frequency_khz - c->frequency)); @@ -642,7 +1046,7 @@ static int m88ds3103_init(struct dvb_frontend *fe) if (utmp) goto warm; - /* global reset, global diseqc reset, golbal fec reset */ + /* global reset, global diseqc reset, global fec reset */ ret = regmap_write(dev->regmap, 0x07, 0xe0); if (ret) goto err; @@ -652,12 +1056,15 @@ static int m88ds3103_init(struct dvb_frontend *fe) /* cold state - try to download firmware */ dev_info(&client->dev, "found a '%s' in cold state\n", - m88ds3103_ops.info.name); + dev->fe.ops.info.name); - if (dev->chip_id == M88RS6000_CHIP_ID) + if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) + name = M88DS3103B_FIRMWARE; + else if (dev->chip_id == M88RS6000_CHIP_ID) name = M88RS6000_FIRMWARE; else name = M88DS3103_FIRMWARE; + /* request the firmware, this will block and timeout */ ret = request_firmware(&firmware, name, &client->dev); if (ret) { @@ -700,10 +1107,16 @@ static int m88ds3103_init(struct dvb_frontend *fe) } dev_info(&client->dev, "found a '%s' in warm state\n", - m88ds3103_ops.info.name); + dev->fe.ops.info.name); dev_info(&client->dev, "firmware version: %X.%X\n", (utmp >> 4) & 0xf, (utmp >> 0 & 0xf)); + if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) { + m88ds3103b_dt_write(dev, 0x21, 0x92); + m88ds3103b_dt_write(dev, 0x15, 0x6C); + m88ds3103b_dt_write(dev, 0x17, 0xC1); + m88ds3103b_dt_write(dev, 0x17, 0x81); + } warm: /* warm state */ dev->warm = true; @@ -1393,6 +1806,8 @@ static int m88ds3103_probe(struct i2c_client *client, goto err_kfree; dev->chip_id = utmp >> 1; + dev->chiptype = (u8)id->driver_data; + dev_dbg(&client->dev, "chip_id=%02x\n", dev->chip_id); switch (dev->chip_id) { @@ -1459,7 +1874,10 @@ static int m88ds3103_probe(struct i2c_client *client, /* create dvb_frontend */ memcpy(&dev->fe.ops, &m88ds3103_ops, sizeof(struct dvb_frontend_ops)); - if (dev->chip_id == M88RS6000_CHIP_ID) + if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) + strscpy(dev->fe.ops.info.name, "Montage Technology M88DS3103B", + sizeof(dev->fe.ops.info.name)); + else if (dev->chip_id == M88RS6000_CHIP_ID) strscpy(dev->fe.ops.info.name, "Montage Technology M88RS6000", sizeof(dev->fe.ops.info.name)); if (!pdata->attach_in_use) @@ -1470,6 +1888,26 @@ static int m88ds3103_probe(struct i2c_client *client, /* setup callbacks */ pdata->get_dvb_frontend = m88ds3103_get_dvb_frontend; pdata->get_i2c_adapter = m88ds3103_get_i2c_adapter; + + if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) { + /* enable i2c repeater for tuner */ + m88ds3103_update_bits(dev, 0x11, 0x01, 0x01); + + /* get frontend address */ + ret = regmap_read(dev->regmap, 0x29, &utmp); + if (ret) + goto err_kfree; + dev->dt_addr = ((utmp & 0x80) == 0) ? 
0x42 >> 1 : 0x40 >> 1; + dev_err(&client->dev, "dt addr is 0x%02x", dev->dt_addr); + + dev->dt_client = i2c_new_dummy_device(client->adapter, + dev->dt_addr); + if (!dev->dt_client) { + ret = -ENODEV; + goto err_kfree; + } + } + return 0; err_kfree: kfree(dev); @@ -1484,6 +1922,9 @@ static int m88ds3103_remove(struct i2c_client *client) dev_dbg(&client->dev, "\n"); + if (dev->dt_client) + i2c_unregister_device(dev->dt_client); + i2c_mux_del_adapters(dev->muxc); kfree(dev); @@ -1491,7 +1932,9 @@ static int m88ds3103_remove(struct i2c_client *client) } static const struct i2c_device_id m88ds3103_id_table[] = { - {"m88ds3103", 0}, + {"m88ds3103", M88DS3103_CHIPTYPE_3103}, + {"m88rs6000", M88DS3103_CHIPTYPE_RS6000}, + {"m88ds3103b", M88DS3103_CHIPTYPE_3103B}, {} }; MODULE_DEVICE_TABLE(i2c, m88ds3103_id_table); @@ -1513,3 +1956,4 @@ MODULE_DESCRIPTION("Montage Technology M88DS3103 DVB-S/S2 demodulator driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(M88DS3103_FIRMWARE); MODULE_FIRMWARE(M88RS6000_FIRMWARE); +MODULE_FIRMWARE(M88DS3103B_FIRMWARE); diff --git a/drivers/media/dvb-frontends/m88ds3103_priv.h b/drivers/media/dvb-frontends/m88ds3103_priv.h index c825032f07ab..aa5306f40201 100644 --- a/drivers/media/dvb-frontends/m88ds3103_priv.h +++ b/drivers/media/dvb-frontends/m88ds3103_priv.h @@ -16,13 +16,20 @@ #include <linux/regmap.h> #include <linux/math64.h> -#define M88DS3103_FIRMWARE "dvb-demod-m88ds3103.fw" -#define M88RS6000_FIRMWARE "dvb-demod-m88rs6000.fw" +#define M88DS3103B_FIRMWARE "dvb-demod-m88ds3103b.fw" +#define M88DS3103_FIRMWARE "dvb-demod-m88ds3103.fw" +#define M88RS6000_FIRMWARE "dvb-demod-m88rs6000.fw" + #define M88RS6000_CHIP_ID 0x74 #define M88DS3103_CHIP_ID 0x70 +#define M88DS3103_CHIPTYPE_3103 0 +#define M88DS3103_CHIPTYPE_RS6000 1 +#define M88DS3103_CHIPTYPE_3103B 2 + struct m88ds3103_dev { struct i2c_client *client; + struct i2c_client *dt_client; struct regmap_config regmap_config; struct regmap *regmap; struct m88ds3103_config config; @@ -35,10 +42,13 @@ struct m88ds3103_dev { struct i2c_mux_core *muxc; /* auto detect chip id to do different config */ u8 chip_id; + /* chip type to differentiate m88rs6000 from m88ds3103b */ + u8 chiptype; /* main mclk is calculated for M88RS6000 dynamically */ s32 mclk; u64 post_bit_error; u64 post_bit_count; + u8 dt_addr; }; struct m88ds3103_reg_val { diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c index 1953b00b3e48..685c0ac71819 100644 --- a/drivers/media/dvb-frontends/tda10071.c +++ b/drivers/media/dvb-frontends/tda10071.c @@ -470,10 +470,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status) goto error; if (dev->delivery_system == SYS_DVBS) { - dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 | - buf[2] << 8 | buf[3] << 0; - dev->post_bit_error += buf[0] << 24 | buf[1] << 16 | - buf[2] << 8 | buf[3] << 0; + u32 bit_error = buf[0] << 24 | buf[1] << 16 | + buf[2] << 8 | buf[3] << 0; + + dev->dvbv3_ber = bit_error; + dev->post_bit_error += bit_error; c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c->post_bit_error.stat[0].uvalue = dev->post_bit_error; dev->block_error += buf[4] << 8 | buf[5] << 0; diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index c68e002d26ea..125d596c13dd 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -238,6 +238,7 @@ config VIDEO_ADV7604 tristate "Analog Devices ADV7604 decoder" depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API depends on GPIOLIB || COMPILE_TEST + select REGMAP_I2C 
select HDMI select V4L2_FWNODE help @@ -379,6 +380,7 @@ config VIDEO_TVP5150 tristate "Texas Instruments TVP5150 video decoder" depends on VIDEO_V4L2 && I2C select V4L2_FWNODE + select REGMAP_I2C help Support for the Texas Instruments TVP5150 video decoder. @@ -584,6 +586,7 @@ config VIDEO_IMX214 tristate "Sony IMX214 sensor support" depends on GPIOLIB && I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API depends on V4L2_FWNODE + select REGMAP_I2C help This is a Video4Linux2 sensor driver for the Sony IMX214 camera. @@ -591,6 +594,17 @@ config VIDEO_IMX214 To compile this driver as a module, choose M here: the module will be called imx214. +config VIDEO_IMX219 + tristate "Sony IMX219 sensor support" + depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + select V4L2_FWNODE + help + This is a Video4Linux2 sensor driver for the Sony + IMX219 camera. + + To compile this driver as a module, choose M here: the + module will be called imx219. + config VIDEO_IMX258 tristate "Sony IMX258 sensor support" depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API @@ -612,6 +626,7 @@ config VIDEO_IMX274 config VIDEO_IMX290 tristate "Sony IMX290 sensor support" depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + select REGMAP_I2C select V4L2_FWNODE help This is a Video4Linux2 sensor driver for the Sony @@ -804,6 +819,7 @@ config VIDEO_OV7670 config VIDEO_OV7740 tristate "OmniVision OV7740 sensor support" depends on I2C && VIDEO_V4L2 + select REGMAP_I2C help This is a Video4Linux2 sensor driver for the OmniVision OV7740 VGA camera sensor. diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index c147bb9d28db..77bf7d0b691f 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -111,6 +111,7 @@ obj-$(CONFIG_VIDEO_OV2659) += ov2659.o obj-$(CONFIG_VIDEO_TC358743) += tc358743.o obj-$(CONFIG_VIDEO_HI556) += hi556.o obj-$(CONFIG_VIDEO_IMX214) += imx214.o +obj-$(CONFIG_VIDEO_IMX219) += imx219.o obj-$(CONFIG_VIDEO_IMX258) += imx258.o obj-$(CONFIG_VIDEO_IMX274) += imx274.o obj-$(CONFIG_VIDEO_IMX290) += imx290.o diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c index 6528e2343fc8..00159daa6fcd 100644 --- a/drivers/media/i2c/adv7180.c +++ b/drivers/media/i2c/adv7180.c @@ -749,6 +749,17 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd, return ret; } +static int adv7180_init_cfg(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg) +{ + struct v4l2_subdev_format fmt = { + .which = cfg ? 
V4L2_SUBDEV_FORMAT_TRY + : V4L2_SUBDEV_FORMAT_ACTIVE, + }; + + return adv7180_set_pad_format(sd, cfg, &fmt); +} + static int adv7180_g_mbus_config(struct v4l2_subdev *sd, struct v4l2_mbus_config *cfg) { @@ -854,6 +865,7 @@ static const struct v4l2_subdev_core_ops adv7180_core_ops = { }; static const struct v4l2_subdev_pad_ops adv7180_pad_ops = { + .init_cfg = adv7180_init_cfg, .enum_mbus_code = adv7180_enum_mbus_code, .set_fmt = adv7180_set_pad_format, .get_fmt = adv7180_get_pad_format, diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c index adcaaa8c86d1..4175d06ffd47 100644 --- a/drivers/media/i2c/imx214.c +++ b/drivers/media/i2c/imx214.c @@ -803,7 +803,6 @@ err_rpm_put: static int imx214_g_frame_interval(struct v4l2_subdev *subdev, struct v4l2_subdev_frame_interval *fival) { - fival->pad = 0; fival->interval.numerator = 1; fival->interval.denominator = IMX214_FPS; diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c new file mode 100644 index 000000000000..cb03bdec1f9c --- /dev/null +++ b/drivers/media/i2c/imx219.c @@ -0,0 +1,1481 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * A V4L2 driver for Sony IMX219 cameras. + * Copyright (C) 2019, Raspberry Pi (Trading) Ltd + * + * Based on Sony imx258 camera driver + * Copyright (C) 2018 Intel Corporation + * + * DT / fwnode changes, and regulator / GPIO control taken from imx214 driver + * Copyright 2018 Qtechnology A/S + * + * Flip handling taken from the Sony IMX319 driver. + * Copyright (C) 2018 Intel Corporation + * + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/clkdev.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> +#include <media/v4l2-event.h> +#include <media/v4l2-fwnode.h> +#include <media/v4l2-mediabus.h> +#include <asm/unaligned.h> + +#define IMX219_REG_VALUE_08BIT 1 +#define IMX219_REG_VALUE_16BIT 2 + +#define IMX219_REG_MODE_SELECT 0x0100 +#define IMX219_MODE_STANDBY 0x00 +#define IMX219_MODE_STREAMING 0x01 + +/* Chip ID */ +#define IMX219_REG_CHIP_ID 0x0000 +#define IMX219_CHIP_ID 0x0219 + +/* External clock frequency is 24.0M */ +#define IMX219_XCLK_FREQ 24000000 + +/* Pixel rate is fixed at 182.4M for all the modes */ +#define IMX219_PIXEL_RATE 182400000 + +#define IMX219_DEFAULT_LINK_FREQ 456000000 + +/* V_TIMING internal */ +#define IMX219_REG_VTS 0x0160 +#define IMX219_VTS_15FPS 0x0dc6 +#define IMX219_VTS_30FPS_1080P 0x06e3 +#define IMX219_VTS_30FPS_BINNED 0x06e3 +#define IMX219_VTS_30FPS_640x480 0x06e3 +#define IMX219_VTS_MAX 0xffff + +#define IMX219_VBLANK_MIN 4 + +/*Frame Length Line*/ +#define IMX219_FLL_MIN 0x08a6 +#define IMX219_FLL_MAX 0xffff +#define IMX219_FLL_STEP 1 +#define IMX219_FLL_DEFAULT 0x0c98 + +/* HBLANK control - read only */ +#define IMX219_PPL_DEFAULT 3448 + +/* Exposure control */ +#define IMX219_REG_EXPOSURE 0x015a +#define IMX219_EXPOSURE_MIN 4 +#define IMX219_EXPOSURE_STEP 1 +#define IMX219_EXPOSURE_DEFAULT 0x640 +#define IMX219_EXPOSURE_MAX 65535 + +/* Analog gain control */ +#define IMX219_REG_ANALOG_GAIN 0x0157 +#define IMX219_ANA_GAIN_MIN 0 +#define IMX219_ANA_GAIN_MAX 232 +#define IMX219_ANA_GAIN_STEP 1 +#define IMX219_ANA_GAIN_DEFAULT 0x0 + +/* Digital gain control */ +#define IMX219_REG_DIGITAL_GAIN 0x0158 +#define IMX219_DGTL_GAIN_MIN 0x0100 +#define IMX219_DGTL_GAIN_MAX 0x0fff +#define IMX219_DGTL_GAIN_DEFAULT 0x0100 +#define 
IMX219_DGTL_GAIN_STEP 1 + +#define IMX219_REG_ORIENTATION 0x0172 + +/* Test Pattern Control */ +#define IMX219_REG_TEST_PATTERN 0x0600 +#define IMX219_TEST_PATTERN_DISABLE 0 +#define IMX219_TEST_PATTERN_SOLID_COLOR 1 +#define IMX219_TEST_PATTERN_COLOR_BARS 2 +#define IMX219_TEST_PATTERN_GREY_COLOR 3 +#define IMX219_TEST_PATTERN_PN9 4 + +/* Test pattern colour components */ +#define IMX219_REG_TESTP_RED 0x0602 +#define IMX219_REG_TESTP_GREENR 0x0604 +#define IMX219_REG_TESTP_BLUE 0x0606 +#define IMX219_REG_TESTP_GREENB 0x0608 +#define IMX219_TESTP_COLOUR_MIN 0 +#define IMX219_TESTP_COLOUR_MAX 0x03ff +#define IMX219_TESTP_COLOUR_STEP 1 +#define IMX219_TESTP_RED_DEFAULT IMX219_TESTP_COLOUR_MAX +#define IMX219_TESTP_GREENR_DEFAULT 0 +#define IMX219_TESTP_BLUE_DEFAULT 0 +#define IMX219_TESTP_GREENB_DEFAULT 0 + +struct imx219_reg { + u16 address; + u8 val; +}; + +struct imx219_reg_list { + unsigned int num_of_regs; + const struct imx219_reg *regs; +}; + +/* Mode : resolution and related config&values */ +struct imx219_mode { + /* Frame width */ + unsigned int width; + /* Frame height */ + unsigned int height; + + /* V-timing */ + unsigned int vts_def; + + /* Default register values */ + struct imx219_reg_list reg_list; +}; + +/* + * Register sets lifted off the i2C interface from the Raspberry Pi firmware + * driver. + * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7. + */ +static const struct imx219_reg mode_3280x2464_regs[] = { + {0x0100, 0x00}, + {0x30eb, 0x0c}, + {0x30eb, 0x05}, + {0x300a, 0xff}, + {0x300b, 0xff}, + {0x30eb, 0x05}, + {0x30eb, 0x09}, + {0x0114, 0x01}, + {0x0128, 0x00}, + {0x012a, 0x18}, + {0x012b, 0x00}, + {0x0164, 0x00}, + {0x0165, 0x00}, + {0x0166, 0x0c}, + {0x0167, 0xcf}, + {0x0168, 0x00}, + {0x0169, 0x00}, + {0x016a, 0x09}, + {0x016b, 0x9f}, + {0x016c, 0x0c}, + {0x016d, 0xd0}, + {0x016e, 0x09}, + {0x016f, 0xa0}, + {0x0170, 0x01}, + {0x0171, 0x01}, + {0x0174, 0x00}, + {0x0175, 0x00}, + {0x0301, 0x05}, + {0x0303, 0x01}, + {0x0304, 0x03}, + {0x0305, 0x03}, + {0x0306, 0x00}, + {0x0307, 0x39}, + {0x030b, 0x01}, + {0x030c, 0x00}, + {0x030d, 0x72}, + {0x0624, 0x0c}, + {0x0625, 0xd0}, + {0x0626, 0x09}, + {0x0627, 0xa0}, + {0x455e, 0x00}, + {0x471e, 0x4b}, + {0x4767, 0x0f}, + {0x4750, 0x14}, + {0x4540, 0x00}, + {0x47b4, 0x14}, + {0x4713, 0x30}, + {0x478b, 0x10}, + {0x478f, 0x10}, + {0x4793, 0x10}, + {0x4797, 0x0e}, + {0x479b, 0x0e}, + {0x0162, 0x0d}, + {0x0163, 0x78}, +}; + +static const struct imx219_reg mode_1920_1080_regs[] = { + {0x0100, 0x00}, + {0x30eb, 0x05}, + {0x30eb, 0x0c}, + {0x300a, 0xff}, + {0x300b, 0xff}, + {0x30eb, 0x05}, + {0x30eb, 0x09}, + {0x0114, 0x01}, + {0x0128, 0x00}, + {0x012a, 0x18}, + {0x012b, 0x00}, + {0x0162, 0x0d}, + {0x0163, 0x78}, + {0x0164, 0x02}, + {0x0165, 0xa8}, + {0x0166, 0x0a}, + {0x0167, 0x27}, + {0x0168, 0x02}, + {0x0169, 0xb4}, + {0x016a, 0x06}, + {0x016b, 0xeb}, + {0x016c, 0x07}, + {0x016d, 0x80}, + {0x016e, 0x04}, + {0x016f, 0x38}, + {0x0170, 0x01}, + {0x0171, 0x01}, + {0x0174, 0x00}, + {0x0175, 0x00}, + {0x0301, 0x05}, + {0x0303, 0x01}, + {0x0304, 0x03}, + {0x0305, 0x03}, + {0x0306, 0x00}, + {0x0307, 0x39}, + {0x030b, 0x01}, + {0x030c, 0x00}, + {0x030d, 0x72}, + {0x0624, 0x07}, + {0x0625, 0x80}, + {0x0626, 0x04}, + {0x0627, 0x38}, + {0x455e, 0x00}, + {0x471e, 0x4b}, + {0x4767, 0x0f}, + {0x4750, 0x14}, + {0x4540, 0x00}, + {0x47b4, 0x14}, + {0x4713, 0x30}, + {0x478b, 0x10}, + {0x478f, 0x10}, + {0x4793, 0x10}, + {0x4797, 0x0e}, + {0x479b, 0x0e}, + {0x0162, 0x0d}, + {0x0163, 0x78}, +}; + +static const struct 
imx219_reg mode_1640_1232_regs[] = { + {0x0100, 0x00}, + {0x30eb, 0x0c}, + {0x30eb, 0x05}, + {0x300a, 0xff}, + {0x300b, 0xff}, + {0x30eb, 0x05}, + {0x30eb, 0x09}, + {0x0114, 0x01}, + {0x0128, 0x00}, + {0x012a, 0x18}, + {0x012b, 0x00}, + {0x0164, 0x00}, + {0x0165, 0x00}, + {0x0166, 0x0c}, + {0x0167, 0xcf}, + {0x0168, 0x00}, + {0x0169, 0x00}, + {0x016a, 0x09}, + {0x016b, 0x9f}, + {0x016c, 0x06}, + {0x016d, 0x68}, + {0x016e, 0x04}, + {0x016f, 0xd0}, + {0x0170, 0x01}, + {0x0171, 0x01}, + {0x0174, 0x01}, + {0x0175, 0x01}, + {0x0301, 0x05}, + {0x0303, 0x01}, + {0x0304, 0x03}, + {0x0305, 0x03}, + {0x0306, 0x00}, + {0x0307, 0x39}, + {0x030b, 0x01}, + {0x030c, 0x00}, + {0x030d, 0x72}, + {0x0624, 0x06}, + {0x0625, 0x68}, + {0x0626, 0x04}, + {0x0627, 0xd0}, + {0x455e, 0x00}, + {0x471e, 0x4b}, + {0x4767, 0x0f}, + {0x4750, 0x14}, + {0x4540, 0x00}, + {0x47b4, 0x14}, + {0x4713, 0x30}, + {0x478b, 0x10}, + {0x478f, 0x10}, + {0x4793, 0x10}, + {0x4797, 0x0e}, + {0x479b, 0x0e}, + {0x0162, 0x0d}, + {0x0163, 0x78}, +}; + +static const struct imx219_reg mode_640_480_regs[] = { + {0x0100, 0x00}, + {0x30eb, 0x05}, + {0x30eb, 0x0c}, + {0x300a, 0xff}, + {0x300b, 0xff}, + {0x30eb, 0x05}, + {0x30eb, 0x09}, + {0x0114, 0x01}, + {0x0128, 0x00}, + {0x012a, 0x18}, + {0x012b, 0x00}, + {0x0162, 0x0d}, + {0x0163, 0x78}, + {0x0164, 0x03}, + {0x0165, 0xe8}, + {0x0166, 0x08}, + {0x0167, 0xe7}, + {0x0168, 0x02}, + {0x0169, 0xf0}, + {0x016a, 0x06}, + {0x016b, 0xaf}, + {0x016c, 0x02}, + {0x016d, 0x80}, + {0x016e, 0x01}, + {0x016f, 0xe0}, + {0x0170, 0x01}, + {0x0171, 0x01}, + {0x0174, 0x03}, + {0x0175, 0x03}, + {0x0301, 0x05}, + {0x0303, 0x01}, + {0x0304, 0x03}, + {0x0305, 0x03}, + {0x0306, 0x00}, + {0x0307, 0x39}, + {0x030b, 0x01}, + {0x030c, 0x00}, + {0x030d, 0x72}, + {0x0624, 0x06}, + {0x0625, 0x68}, + {0x0626, 0x04}, + {0x0627, 0xd0}, + {0x455e, 0x00}, + {0x471e, 0x4b}, + {0x4767, 0x0f}, + {0x4750, 0x14}, + {0x4540, 0x00}, + {0x47b4, 0x14}, + {0x4713, 0x30}, + {0x478b, 0x10}, + {0x478f, 0x10}, + {0x4793, 0x10}, + {0x4797, 0x0e}, + {0x479b, 0x0e}, +}; + +static const struct imx219_reg raw8_framefmt_regs[] = { + {0x018c, 0x08}, + {0x018d, 0x08}, + {0x0309, 0x08}, +}; + +static const struct imx219_reg raw10_framefmt_regs[] = { + {0x018c, 0x0a}, + {0x018d, 0x0a}, + {0x0309, 0x0a}, +}; + +static const char * const imx219_test_pattern_menu[] = { + "Disabled", + "Color Bars", + "Solid Color", + "Grey Color Bars", + "PN9" +}; + +static const int imx219_test_pattern_val[] = { + IMX219_TEST_PATTERN_DISABLE, + IMX219_TEST_PATTERN_COLOR_BARS, + IMX219_TEST_PATTERN_SOLID_COLOR, + IMX219_TEST_PATTERN_GREY_COLOR, + IMX219_TEST_PATTERN_PN9, +}; + +/* regulator supplies */ +static const char * const imx219_supply_name[] = { + /* Supplies can be enabled in any order */ + "VANA", /* Analog (2.8V) supply */ + "VDIG", /* Digital Core (1.8V) supply */ + "VDDL", /* IF (1.2V) supply */ +}; + +#define IMX219_NUM_SUPPLIES ARRAY_SIZE(imx219_supply_name) + +/* + * The supported formats. + * This table MUST contain 4 entries per format, to cover the various flip + * combinations in the order + * - no flip + * - h flip + * - v flip + * - h&v flips + */ +static const u32 codes[] = { + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SBGGR10_1X10, + + MEDIA_BUS_FMT_SRGGB8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SBGGR8_1X8, +}; + +/* + * Initialisation delay between XCLR low->high and the moment when the sensor + * can start capture (i.e. 
can leave software stanby) must be not less than: + * t4 + max(t5, t6 + <time to initialize the sensor register over I2C>) + * where + * t4 is fixed, and is max 200uS, + * t5 is fixed, and is 6000uS, + * t6 depends on the sensor external clock, and is max 32000 clock periods. + * As per sensor datasheet, the external clock must be from 6MHz to 27MHz. + * So for any acceptable external clock t6 is always within the range of + * 1185 to 5333 uS, and is always less than t5. + * For this reason this is always safe to wait (t4 + t5) = 6200 uS, then + * initialize the sensor over I2C, and then exit the software standby. + * + * This start-up time can be optimized a bit more, if we start the writes + * over I2C after (t4+t6), but before (t4+t5) expires. But then sensor + * initialization over I2C may complete before (t4+t5) expires, and we must + * ensure that capture is not started before (t4+t5). + * + * This delay doesn't account for the power supply startup time. If needed, + * this should be taken care of via the regulator framework. E.g. in the + * case of DT for regulator-fixed one should define the startup-delay-us + * property. + */ +#define IMX219_XCLR_MIN_DELAY_US 6200 +#define IMX219_XCLR_DELAY_RANGE_US 1000 + +/* Mode configs */ +static const struct imx219_mode supported_modes[] = { + { + /* 8MPix 15fps mode */ + .width = 3280, + .height = 2464, + .vts_def = IMX219_VTS_15FPS, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs), + .regs = mode_3280x2464_regs, + }, + }, + { + /* 1080P 30fps cropped */ + .width = 1920, + .height = 1080, + .vts_def = IMX219_VTS_30FPS_1080P, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1920_1080_regs), + .regs = mode_1920_1080_regs, + }, + }, + { + /* 2x2 binned 30fps mode */ + .width = 1640, + .height = 1232, + .vts_def = IMX219_VTS_30FPS_BINNED, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1640_1232_regs), + .regs = mode_1640_1232_regs, + }, + }, + { + /* 640x480 30fps mode */ + .width = 640, + .height = 480, + .vts_def = IMX219_VTS_30FPS_640x480, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_640_480_regs), + .regs = mode_640_480_regs, + }, + }, +}; + +struct imx219 { + struct v4l2_subdev sd; + struct media_pad pad; + + struct v4l2_mbus_framefmt fmt; + + struct clk *xclk; /* system clock to IMX219 */ + u32 xclk_freq; + + struct gpio_desc *reset_gpio; + struct regulator_bulk_data supplies[IMX219_NUM_SUPPLIES]; + + struct v4l2_ctrl_handler ctrl_handler; + /* V4L2 Controls */ + struct v4l2_ctrl *pixel_rate; + struct v4l2_ctrl *exposure; + struct v4l2_ctrl *vflip; + struct v4l2_ctrl *hflip; + struct v4l2_ctrl *vblank; + struct v4l2_ctrl *hblank; + + /* Current mode */ + const struct imx219_mode *mode; + + /* + * Mutex for serialized access: + * Protect sensor module set pad format and start/stop streaming safely. 
+ */ + struct mutex mutex; + + /* Streaming on/off */ + bool streaming; +}; + +static inline struct imx219 *to_imx219(struct v4l2_subdev *_sd) +{ + return container_of(_sd, struct imx219, sd); +} + +/* Read registers up to 2 at a time */ +static int imx219_read_reg(struct imx219 *imx219, u16 reg, u32 len, u32 *val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); + struct i2c_msg msgs[2]; + u8 addr_buf[2] = { reg >> 8, reg & 0xff }; + u8 data_buf[4] = { 0, }; + int ret; + + if (len > 4) + return -EINVAL; + + /* Write register address */ + msgs[0].addr = client->addr; + msgs[0].flags = 0; + msgs[0].len = ARRAY_SIZE(addr_buf); + msgs[0].buf = addr_buf; + + /* Read data from register */ + msgs[1].addr = client->addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = len; + msgs[1].buf = &data_buf[4 - len]; + + ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret != ARRAY_SIZE(msgs)) + return -EIO; + + *val = get_unaligned_be32(data_buf); + + return 0; +} + +/* Write registers up to 2 at a time */ +static int imx219_write_reg(struct imx219 *imx219, u16 reg, u32 len, u32 val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); + u8 buf[6]; + + if (len > 4) + return -EINVAL; + + put_unaligned_be16(reg, buf); + put_unaligned_be32(val << (8 * (4 - len)), buf + 2); + if (i2c_master_send(client, buf, len + 2) != len + 2) + return -EIO; + + return 0; +} + +/* Write a list of registers */ +static int imx219_write_regs(struct imx219 *imx219, + const struct imx219_reg *regs, u32 len) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); + unsigned int i; + int ret; + + for (i = 0; i < len; i++) { + ret = imx219_write_reg(imx219, regs[i].address, 1, regs[i].val); + if (ret) { + dev_err_ratelimited(&client->dev, + "Failed to write reg 0x%4.4x. error = %d\n", + regs[i].address, ret); + + return ret; + } + } + + return 0; +} + +/* Get bayer order based on flip setting. */ +static u32 imx219_get_format_code(struct imx219 *imx219, u32 code) +{ + unsigned int i; + + lockdep_assert_held(&imx219->mutex); + + for (i = 0; i < ARRAY_SIZE(codes); i++) + if (codes[i] == code) + break; + + if (i >= ARRAY_SIZE(codes)) + i = 0; + + i = (i & ~3) | (imx219->vflip->val ? 2 : 0) | + (imx219->hflip->val ? 
1 : 0); + + return codes[i]; +} + +static void imx219_set_default_format(struct imx219 *imx219) +{ + struct v4l2_mbus_framefmt *fmt; + + fmt = &imx219->fmt; + fmt->code = MEDIA_BUS_FMT_SRGGB10_1X10; + fmt->colorspace = V4L2_COLORSPACE_SRGB; + fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace); + fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, + fmt->colorspace, + fmt->ycbcr_enc); + fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace); + fmt->width = supported_modes[0].width; + fmt->height = supported_modes[0].height; + fmt->field = V4L2_FIELD_NONE; +} + +static int imx219_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct imx219 *imx219 = to_imx219(sd); + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(sd, fh->pad, 0); + + mutex_lock(&imx219->mutex); + + /* Initialize try_fmt */ + try_fmt->width = supported_modes[0].width; + try_fmt->height = supported_modes[0].height; + try_fmt->code = imx219_get_format_code(imx219, + MEDIA_BUS_FMT_SRGGB10_1X10); + try_fmt->field = V4L2_FIELD_NONE; + + mutex_unlock(&imx219->mutex); + + return 0; +} + +static int imx219_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct imx219 *imx219 = + container_of(ctrl->handler, struct imx219, ctrl_handler); + struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); + int ret; + + if (ctrl->id == V4L2_CID_VBLANK) { + int exposure_max, exposure_def; + + /* Update max exposure while meeting expected vblanking */ + exposure_max = imx219->mode->height + ctrl->val - 4; + exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ? + exposure_max : IMX219_EXPOSURE_DEFAULT; + __v4l2_ctrl_modify_range(imx219->exposure, + imx219->exposure->minimum, + exposure_max, imx219->exposure->step, + exposure_def); + } + + /* + * Applying V4L2 control value only happens + * when power is up for streaming + */ + if (pm_runtime_get_if_in_use(&client->dev) == 0) + return 0; + + switch (ctrl->id) { + case V4L2_CID_ANALOGUE_GAIN: + ret = imx219_write_reg(imx219, IMX219_REG_ANALOG_GAIN, + IMX219_REG_VALUE_08BIT, ctrl->val); + break; + case V4L2_CID_EXPOSURE: + ret = imx219_write_reg(imx219, IMX219_REG_EXPOSURE, + IMX219_REG_VALUE_16BIT, ctrl->val); + break; + case V4L2_CID_DIGITAL_GAIN: + ret = imx219_write_reg(imx219, IMX219_REG_DIGITAL_GAIN, + IMX219_REG_VALUE_16BIT, ctrl->val); + break; + case V4L2_CID_TEST_PATTERN: + ret = imx219_write_reg(imx219, IMX219_REG_TEST_PATTERN, + IMX219_REG_VALUE_16BIT, + imx219_test_pattern_val[ctrl->val]); + break; + case V4L2_CID_HFLIP: + case V4L2_CID_VFLIP: + ret = imx219_write_reg(imx219, IMX219_REG_ORIENTATION, 1, + imx219->hflip->val | + imx219->vflip->val << 1); + break; + case V4L2_CID_VBLANK: + ret = imx219_write_reg(imx219, IMX219_REG_VTS, + IMX219_REG_VALUE_16BIT, + imx219->mode->height + ctrl->val); + break; + case V4L2_CID_TEST_PATTERN_RED: + ret = imx219_write_reg(imx219, IMX219_REG_TESTP_RED, + IMX219_REG_VALUE_16BIT, ctrl->val); + break; + case V4L2_CID_TEST_PATTERN_GREENR: + ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENR, + IMX219_REG_VALUE_16BIT, ctrl->val); + break; + case V4L2_CID_TEST_PATTERN_BLUE: + ret = imx219_write_reg(imx219, IMX219_REG_TESTP_BLUE, + IMX219_REG_VALUE_16BIT, ctrl->val); + break; + case V4L2_CID_TEST_PATTERN_GREENB: + ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENB, + IMX219_REG_VALUE_16BIT, ctrl->val); + break; + default: + dev_info(&client->dev, + "ctrl(id:0x%x,val:0x%x) is not handled\n", + ctrl->id, ctrl->val); + ret = -EINVAL; + break; + } + + pm_runtime_put(&client->dev); + + return ret; +} + 
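/*
 * A minimal standalone sketch (not part of the patch itself) of the byte
 * packing used by imx219_write_reg() above, which imx219_set_ctrl() calls
 * for every control write: the helper builds a 6-byte buffer holding the
 * big-endian 16-bit register address followed by the value shifted into the
 * top bytes, and only (len + 2) bytes are sent on the wire. The register
 * address and value below are illustrative assumptions, not taken from the
 * driver's register tables.
 */
#include <linux/types.h>
#include <asm/unaligned.h>

static void imx219_pack_sketch(void)
{
	u8 buf[6];
	u16 reg = 0x0160;	/* example register address */
	u32 val = 0x0d78;	/* example 16-bit value */
	u32 len = 2;		/* 16-bit write */

	put_unaligned_be16(reg, buf);			/* buf[0..1] = 01 60 */
	put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
	/* buf[2..5] = 0d 78 00 00; i2c_master_send() would transmit buf[0..3] */
}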
+static const struct v4l2_ctrl_ops imx219_ctrl_ops = { + .s_ctrl = imx219_set_ctrl, +}; + +static int imx219_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct imx219 *imx219 = to_imx219(sd); + + if (code->index >= (ARRAY_SIZE(codes) / 4)) + return -EINVAL; + + code->code = imx219_get_format_code(imx219, codes[code->index * 4]); + + return 0; +} + +static int imx219_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct imx219 *imx219 = to_imx219(sd); + + if (fse->index >= ARRAY_SIZE(supported_modes)) + return -EINVAL; + + if (fse->code != imx219_get_format_code(imx219, imx219->fmt.code)) + return -EINVAL; + + fse->min_width = supported_modes[fse->index].width; + fse->max_width = fse->min_width; + fse->min_height = supported_modes[fse->index].height; + fse->max_height = fse->min_height; + + return 0; +} + +static void imx219_reset_colorspace(struct v4l2_mbus_framefmt *fmt) +{ + fmt->colorspace = V4L2_COLORSPACE_SRGB; + fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace); + fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, + fmt->colorspace, + fmt->ycbcr_enc); + fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace); +} + +static void imx219_update_pad_format(struct imx219 *imx219, + const struct imx219_mode *mode, + struct v4l2_subdev_format *fmt) +{ + fmt->format.width = mode->width; + fmt->format.height = mode->height; + fmt->format.field = V4L2_FIELD_NONE; + imx219_reset_colorspace(&fmt->format); +} + +static int __imx219_get_pad_format(struct imx219 *imx219, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(&imx219->sd, cfg, fmt->pad); + /* update the code which could change due to vflip or hflip: */ + try_fmt->code = imx219_get_format_code(imx219, try_fmt->code); + fmt->format = *try_fmt; + } else { + imx219_update_pad_format(imx219, imx219->mode, fmt); + fmt->format.code = imx219_get_format_code(imx219, + imx219->fmt.code); + } + + return 0; +} + +static int imx219_get_pad_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct imx219 *imx219 = to_imx219(sd); + int ret; + + mutex_lock(&imx219->mutex); + ret = __imx219_get_pad_format(imx219, cfg, fmt); + mutex_unlock(&imx219->mutex); + + return ret; +} + +static int imx219_set_pad_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct imx219 *imx219 = to_imx219(sd); + const struct imx219_mode *mode; + struct v4l2_mbus_framefmt *framefmt; + int exposure_max, exposure_def, hblank; + unsigned int i; + + mutex_lock(&imx219->mutex); + + for (i = 0; i < ARRAY_SIZE(codes); i++) + if (codes[i] == fmt->format.code) + break; + if (i >= ARRAY_SIZE(codes)) + i = 0; + + /* Bayer order varies with flips */ + fmt->format.code = imx219_get_format_code(imx219, codes[i]); + + mode = v4l2_find_nearest_size(supported_modes, + ARRAY_SIZE(supported_modes), + width, height, + fmt->format.width, fmt->format.height); + imx219_update_pad_format(imx219, mode, fmt); + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); + *framefmt = fmt->format; + } else if (imx219->mode != mode || + imx219->fmt.code != fmt->format.code) { + imx219->fmt = fmt->format; + imx219->mode = mode; + /* 
Update limits and set FPS to default */ + __v4l2_ctrl_modify_range(imx219->vblank, IMX219_VBLANK_MIN, + IMX219_VTS_MAX - mode->height, 1, + mode->vts_def - mode->height); + __v4l2_ctrl_s_ctrl(imx219->vblank, + mode->vts_def - mode->height); + /* Update max exposure while meeting expected vblanking */ + exposure_max = mode->vts_def - 4; + exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ? + exposure_max : IMX219_EXPOSURE_DEFAULT; + __v4l2_ctrl_modify_range(imx219->exposure, + imx219->exposure->minimum, + exposure_max, imx219->exposure->step, + exposure_def); + /* + * Currently PPL is fixed to IMX219_PPL_DEFAULT, so hblank + * depends on mode->width only, and is not changeble in any + * way other than changing the mode. + */ + hblank = IMX219_PPL_DEFAULT - mode->width; + __v4l2_ctrl_modify_range(imx219->hblank, hblank, hblank, 1, + hblank); + } + + mutex_unlock(&imx219->mutex); + + return 0; +} + +static int imx219_set_framefmt(struct imx219 *imx219) +{ + switch (imx219->fmt.code) { + case MEDIA_BUS_FMT_SRGGB8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SBGGR8_1X8: + return imx219_write_regs(imx219, raw8_framefmt_regs, + ARRAY_SIZE(raw8_framefmt_regs)); + + case MEDIA_BUS_FMT_SRGGB10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SBGGR10_1X10: + return imx219_write_regs(imx219, raw10_framefmt_regs, + ARRAY_SIZE(raw10_framefmt_regs)); + } + + return -EINVAL; +} + +static int imx219_start_streaming(struct imx219 *imx219) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); + const struct imx219_reg_list *reg_list; + int ret; + + /* Apply default values of current mode */ + reg_list = &imx219->mode->reg_list; + ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs); + if (ret) { + dev_err(&client->dev, "%s failed to set mode\n", __func__); + return ret; + } + + ret = imx219_set_framefmt(imx219); + if (ret) { + dev_err(&client->dev, "%s failed to set frame format: %d\n", + __func__, ret); + return ret; + } + + /* Apply customized values from user */ + ret = __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler); + if (ret) + return ret; + + /* set stream on register */ + return imx219_write_reg(imx219, IMX219_REG_MODE_SELECT, + IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING); +} + +static void imx219_stop_streaming(struct imx219 *imx219) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); + int ret; + + /* set stream off register */ + ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT, + IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY); + if (ret) + dev_err(&client->dev, "%s failed to set stream\n", __func__); +} + +static int imx219_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct imx219 *imx219 = to_imx219(sd); + struct i2c_client *client = v4l2_get_subdevdata(sd); + int ret = 0; + + mutex_lock(&imx219->mutex); + if (imx219->streaming == enable) { + mutex_unlock(&imx219->mutex); + return 0; + } + + if (enable) { + ret = pm_runtime_get_sync(&client->dev); + if (ret < 0) { + pm_runtime_put_noidle(&client->dev); + goto err_unlock; + } + + /* + * Apply default & customized values + * and then start streaming. 
+ */ + ret = imx219_start_streaming(imx219); + if (ret) + goto err_rpm_put; + } else { + imx219_stop_streaming(imx219); + pm_runtime_put(&client->dev); + } + + imx219->streaming = enable; + + /* vflip and hflip cannot change during streaming */ + __v4l2_ctrl_grab(imx219->vflip, enable); + __v4l2_ctrl_grab(imx219->hflip, enable); + + mutex_unlock(&imx219->mutex); + + return ret; + +err_rpm_put: + pm_runtime_put(&client->dev); +err_unlock: + mutex_unlock(&imx219->mutex); + + return ret; +} + +/* Power/clock management functions */ +static int imx219_power_on(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx219 *imx219 = to_imx219(sd); + int ret; + + ret = regulator_bulk_enable(IMX219_NUM_SUPPLIES, + imx219->supplies); + if (ret) { + dev_err(&client->dev, "%s: failed to enable regulators\n", + __func__); + return ret; + } + + ret = clk_prepare_enable(imx219->xclk); + if (ret) { + dev_err(&client->dev, "%s: failed to enable clock\n", + __func__); + goto reg_off; + } + + gpiod_set_value_cansleep(imx219->reset_gpio, 1); + usleep_range(IMX219_XCLR_MIN_DELAY_US, + IMX219_XCLR_MIN_DELAY_US + IMX219_XCLR_DELAY_RANGE_US); + + return 0; + +reg_off: + regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies); + + return ret; +} + +static int imx219_power_off(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx219 *imx219 = to_imx219(sd); + + gpiod_set_value_cansleep(imx219->reset_gpio, 0); + regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies); + clk_disable_unprepare(imx219->xclk); + + return 0; +} + +static int __maybe_unused imx219_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx219 *imx219 = to_imx219(sd); + + if (imx219->streaming) + imx219_stop_streaming(imx219); + + return 0; +} + +static int __maybe_unused imx219_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx219 *imx219 = to_imx219(sd); + int ret; + + if (imx219->streaming) { + ret = imx219_start_streaming(imx219); + if (ret) + goto error; + } + + return 0; + +error: + imx219_stop_streaming(imx219); + imx219->streaming = 0; + + return ret; +} + +static int imx219_get_regulators(struct imx219 *imx219) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); + unsigned int i; + + for (i = 0; i < IMX219_NUM_SUPPLIES; i++) + imx219->supplies[i].supply = imx219_supply_name[i]; + + return devm_regulator_bulk_get(&client->dev, + IMX219_NUM_SUPPLIES, + imx219->supplies); +} + +/* Verify chip ID */ +static int imx219_identify_module(struct imx219 *imx219) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); + int ret; + u32 val; + + ret = imx219_read_reg(imx219, IMX219_REG_CHIP_ID, + IMX219_REG_VALUE_16BIT, &val); + if (ret) { + dev_err(&client->dev, "failed to read chip id %x\n", + IMX219_CHIP_ID); + return ret; + } + + if (val != IMX219_CHIP_ID) { + dev_err(&client->dev, "chip id mismatch: %x!=%x\n", + IMX219_CHIP_ID, val); + return -EIO; + } + + return 0; +} + +static const struct v4l2_subdev_core_ops imx219_core_ops = { + .subscribe_event = v4l2_ctrl_subdev_subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, +}; + +static const struct v4l2_subdev_video_ops imx219_video_ops = { + .s_stream = imx219_set_stream, +}; + 
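/*
 * Worked check of the start-up wait used by imx219_power_on() above (a
 * sketch restating the timing comment near IMX219_XCLR_MIN_DELAY_US, no new
 * data): with t4 = 200 us, t5 = 6000 us and t6 = 32000 external clock
 * periods, t6 spans 32000 / 27 MHz ~= 1185 us to 32000 / 6 MHz ~= 5333 us,
 * so max(t5, t6) = t5 and the safe wait is t4 + t5 = 6200 us, which is the
 * IMX219_XCLR_MIN_DELAY_US value passed to usleep_range().
 */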
+static const struct v4l2_subdev_pad_ops imx219_pad_ops = { + .enum_mbus_code = imx219_enum_mbus_code, + .get_fmt = imx219_get_pad_format, + .set_fmt = imx219_set_pad_format, + .enum_frame_size = imx219_enum_frame_size, +}; + +static const struct v4l2_subdev_ops imx219_subdev_ops = { + .core = &imx219_core_ops, + .video = &imx219_video_ops, + .pad = &imx219_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops imx219_internal_ops = { + .open = imx219_open, +}; + +/* Initialize control handlers */ +static int imx219_init_controls(struct imx219 *imx219) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); + struct v4l2_ctrl_handler *ctrl_hdlr; + unsigned int height = imx219->mode->height; + int exposure_max, exposure_def, hblank; + int i, ret; + + ctrl_hdlr = &imx219->ctrl_handler; + ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9); + if (ret) + return ret; + + mutex_init(&imx219->mutex); + ctrl_hdlr->lock = &imx219->mutex; + + /* By default, PIXEL_RATE is read only */ + imx219->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, + V4L2_CID_PIXEL_RATE, + IMX219_PIXEL_RATE, + IMX219_PIXEL_RATE, 1, + IMX219_PIXEL_RATE); + + /* Initial vblank/hblank/exposure parameters based on current mode */ + imx219->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, + V4L2_CID_VBLANK, IMX219_VBLANK_MIN, + IMX219_VTS_MAX - height, 1, + imx219->mode->vts_def - height); + hblank = IMX219_PPL_DEFAULT - imx219->mode->width; + imx219->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, + V4L2_CID_HBLANK, hblank, hblank, + 1, hblank); + if (imx219->hblank) + imx219->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; + exposure_max = imx219->mode->vts_def - 4; + exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ? + exposure_max : IMX219_EXPOSURE_DEFAULT; + imx219->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, + V4L2_CID_EXPOSURE, + IMX219_EXPOSURE_MIN, exposure_max, + IMX219_EXPOSURE_STEP, + exposure_def); + + v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, + IMX219_ANA_GAIN_MIN, IMX219_ANA_GAIN_MAX, + IMX219_ANA_GAIN_STEP, IMX219_ANA_GAIN_DEFAULT); + + v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_DIGITAL_GAIN, + IMX219_DGTL_GAIN_MIN, IMX219_DGTL_GAIN_MAX, + IMX219_DGTL_GAIN_STEP, IMX219_DGTL_GAIN_DEFAULT); + + imx219->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, + V4L2_CID_HFLIP, 0, 1, 1, 0); + if (imx219->hflip) + imx219->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT; + + imx219->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, + V4L2_CID_VFLIP, 0, 1, 1, 0); + if (imx219->vflip) + imx219->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT; + + v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx219_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(imx219_test_pattern_menu) - 1, + 0, 0, imx219_test_pattern_menu); + for (i = 0; i < 4; i++) { + /* + * The assumption is that + * V4L2_CID_TEST_PATTERN_GREENR == V4L2_CID_TEST_PATTERN_RED + 1 + * V4L2_CID_TEST_PATTERN_BLUE == V4L2_CID_TEST_PATTERN_RED + 2 + * V4L2_CID_TEST_PATTERN_GREENB == V4L2_CID_TEST_PATTERN_RED + 3 + */ + v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, + V4L2_CID_TEST_PATTERN_RED + i, + IMX219_TESTP_COLOUR_MIN, + IMX219_TESTP_COLOUR_MAX, + IMX219_TESTP_COLOUR_STEP, + IMX219_TESTP_COLOUR_MAX); + /* The "Solid color" pattern is white by default */ + } + + if (ctrl_hdlr->error) { + ret = ctrl_hdlr->error; + dev_err(&client->dev, "%s control init failed (%d)\n", + __func__, ret); + goto error; + } + + imx219->sd.ctrl_handler = ctrl_hdlr; + + return 0; + +error: + 
v4l2_ctrl_handler_free(ctrl_hdlr); + mutex_destroy(&imx219->mutex); + + return ret; +} + +static void imx219_free_controls(struct imx219 *imx219) +{ + v4l2_ctrl_handler_free(imx219->sd.ctrl_handler); + mutex_destroy(&imx219->mutex); +} + +static int imx219_check_hwcfg(struct device *dev) +{ + struct fwnode_handle *endpoint; + struct v4l2_fwnode_endpoint ep_cfg = { + .bus_type = V4L2_MBUS_CSI2_DPHY + }; + int ret = -EINVAL; + + endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL); + if (!endpoint) { + dev_err(dev, "endpoint node not found\n"); + return -EINVAL; + } + + if (v4l2_fwnode_endpoint_alloc_parse(endpoint, &ep_cfg)) { + dev_err(dev, "could not parse endpoint\n"); + goto error_out; + } + + /* Check the number of MIPI CSI2 data lanes */ + if (ep_cfg.bus.mipi_csi2.num_data_lanes != 2) { + dev_err(dev, "only 2 data lanes are currently supported\n"); + goto error_out; + } + + /* Check the link frequency set in device tree */ + if (!ep_cfg.nr_of_link_frequencies) { + dev_err(dev, "link-frequency property not found in DT\n"); + goto error_out; + } + + if (ep_cfg.nr_of_link_frequencies != 1 || + ep_cfg.link_frequencies[0] != IMX219_DEFAULT_LINK_FREQ) { + dev_err(dev, "Link frequency not supported: %lld\n", + ep_cfg.link_frequencies[0]); + goto error_out; + } + + ret = 0; + +error_out: + v4l2_fwnode_endpoint_free(&ep_cfg); + fwnode_handle_put(endpoint); + + return ret; +} + +static int imx219_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct imx219 *imx219; + int ret; + + imx219 = devm_kzalloc(&client->dev, sizeof(*imx219), GFP_KERNEL); + if (!imx219) + return -ENOMEM; + + v4l2_i2c_subdev_init(&imx219->sd, client, &imx219_subdev_ops); + + /* Check the hardware configuration in device tree */ + if (imx219_check_hwcfg(dev)) + return -EINVAL; + + /* Get system clock (xclk) */ + imx219->xclk = devm_clk_get(dev, NULL); + if (IS_ERR(imx219->xclk)) { + dev_err(dev, "failed to get xclk\n"); + return PTR_ERR(imx219->xclk); + } + + imx219->xclk_freq = clk_get_rate(imx219->xclk); + if (imx219->xclk_freq != IMX219_XCLK_FREQ) { + dev_err(dev, "xclk frequency not supported: %d Hz\n", + imx219->xclk_freq); + return -EINVAL; + } + + ret = imx219_get_regulators(imx219); + if (ret) { + dev_err(dev, "failed to get regulators\n"); + return ret; + } + + /* Request optional enable pin */ + imx219->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + + /* + * The sensor must be powered for imx219_identify_module() + * to be able to read the CHIP_ID register + */ + ret = imx219_power_on(dev); + if (ret) + return ret; + + ret = imx219_identify_module(imx219); + if (ret) + goto error_power_off; + + /* Set default mode to max resolution */ + imx219->mode = &supported_modes[0]; + + /* sensor doesn't enter LP-11 state upon power up until and unless + * streaming is started, so upon power up switch the modes to: + * streaming -> standby + */ + ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT, + IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING); + if (ret < 0) + goto error_power_off; + usleep_range(100, 110); + + /* put sensor back to standby mode */ + ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT, + IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY); + if (ret < 0) + goto error_power_off; + usleep_range(100, 110); + + ret = imx219_init_controls(imx219); + if (ret) + goto error_power_off; + + /* Initialize subdev */ + imx219->sd.internal_ops = &imx219_internal_ops; + imx219->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + imx219->sd.entity.function = 
MEDIA_ENT_F_CAM_SENSOR; + + /* Initialize source pad */ + imx219->pad.flags = MEDIA_PAD_FL_SOURCE; + + /* Initialize default format */ + imx219_set_default_format(imx219); + + ret = media_entity_pads_init(&imx219->sd.entity, 1, &imx219->pad); + if (ret) { + dev_err(dev, "failed to init entity pads: %d\n", ret); + goto error_handler_free; + } + + ret = v4l2_async_register_subdev_sensor_common(&imx219->sd); + if (ret < 0) { + dev_err(dev, "failed to register sensor sub-device: %d\n", ret); + goto error_media_entity; + } + + /* Enable runtime PM and turn off the device */ + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + pm_runtime_idle(dev); + + return 0; + +error_media_entity: + media_entity_cleanup(&imx219->sd.entity); + +error_handler_free: + imx219_free_controls(imx219); + +error_power_off: + imx219_power_off(dev); + + return ret; +} + +static int imx219_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx219 *imx219 = to_imx219(sd); + + v4l2_async_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + imx219_free_controls(imx219); + + pm_runtime_disable(&client->dev); + if (!pm_runtime_status_suspended(&client->dev)) + imx219_power_off(&client->dev); + pm_runtime_set_suspended(&client->dev); + + return 0; +} + +static const struct of_device_id imx219_dt_ids[] = { + { .compatible = "sony,imx219" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, imx219_dt_ids); + +static const struct dev_pm_ops imx219_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(imx219_suspend, imx219_resume) + SET_RUNTIME_PM_OPS(imx219_power_off, imx219_power_on, NULL) +}; + +static struct i2c_driver imx219_i2c_driver = { + .driver = { + .name = "imx219", + .of_match_table = imx219_dt_ids, + .pm = &imx219_pm_ops, + }, + .probe_new = imx219_probe, + .remove = imx219_remove, +}; + +module_i2c_driver(imx219_i2c_driver); + +MODULE_AUTHOR("Dave Stevenson <dave.stevenson@raspberrypi.com"); +MODULE_DESCRIPTION("Sony IMX219 sensor driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c index 1ae252378799..8537cc4ca108 100644 --- a/drivers/media/i2c/ov5675.c +++ b/drivers/media/i2c/ov5675.c @@ -63,6 +63,10 @@ #define OV5675_TEST_PATTERN_ENABLE BIT(7) #define OV5675_TEST_PATTERN_BAR_SHIFT 2 +/* Flip Mirror Controls from sensor */ +#define OV5675_REG_FORMAT1 0x3820 +#define OV5675_REG_FORMAT2 0x373d + #define to_ov5675(_sd) container_of(_sd, struct ov5675, sd) enum { @@ -314,21 +318,21 @@ static const struct ov5675_reg mode_1296x972_regs[] = { {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, - {0x3803, 0xf4}, + {0x3803, 0x00}, {0x3804, 0x0a}, {0x3805, 0x3f}, - {0x3806, 0x06}, - {0x3807, 0xb3}, + {0x3806, 0x07}, + {0x3807, 0xb7}, {0x3808, 0x05}, - {0x3809, 0x00}, - {0x380a, 0x02}, - {0x380b, 0xd0}, + {0x3809, 0x10}, + {0x380a, 0x03}, + {0x380b, 0xcc}, {0x380c, 0x02}, {0x380d, 0xee}, {0x380e, 0x07}, - {0x380f, 0xe4}, - {0x3811, 0x10}, - {0x3813, 0x09}, + {0x380f, 0xd0}, + {0x3811, 0x08}, + {0x3813, 0x0d}, {0x3814, 0x03}, {0x3815, 0x01}, {0x3816, 0x03}, @@ -604,6 +608,53 @@ static int ov5675_test_pattern(struct ov5675 *ov5675, u32 pattern) OV5675_REG_VALUE_08BIT, pattern); } +/* + * OV5675 supports keeping the pixel order by mirror and flip function + * The Bayer order isn't affected by the flip controls + */ +static int ov5675_set_ctrl_hflip(struct ov5675 *ov5675, u32 ctrl_val) +{ + int ret; + u32 val; + + ret = ov5675_read_reg(ov5675, OV5675_REG_FORMAT1, + OV5675_REG_VALUE_08BIT, &val); + if (ret) + return ret; + + return 
ov5675_write_reg(ov5675, OV5675_REG_FORMAT1, + OV5675_REG_VALUE_08BIT, + ctrl_val ? val & ~BIT(3) : val); +} + +static int ov5675_set_ctrl_vflip(struct ov5675 *ov5675, u8 ctrl_val) +{ + int ret; + u32 val; + + ret = ov5675_read_reg(ov5675, OV5675_REG_FORMAT1, + OV5675_REG_VALUE_08BIT, &val); + if (ret) + return ret; + + ret = ov5675_write_reg(ov5675, OV5675_REG_FORMAT1, + OV5675_REG_VALUE_08BIT, + ctrl_val ? val | BIT(4) | BIT(5) : val); + + if (ret) + return ret; + + ret = ov5675_read_reg(ov5675, OV5675_REG_FORMAT2, + OV5675_REG_VALUE_08BIT, &val); + + if (ret) + return ret; + + return ov5675_write_reg(ov5675, OV5675_REG_FORMAT2, + OV5675_REG_VALUE_08BIT, + ctrl_val ? val | BIT(1) : val); +} + static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl) { struct ov5675 *ov5675 = container_of(ctrl->handler, @@ -654,6 +705,14 @@ static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl) ret = ov5675_test_pattern(ov5675, ctrl->val); break; + case V4L2_CID_HFLIP: + ov5675_set_ctrl_hflip(ov5675, ctrl->val); + break; + + case V4L2_CID_VFLIP: + ov5675_set_ctrl_vflip(ov5675, ctrl->val); + break; + default: ret = -EINVAL; break; @@ -722,6 +781,11 @@ static int ov5675_init_controls(struct ov5675 *ov5675) V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov5675_test_pattern_menu) - 1, 0, 0, ov5675_test_pattern_menu); + v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops, + V4L2_CID_HFLIP, 0, 1, 1, 0); + v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops, + V4L2_CID_VFLIP, 0, 1, 1, 0); + if (ctrl_hdlr->error) return ctrl_hdlr->error; diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c index d6cd15bb699a..cc678d9d2e0d 100644 --- a/drivers/media/i2c/ov5695.c +++ b/drivers/media/i2c/ov5695.c @@ -971,16 +971,9 @@ unlock_and_return: return ret; } -/* Calculate the delay in us by clock rate and clock cycles */ -static inline u32 ov5695_cal_delay(u32 cycles) -{ - return DIV_ROUND_UP(cycles, OV5695_XVCLK_FREQ / 1000 / 1000); -} - static int __ov5695_power_on(struct ov5695 *ov5695) { - int ret; - u32 delay_us; + int i, ret; struct device *dev = &ov5695->client->dev; ret = clk_prepare_enable(ov5695->xvclk); @@ -991,21 +984,28 @@ static int __ov5695_power_on(struct ov5695 *ov5695) gpiod_set_value_cansleep(ov5695->reset_gpio, 1); - ret = regulator_bulk_enable(OV5695_NUM_SUPPLIES, ov5695->supplies); - if (ret < 0) { - dev_err(dev, "Failed to enable regulators\n"); - goto disable_clk; + /* + * The hardware requires the regulators to be powered on in order, + * so enable them one by one. + */ + for (i = 0; i < OV5695_NUM_SUPPLIES; i++) { + ret = regulator_enable(ov5695->supplies[i].consumer); + if (ret) { + dev_err(dev, "Failed to enable %s: %d\n", + ov5695->supplies[i].supply, ret); + goto disable_reg_clk; + } } gpiod_set_value_cansleep(ov5695->reset_gpio, 0); - /* 8192 cycles prior to first SCCB transaction */ - delay_us = ov5695_cal_delay(8192); - usleep_range(delay_us, delay_us * 2); + usleep_range(1000, 1200); return 0; -disable_clk: +disable_reg_clk: + for (--i; i >= 0; i--) + regulator_disable(ov5695->supplies[i].consumer); clk_disable_unprepare(ov5695->xvclk); return ret; @@ -1013,9 +1013,22 @@ disable_clk: static void __ov5695_power_off(struct ov5695 *ov5695) { + struct device *dev = &ov5695->client->dev; + int i, ret; + clk_disable_unprepare(ov5695->xvclk); gpiod_set_value_cansleep(ov5695->reset_gpio, 1); - regulator_bulk_disable(OV5695_NUM_SUPPLIES, ov5695->supplies); + + /* + * The hardware requires the regulators to be powered off in order, + * so disable them one by one. 
+ */ + for (i = OV5695_NUM_SUPPLIES - 1; i >= 0; i--) { + ret = regulator_disable(ov5695->supplies[i].consumer); + if (ret) + dev_err(dev, "Failed to disable %s: %d\n", + ov5695->supplies[i].supply, ret); + } } static int __maybe_unused ov5695_runtime_resume(struct device *dev) @@ -1285,7 +1298,7 @@ static int ov5695_probe(struct i2c_client *client, if (clk_get_rate(ov5695->xvclk) != OV5695_XVCLK_FREQ) dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n"); - ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ov5695->reset_gpio)) { dev_err(dev, "Failed to get reset-gpios\n"); return -EINVAL; diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c index 8911660da86f..71cf68a95bb2 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c @@ -547,7 +547,7 @@ int s5c73m3_init_controls(struct s5c73m3 *state) V4L2_CTRL_FLAG_UPDATE; v4l2_ctrl_auto_cluster(2, &ctrls->auto_iso, 0, false); ctrls->af_status->flags |= V4L2_CTRL_FLAG_VOLATILE; - v4l2_ctrl_cluster(6, &ctrls->focus_auto); + v4l2_ctrl_cluster(5, &ctrls->focus_auto); state->sensor_sd.ctrl_handler = hdl; diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c index a80d7701b519..5e4f6a2ef78e 100644 --- a/drivers/media/i2c/smiapp/smiapp-core.c +++ b/drivers/media/i2c/smiapp/smiapp-core.c @@ -57,6 +57,45 @@ static const struct smiapp_module_ident smiapp_module_idents[] = { * */ +static u32 smiapp_get_limit(struct smiapp_sensor *sensor, + unsigned int limit) +{ + if (WARN_ON(limit >= SMIAPP_LIMIT_LAST)) + return 1; + + return sensor->limits[limit]; +} + +#define SMIA_LIM(sensor, limit) \ + smiapp_get_limit(sensor, SMIAPP_LIMIT_##limit) + +static int smiapp_read_all_smia_limits(struct smiapp_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + unsigned int i; + int rval; + + for (i = 0; i < SMIAPP_LIMIT_LAST; i++) { + u32 val; + + rval = smiapp_read( + sensor, smiapp_reg_limits[i].addr, &val); + if (rval) + return rval; + + sensor->limits[i] = val; + + dev_dbg(&client->dev, "0x%8.8x \"%s\" = %u, 0x%x\n", + smiapp_reg_limits[i].addr, + smiapp_reg_limits[i].what, val, val); + } + + if (SMIA_LIM(sensor, SCALER_N_MIN) == 0) + smiapp_replace_limit(sensor, SMIAPP_LIMIT_SCALER_N_MIN, 16); + + return 0; +} + static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor) { struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); @@ -240,35 +279,35 @@ static int smiapp_pll_try(struct smiapp_sensor *sensor, { struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); struct smiapp_pll_limits lim = { - .min_pre_pll_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_PRE_PLL_CLK_DIV], - .max_pre_pll_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_PRE_PLL_CLK_DIV], - .min_pll_ip_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_PLL_IP_FREQ_HZ], - .max_pll_ip_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_PLL_IP_FREQ_HZ], - .min_pll_multiplier = sensor->limits[SMIAPP_LIMIT_MIN_PLL_MULTIPLIER], - .max_pll_multiplier = sensor->limits[SMIAPP_LIMIT_MAX_PLL_MULTIPLIER], - .min_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_PLL_OP_FREQ_HZ], - .max_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_PLL_OP_FREQ_HZ], - - .op.min_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV], - .op.max_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV], - .op.min_pix_clk_div = 
sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV], - .op.max_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV], - .op.min_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_FREQ_HZ], - .op.max_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_FREQ_HZ], - .op.min_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_FREQ_HZ], - .op.max_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_FREQ_HZ], - - .vt.min_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_DIV], - .vt.max_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_DIV], - .vt.min_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_DIV], - .vt.max_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_DIV], - .vt.min_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_FREQ_HZ], - .vt.max_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_FREQ_HZ], - .vt.min_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_FREQ_HZ], - .vt.max_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_FREQ_HZ], - - .min_line_length_pck_bin = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN], - .min_line_length_pck = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK], + .min_pre_pll_clk_div = SMIA_LIM(sensor, MIN_PRE_PLL_CLK_DIV), + .max_pre_pll_clk_div = SMIA_LIM(sensor, MAX_PRE_PLL_CLK_DIV), + .min_pll_ip_freq_hz = SMIA_LIM(sensor, MIN_PLL_IP_FREQ_HZ), + .max_pll_ip_freq_hz = SMIA_LIM(sensor, MAX_PLL_IP_FREQ_HZ), + .min_pll_multiplier = SMIA_LIM(sensor, MIN_PLL_MULTIPLIER), + .max_pll_multiplier = SMIA_LIM(sensor, MAX_PLL_MULTIPLIER), + .min_pll_op_freq_hz = SMIA_LIM(sensor, MIN_PLL_OP_FREQ_HZ), + .max_pll_op_freq_hz = SMIA_LIM(sensor, MAX_PLL_OP_FREQ_HZ), + + .op.min_sys_clk_div = SMIA_LIM(sensor, MIN_OP_SYS_CLK_DIV), + .op.max_sys_clk_div = SMIA_LIM(sensor, MAX_OP_SYS_CLK_DIV), + .op.min_pix_clk_div = SMIA_LIM(sensor, MIN_OP_PIX_CLK_DIV), + .op.max_pix_clk_div = SMIA_LIM(sensor, MAX_OP_PIX_CLK_DIV), + .op.min_sys_clk_freq_hz = SMIA_LIM(sensor, MIN_OP_SYS_CLK_FREQ_HZ), + .op.max_sys_clk_freq_hz = SMIA_LIM(sensor, MAX_OP_SYS_CLK_FREQ_HZ), + .op.min_pix_clk_freq_hz = SMIA_LIM(sensor, MIN_OP_PIX_CLK_FREQ_HZ), + .op.max_pix_clk_freq_hz = SMIA_LIM(sensor, MAX_OP_PIX_CLK_FREQ_HZ), + + .vt.min_sys_clk_div = SMIA_LIM(sensor, MIN_VT_SYS_CLK_DIV), + .vt.max_sys_clk_div = SMIA_LIM(sensor, MAX_VT_SYS_CLK_DIV), + .vt.min_pix_clk_div = SMIA_LIM(sensor, MIN_VT_PIX_CLK_DIV), + .vt.max_pix_clk_div = SMIA_LIM(sensor, MAX_VT_PIX_CLK_DIV), + .vt.min_sys_clk_freq_hz = SMIA_LIM(sensor, MIN_VT_SYS_CLK_FREQ_HZ), + .vt.max_sys_clk_freq_hz = SMIA_LIM(sensor, MAX_VT_SYS_CLK_FREQ_HZ), + .vt.min_pix_clk_freq_hz = SMIA_LIM(sensor, MIN_VT_PIX_CLK_FREQ_HZ), + .vt.max_pix_clk_freq_hz = SMIA_LIM(sensor, MAX_VT_PIX_CLK_FREQ_HZ), + + .min_line_length_pck_bin = SMIA_LIM(sensor, MIN_LINE_LENGTH_PCK_BIN), + .min_line_length_pck = SMIA_LIM(sensor, MIN_LINE_LENGTH_PCK), }; return smiapp_pll_calculate(&client->dev, &lim, pll); @@ -311,7 +350,7 @@ static void __smiapp_update_exposure_limits(struct smiapp_sensor *sensor) max = sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height + sensor->vblank->val - - sensor->limits[SMIAPP_LIMIT_COARSE_INTEGRATION_TIME_MAX_MARGIN]; + - SMIA_LIM(sensor, COARSE_INTEGRATION_TIME_MAX_MARGIN); __v4l2_ctrl_modify_range(ctrl, ctrl->minimum, max, ctrl->step, max); } @@ -568,10 +607,10 @@ static int smiapp_init_controls(struct smiapp_sensor *sensor) sensor->analog_gain = v4l2_ctrl_new_std( &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, 
- sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MIN], - sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MAX], - max(sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_STEP], 1U), - sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MIN]); + SMIA_LIM(sensor, ANALOGUE_GAIN_CODE_MIN), + SMIA_LIM(sensor, ANALOGUE_GAIN_CODE_MAX), + max(SMIA_LIM(sensor, ANALOGUE_GAIN_CODE_STEP), 1U), + SMIA_LIM(sensor, ANALOGUE_GAIN_CODE_MIN)); /* Exposure limits will be updated soon, use just something here. */ sensor->exposure = v4l2_ctrl_new_std( @@ -677,45 +716,6 @@ static void smiapp_free_controls(struct smiapp_sensor *sensor) v4l2_ctrl_handler_free(&sensor->ssds[i].ctrl_handler); } -static int smiapp_get_limits(struct smiapp_sensor *sensor, int const *limit, - unsigned int n) -{ - struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); - unsigned int i; - u32 val; - int rval; - - for (i = 0; i < n; i++) { - rval = smiapp_read( - sensor, smiapp_reg_limits[limit[i]].addr, &val); - if (rval) - return rval; - sensor->limits[limit[i]] = val; - dev_dbg(&client->dev, "0x%8.8x \"%s\" = %u, 0x%x\n", - smiapp_reg_limits[limit[i]].addr, - smiapp_reg_limits[limit[i]].what, val, val); - } - - return 0; -} - -static int smiapp_get_all_limits(struct smiapp_sensor *sensor) -{ - unsigned int i; - int rval; - - for (i = 0; i < SMIAPP_LIMIT_LAST; i++) { - rval = smiapp_get_limits(sensor, &i, 1); - if (rval < 0) - return rval; - } - - if (sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN] == 0) - smiapp_replace_limit(sensor, SMIAPP_LIMIT_SCALER_N_MIN, 16); - - return 0; -} - static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor) { struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); @@ -869,21 +869,21 @@ static void smiapp_update_blanking(struct smiapp_sensor *sensor) int min, max; if (sensor->binning_vertical > 1 || sensor->binning_horizontal > 1) { - min_fll = sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN]; - max_fll = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN]; - min_llp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN]; - max_llp = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN]; - min_lbp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN]; + min_fll = SMIA_LIM(sensor, MIN_FRAME_LENGTH_LINES_BIN); + max_fll = SMIA_LIM(sensor, MAX_FRAME_LENGTH_LINES_BIN); + min_llp = SMIA_LIM(sensor, MIN_LINE_LENGTH_PCK_BIN); + max_llp = SMIA_LIM(sensor, MAX_LINE_LENGTH_PCK_BIN); + min_lbp = SMIA_LIM(sensor, MIN_LINE_BLANKING_PCK_BIN); } else { - min_fll = sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES]; - max_fll = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES]; - min_llp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK]; - max_llp = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK]; - min_lbp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK]; + min_fll = SMIA_LIM(sensor, MIN_FRAME_LENGTH_LINES); + max_fll = SMIA_LIM(sensor, MAX_FRAME_LENGTH_LINES); + min_llp = SMIA_LIM(sensor, MIN_LINE_LENGTH_PCK); + max_llp = SMIA_LIM(sensor, MAX_LINE_LENGTH_PCK); + min_lbp = SMIA_LIM(sensor, MIN_LINE_BLANKING_PCK); } min = max_t(int, - sensor->limits[SMIAPP_LIMIT_MIN_FRAME_BLANKING_LINES], + SMIA_LIM(sensor, MIN_FRAME_BLANKING_LINES), min_fll - sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height); max = max_fll - sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height; @@ -961,7 +961,7 @@ static int smiapp_read_nvm_page(struct smiapp_sensor *sensor, u32 p, u8 *nvm, return -ENODATA; } - if (sensor->limits[SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY] & + if (SMIA_LIM(sensor, 
DATA_TRANSFER_IF_CAPABILITY) & SMIAPP_DATA_TRANSFER_IF_CAPABILITY_POLL) { for (i = 1000; i > 0; i--) { if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY) @@ -1416,7 +1416,7 @@ static int smiapp_start_streaming(struct smiapp_sensor *sensor) */ /* Digital crop */ - if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY] + if (SMIA_LIM(sensor, DIGITAL_CROP_CAPABILITY) == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) { rval = smiapp_write( sensor, SMIAPP_REG_U16_DIGITAL_CROP_X_OFFSET, @@ -1444,7 +1444,7 @@ static int smiapp_start_streaming(struct smiapp_sensor *sensor) } /* Scaling */ - if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY] + if (SMIA_LIM(sensor, SCALING_CAPABILITY) != SMIAPP_SCALING_CAPABILITY_NONE) { rval = smiapp_write(sensor, SMIAPP_REG_U16_SCALING_MODE, sensor->scaling_mode); @@ -1467,7 +1467,7 @@ static int smiapp_start_streaming(struct smiapp_sensor *sensor) if (rval < 0) goto out; - if ((sensor->limits[SMIAPP_LIMIT_FLASH_MODE_CAPABILITY] & + if ((SMIA_LIM(sensor, FLASH_MODE_CAPABILITY) & (SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE | SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE)) && sensor->hwcfg->strobe_setup != NULL && @@ -1715,8 +1715,7 @@ static void smiapp_propagate(struct v4l2_subdev *subdev, if (which == V4L2_SUBDEV_FORMAT_ACTIVE) { if (ssd == sensor->scaler) { sensor->scale_m = - sensor->limits[ - SMIAPP_LIMIT_SCALER_N_MIN]; + SMIA_LIM(sensor, SCALER_N_MIN); sensor->scaling_mode = SMIAPP_SCALING_MODE_NONE; } else if (ssd == sensor->binner) { @@ -1828,12 +1827,12 @@ static int smiapp_set_format(struct v4l2_subdev *subdev, fmt->format.width = clamp(fmt->format.width, - sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE], - sensor->limits[SMIAPP_LIMIT_MAX_X_OUTPUT_SIZE]); + SMIA_LIM(sensor, MIN_X_OUTPUT_SIZE), + SMIA_LIM(sensor, MAX_X_OUTPUT_SIZE)); fmt->format.height = clamp(fmt->format.height, - sensor->limits[SMIAPP_LIMIT_MIN_Y_OUTPUT_SIZE], - sensor->limits[SMIAPP_LIMIT_MAX_Y_OUTPUT_SIZE]); + SMIA_LIM(sensor, MIN_Y_OUTPUT_SIZE), + SMIA_LIM(sensor, MAX_Y_OUTPUT_SIZE)); smiapp_get_crop_compose(subdev, cfg, crops, NULL, fmt->which); @@ -1886,7 +1885,7 @@ static int scaling_goodness(struct v4l2_subdev *subdev, int w, int ask_w, val -= abs(w - ask_w); val -= abs(h - ask_h); - if (w < sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE]) + if (w < SMIA_LIM(sensor, MIN_X_OUTPUT_SIZE)) val -= SCALING_GOODNESS_EXTREME; dev_dbg(&client->dev, "w %d ask_w %d h %d ask_h %d goodness %d\n", @@ -1952,7 +1951,7 @@ static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev, struct i2c_client *client = v4l2_get_subdevdata(subdev); struct smiapp_sensor *sensor = to_smiapp_sensor(subdev); u32 min, max, a, b, max_m; - u32 scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]; + u32 scale_m = SMIA_LIM(sensor, SCALER_N_MIN); int mode = SMIAPP_SCALING_MODE_HORIZONTAL; u32 try[4]; u32 ntry = 0; @@ -1965,19 +1964,19 @@ static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev, crops[SMIAPP_PAD_SINK]->height); a = crops[SMIAPP_PAD_SINK]->width - * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN] / sel->r.width; + * SMIA_LIM(sensor, SCALER_N_MIN) / sel->r.width; b = crops[SMIAPP_PAD_SINK]->height - * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN] / sel->r.height; + * SMIA_LIM(sensor, SCALER_N_MIN) / sel->r.height; max_m = crops[SMIAPP_PAD_SINK]->width - * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN] - / sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE]; + * SMIA_LIM(sensor, SCALER_N_MIN) + / SMIA_LIM(sensor, MIN_X_OUTPUT_SIZE); - a = clamp(a, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN], - 
sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX]); - b = clamp(b, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN], - sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX]); - max_m = clamp(max_m, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN], - sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX]); + a = clamp(a, SMIA_LIM(sensor, SCALER_M_MIN), + SMIA_LIM(sensor, SCALER_M_MAX)); + b = clamp(b, SMIA_LIM(sensor, SCALER_M_MIN), + SMIA_LIM(sensor, SCALER_M_MAX)); + max_m = clamp(max_m, SMIA_LIM(sensor, SCALER_M_MIN), + SMIA_LIM(sensor, SCALER_M_MAX)); dev_dbg(&client->dev, "scaling: a %d b %d max_m %d\n", a, b, max_m); @@ -2004,7 +2003,7 @@ static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev, subdev, crops[SMIAPP_PAD_SINK]->width / try[i] - * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN], + * SMIA_LIM(sensor, SCALER_N_MIN), sel->r.width, crops[SMIAPP_PAD_SINK]->height, sel->r.height, @@ -2018,18 +2017,18 @@ static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev, best = this; } - if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY] + if (SMIA_LIM(sensor, SCALING_CAPABILITY) == SMIAPP_SCALING_CAPABILITY_HORIZONTAL) continue; this = scaling_goodness( subdev, crops[SMIAPP_PAD_SINK]->width / try[i] - * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN], + * SMIA_LIM(sensor, SCALER_N_MIN), sel->r.width, crops[SMIAPP_PAD_SINK]->height / try[i] - * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN], + * SMIA_LIM(sensor, SCALER_N_MIN), sel->r.height, sel->flags); @@ -2043,12 +2042,12 @@ static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev, sel->r.width = (crops[SMIAPP_PAD_SINK]->width / scale_m - * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]) & ~1; + * SMIA_LIM(sensor, SCALER_N_MIN)) & ~1; if (mode == SMIAPP_SCALING_MODE_BOTH) sel->r.height = (crops[SMIAPP_PAD_SINK]->height / scale_m - * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]) + * SMIA_LIM(sensor, SCALER_N_MIN)) & ~1; else sel->r.height = crops[SMIAPP_PAD_SINK]->height; @@ -2104,7 +2103,7 @@ static int __smiapp_sel_supported(struct v4l2_subdev *subdev, return 0; if (ssd == sensor->scaler && sel->pad == SMIAPP_PAD_SINK - && sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY] + && SMIA_LIM(sensor, DIGITAL_CROP_CAPABILITY) == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) return 0; return -EINVAL; @@ -2120,7 +2119,7 @@ static int __smiapp_sel_supported(struct v4l2_subdev *subdev, if (ssd == sensor->binner) return 0; if (ssd == sensor->scaler - && sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY] + && SMIA_LIM(sensor, SCALING_CAPABILITY) != SMIAPP_SCALING_CAPABILITY_NONE) return 0; /* Fall through */ @@ -2185,8 +2184,8 @@ static void smiapp_get_native_size(struct smiapp_subdev *ssd, { r->top = 0; r->left = 0; - r->width = ssd->sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1; - r->height = ssd->sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1; + r->width = SMIA_LIM(ssd->sensor, X_ADDR_MAX) + 1; + r->height = SMIA_LIM(ssd->sensor, Y_ADDR_MAX) + 1; } static int __smiapp_get_selection(struct v4l2_subdev *subdev, @@ -2271,10 +2270,10 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev, sel->r.height = SMIAPP_ALIGN_DIM(sel->r.height, sel->flags); sel->r.width = max_t(unsigned int, - sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE], + SMIA_LIM(sensor, MIN_X_OUTPUT_SIZE), sel->r.width); sel->r.height = max_t(unsigned int, - sensor->limits[SMIAPP_LIMIT_MIN_Y_OUTPUT_SIZE], + SMIA_LIM(sensor, MIN_Y_OUTPUT_SIZE), sel->r.height); switch (sel->target) { @@ -2927,7 +2926,7 @@ static int smiapp_probe(struct i2c_client *client) goto out_power_off; } - rval = smiapp_get_all_limits(sensor); + rval = 
smiapp_read_all_smia_limits(sensor); if (rval) { rval = -ENODEV; goto out_power_off; @@ -2963,7 +2962,7 @@ static int smiapp_probe(struct i2c_client *client) goto out_power_off; } - if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY]) { + if (SMIA_LIM(sensor, BINNING_CAPABILITY)) { u32 val; rval = smiapp_read(sensor, @@ -3000,7 +2999,7 @@ static int smiapp_probe(struct i2c_client *client) } if (sensor->minfo.smiapp_version && - sensor->limits[SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY] & + SMIA_LIM(sensor, DATA_TRANSFER_IF_CAPABILITY) & SMIAPP_DATA_TRANSFER_IF_CAPABILITY_SUPPORTED) { if (device_create_file(&client->dev, &dev_attr_nvm) != 0) { dev_err(&client->dev, "sysfs nvm entry failed\n"); @@ -3010,21 +3009,21 @@ static int smiapp_probe(struct i2c_client *client) } /* We consider this as profile 0 sensor if any of these are zero. */ - if (!sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV] || - !sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV] || - !sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV] || - !sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV]) { + if (!SMIA_LIM(sensor, MIN_OP_SYS_CLK_DIV) || + !SMIA_LIM(sensor, MAX_OP_SYS_CLK_DIV) || + !SMIA_LIM(sensor, MIN_OP_PIX_CLK_DIV) || + !SMIA_LIM(sensor, MAX_OP_PIX_CLK_DIV)) { sensor->minfo.smiapp_profile = SMIAPP_PROFILE_0; - } else if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY] + } else if (SMIA_LIM(sensor, SCALING_CAPABILITY) != SMIAPP_SCALING_CAPABILITY_NONE) { - if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY] + if (SMIA_LIM(sensor, SCALING_CAPABILITY) == SMIAPP_SCALING_CAPABILITY_HORIZONTAL) sensor->minfo.smiapp_profile = SMIAPP_PROFILE_1; else sensor->minfo.smiapp_profile = SMIAPP_PROFILE_2; sensor->scaler = &sensor->ssds[sensor->ssds_used]; sensor->ssds_used++; - } else if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY] + } else if (SMIA_LIM(sensor, DIGITAL_CROP_CAPABILITY) == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) { sensor->scaler = &sensor->ssds[sensor->ssds_used]; sensor->ssds_used++; @@ -3034,13 +3033,13 @@ static int smiapp_probe(struct i2c_client *client) sensor->pixel_array = &sensor->ssds[sensor->ssds_used]; sensor->ssds_used++; - sensor->scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]; + sensor->scale_m = SMIA_LIM(sensor, SCALER_N_MIN); /* prepare PLL configuration input values */ sensor->pll.bus_type = SMIAPP_PLL_BUS_TYPE_CSI2; sensor->pll.csi2.lanes = sensor->hwcfg->lanes; sensor->pll.ext_clk_freq_hz = sensor->hwcfg->ext_clk; - sensor->pll.scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]; + sensor->pll.scale_n = SMIA_LIM(sensor, SCALER_N_MIN); /* Profile 0 sensors have no separate OP clock branch. 
*/ if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0) sensor->pll.flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS; diff --git a/drivers/media/i2c/smiapp/smiapp-reg.h b/drivers/media/i2c/smiapp/smiapp-reg.h index 43505cd0616e..e6f96309786f 100644 --- a/drivers/media/i2c/smiapp/smiapp-reg.h +++ b/drivers/media/i2c/smiapp/smiapp-reg.h @@ -35,6 +35,10 @@ #define SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE BIT(0) #define SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE BIT(1) +#define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_CLOCK 0 +#define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_STROBE 1 +#define SMIAPP_CSI_SIGNALLING_MODE_CSI2 2 + #define SMIAPP_DPHY_CTRL_AUTOMATIC 0 /* DPHY control based on REQUESTED_LINK_BIT_RATE_MBPS */ #define SMIAPP_DPHY_CTRL_UI 1 diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c index ce8c1d47fbf0..1b58b7c6c839 100644 --- a/drivers/media/i2c/smiapp/smiapp-regs.c +++ b/drivers/media/i2c/smiapp/smiapp-regs.c @@ -8,6 +8,8 @@ * Contact: Sakari Ailus <sakari.ailus@iki.fi> */ +#include <asm/unaligned.h> + #include <linux/delay.h> #include <linux/i2c.h> @@ -69,18 +71,19 @@ static int ____smiapp_read(struct smiapp_sensor *sensor, u16 reg, { struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); struct i2c_msg msg; - unsigned char data[4]; - u16 offset = reg; + unsigned char data_buf[sizeof(u32)] = { 0 }; + unsigned char offset_buf[sizeof(u16)]; int r; + if (len > sizeof(data_buf)) + return -EINVAL; + msg.addr = client->addr; msg.flags = 0; - msg.len = 2; - msg.buf = data; + msg.len = sizeof(offset_buf); + msg.buf = offset_buf; + put_unaligned_be16(reg, offset_buf); - /* high byte goes out first */ - data[0] = (u8) (offset >> 8); - data[1] = (u8) offset; r = i2c_transfer(client->adapter, &msg, 1); if (r != 1) { if (r >= 0) @@ -90,6 +93,8 @@ static int ____smiapp_read(struct smiapp_sensor *sensor, u16 reg, msg.len = len; msg.flags = I2C_M_RD; + msg.buf = &data_buf[sizeof(data_buf) - len]; + r = i2c_transfer(client->adapter, &msg, 1); if (r != 1) { if (r >= 0) @@ -97,27 +102,12 @@ static int ____smiapp_read(struct smiapp_sensor *sensor, u16 reg, goto err; } - *val = 0; - /* high byte comes first */ - switch (len) { - case SMIAPP_REG_32BIT: - *val = (data[0] << 24) + (data[1] << 16) + (data[2] << 8) + - data[3]; - break; - case SMIAPP_REG_16BIT: - *val = (data[0] << 8) + data[1]; - break; - case SMIAPP_REG_8BIT: - *val = data[0]; - break; - default: - BUG(); - } + *val = get_unaligned_be32(data_buf); return 0; err: - dev_err(&client->dev, "read from offset 0x%x error %d\n", offset, r); + dev_err(&client->dev, "read from offset 0x%x error %d\n", reg, r); return r; } @@ -158,7 +148,7 @@ static int __smiapp_read(struct smiapp_sensor *sensor, u32 reg, u32 *val, && len != SMIAPP_REG_32BIT) return -EINVAL; - if (len == SMIAPP_REG_8BIT || !only8) + if (!only8) rval = ____smiapp_read(sensor, SMIAPP_REG_ADDR(reg), len, val); else rval = ____smiapp_read_8only(sensor, SMIAPP_REG_ADDR(reg), len, @@ -214,13 +204,10 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val) struct i2c_msg msg; unsigned char data[6]; unsigned int retries; - u8 flags = SMIAPP_REG_FLAGS(reg); u8 len = SMIAPP_REG_WIDTH(reg); - u16 offset = SMIAPP_REG_ADDR(reg); int r; - if ((len != SMIAPP_REG_8BIT && len != SMIAPP_REG_16BIT && - len != SMIAPP_REG_32BIT) || flags) + if (len > sizeof(data) - 2) return -EINVAL; msg.addr = client->addr; @@ -228,27 +215,8 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val) msg.len = 2 + len; msg.buf = data; - 
/* high byte goes out first */ - data[0] = (u8) (reg >> 8); - data[1] = (u8) (reg & 0xff); - - switch (len) { - case SMIAPP_REG_8BIT: - data[2] = val; - break; - case SMIAPP_REG_16BIT: - data[2] = val >> 8; - data[3] = val; - break; - case SMIAPP_REG_32BIT: - data[2] = val >> 24; - data[3] = val >> 16; - data[4] = val >> 8; - data[5] = val; - break; - default: - BUG(); - } + put_unaligned_be16(SMIAPP_REG_ADDR(reg), data); + put_unaligned_be32(val << (8 * (sizeof(val) - len)), data + 2); for (retries = 0; retries < 5; retries++) { /* @@ -269,7 +237,8 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val) } dev_err(&client->dev, - "wrote 0x%x to offset 0x%x error %d\n", val, offset, r); + "wrote 0x%x to offset 0x%x error %d\n", val, + SMIAPP_REG_ADDR(reg), r); return r; } diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h index 4837d80dc453..6f469934f9e3 100644 --- a/drivers/media/i2c/smiapp/smiapp.h +++ b/drivers/media/i2c/smiapp/smiapp.h @@ -14,7 +14,6 @@ #include <linux/mutex.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-subdev.h> -#include <media/i2c/smiapp.h> #include "smiapp-pll.h" #include "smiapp-reg.h" @@ -42,6 +41,49 @@ #define SMIAPP_COLOUR_COMPONENTS 4 +#define SMIAPP_NAME "smiapp" + +#define SMIAPP_DFL_I2C_ADDR (0x20 >> 1) /* Default I2C Address */ +#define SMIAPP_ALT_I2C_ADDR (0x6e >> 1) /* Alternate I2C Address */ + +/* + * Sometimes due to board layout considerations the camera module can be + * mounted rotated. The typical rotation used is 180 degrees which can be + * corrected by giving a default H-FLIP and V-FLIP in the sensor readout. + * FIXME: rotation also changes the bayer pattern. + */ +enum smiapp_module_board_orient { + SMIAPP_MODULE_BOARD_ORIENT_0 = 0, + SMIAPP_MODULE_BOARD_ORIENT_180, +}; + +struct smiapp_flash_strobe_parms { + u8 mode; + u32 strobe_width_high_us; + u16 strobe_delay; + u16 stobe_start_point; + u8 trigger; +}; + +struct smiapp_hwconfig { + /* + * Change the cci address if i2c_addr_alt is set. 
+ * Both default and alternate cci addr need to be present + */ + unsigned short i2c_addr_dfl; /* Default i2c addr */ + unsigned short i2c_addr_alt; /* Alternate i2c addr */ + + uint32_t ext_clk; /* sensor external clk */ + + unsigned int lanes; /* Number of CSI-2 lanes */ + uint32_t csi_signalling_mode; /* SMIAPP_CSI_SIGNALLING_MODE_* */ + uint64_t *op_sys_clock; + + enum smiapp_module_board_orient module_board_orient; + + struct smiapp_flash_strobe_parms *strobe_setup; +}; + #include "smiapp-limits.h" struct smiapp_quirk; diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index edad49cebcdf..eb39cf5ea089 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -13,12 +13,15 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of_graph.h> +#include <linux/pm_runtime.h> #include <linux/regmap.h> #include <media/v4l2-async.h> #include <media/v4l2-device.h> +#include <media/v4l2-event.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-mc.h> +#include <media/v4l2-rect.h> #include "tvp5150_reg.h" @@ -31,6 +34,15 @@ #define TVP5150_MBUS_FMT MEDIA_BUS_FMT_UYVY8_2X8 #define TVP5150_FIELD V4L2_FIELD_ALTERNATE #define TVP5150_COLORSPACE V4L2_COLORSPACE_SMPTE170M +#define TVP5150_STD_MASK (V4L2_STD_NTSC | \ + V4L2_STD_NTSC_443 | \ + V4L2_STD_PAL | \ + V4L2_STD_PAL_M | \ + V4L2_STD_PAL_N | \ + V4L2_STD_PAL_Nc | \ + V4L2_STD_SECAM) + +#define TVP5150_MAX_CONNECTORS 3 /* Check dt-bindings for more information */ MODULE_DESCRIPTION("Texas Instruments TVP5150A/TVP5150AM1/TVP5151 video decoder driver"); MODULE_AUTHOR("Mauro Carvalho Chehab"); @@ -44,18 +56,26 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)"); #define dprintk0(__dev, __arg...) dev_dbg_lvl(__dev, 0, 0, __arg) enum tvp5150_pads { - TVP5150_PAD_IF_INPUT, + TVP5150_PAD_AIP1A, + TVP5150_PAD_AIP1B, TVP5150_PAD_VID_OUT, TVP5150_NUM_PADS }; +struct tvp5150_connector { + struct v4l2_fwnode_connector base; + struct media_entity ent; + struct media_pad pad; +}; + struct tvp5150 { struct v4l2_subdev sd; -#ifdef CONFIG_MEDIA_CONTROLLER + struct media_pad pads[TVP5150_NUM_PADS]; - struct media_entity input_ent[TVP5150_INPUT_NUM]; - struct media_pad input_pad[TVP5150_INPUT_NUM]; -#endif + struct tvp5150_connector connectors[TVP5150_MAX_CONNECTORS]; + struct tvp5150_connector *cur_connector; + unsigned int connectors_num; + struct v4l2_ctrl_handler hdl; struct v4l2_rect rect; struct regmap *regmap; @@ -282,9 +302,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd) break; } - dev_dbg_lvl(sd->dev, 1, debug, "Selecting video route: route input=%i, output=%i => tvp5150 input=%i, opmode=%i\n", - decoder->input, decoder->output, - input, opmode); + dev_dbg_lvl(sd->dev, 1, debug, + "Selecting video route: route input=%s, output=%s => tvp5150 input=0x%02x, opmode=0x%02x\n", + decoder->input == 0 ? "aip1a" : + decoder->input == 2 ? "aip1b" : "svideo", + decoder->output == 0 ? 
"normal" : "black-frame-gen", + input, opmode); regmap_write(decoder->regmap, TVP5150_OP_MODE_CTL, opmode); regmap_write(decoder->regmap, TVP5150_VD_IN_SRC_SEL_1, input); @@ -773,17 +796,33 @@ static int tvp5150_g_std(struct v4l2_subdev *sd, v4l2_std_id *std) static int tvp5150_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct tvp5150 *decoder = to_tvp5150(sd); + struct tvp5150_connector *cur_con = decoder->cur_connector; + v4l2_std_id supported_stds; if (decoder->norm == std) return 0; + /* In case of no of-connectors are available no limitations are made */ + if (!decoder->connectors_num) + supported_stds = V4L2_STD_ALL; + else + supported_stds = cur_con->base.connector.analog.sdtv_stds; + + /* + * Check if requested std or group of std's is/are supported by the + * connector. + */ + if ((supported_stds & std) == 0) + return -EINVAL; + /* Change cropping height limits */ if (std & V4L2_STD_525_60) decoder->rect.height = TVP5150_V_MAX_525_60; else decoder->rect.height = TVP5150_V_MAX_OTHERS; - decoder->norm = std; + /* Set only the specific supported std in case of group of std's. */ + decoder->norm = supported_stds & std; return tvp5150_set_std(sd, std); } @@ -986,6 +1025,25 @@ static void tvp5150_set_default(v4l2_std_id std, struct v4l2_rect *crop) crop->height = TVP5150_V_MAX_OTHERS; } +static struct v4l2_rect * +tvp5150_get_pad_crop(struct tvp5150 *decoder, + struct v4l2_subdev_pad_config *cfg, unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + switch (which) { + case V4L2_SUBDEV_FORMAT_ACTIVE: + return &decoder->rect; + case V4L2_SUBDEV_FORMAT_TRY: +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + return v4l2_subdev_get_try_crop(&decoder->sd, cfg, pad); +#else + return ERR_PTR(-EINVAL); +#endif + default: + return ERR_PTR(-EINVAL); + } +} + static int tvp5150_fill_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *format) @@ -1010,25 +1068,10 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd, return 0; } -static int tvp5150_set_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) +static unsigned int tvp5150_get_hmax(struct v4l2_subdev *sd) { struct tvp5150 *decoder = to_tvp5150(sd); - struct v4l2_rect rect = sel->r; v4l2_std_id std; - int hmax; - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE || - sel->target != V4L2_SEL_TGT_CROP) - return -EINVAL; - - dev_dbg_lvl(sd->dev, 1, debug, "%s left=%d, top=%d, width=%d, height=%d\n", - __func__, rect.left, rect.top, rect.width, rect.height); - - /* tvp5150 has some special limits */ - rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT); - rect.top = clamp(rect.top, 0, TVP5150_MAX_CROP_TOP); /* Calculate height based on current standard */ if (decoder->norm == V4L2_STD_ALL) @@ -1036,36 +1079,78 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd, else std = decoder->norm; - if (std & V4L2_STD_525_60) - hmax = TVP5150_V_MAX_525_60; - else - hmax = TVP5150_V_MAX_OTHERS; + return (std & V4L2_STD_525_60) ? 
+ TVP5150_V_MAX_525_60 : TVP5150_V_MAX_OTHERS; +} - /* - * alignments: - * - width = 2 due to UYVY colorspace - * - height, image = no special alignment - */ - v4l_bound_align_image(&rect.width, - TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left, - TVP5150_H_MAX - rect.left, 1, &rect.height, - hmax - TVP5150_MAX_CROP_TOP - rect.top, - hmax - rect.top, 0, 0); +static void tvp5150_set_hw_selection(struct v4l2_subdev *sd, + struct v4l2_rect *rect) +{ + struct tvp5150 *decoder = to_tvp5150(sd); + unsigned int hmax = tvp5150_get_hmax(sd); - regmap_write(decoder->regmap, TVP5150_VERT_BLANKING_START, rect.top); + regmap_write(decoder->regmap, TVP5150_VERT_BLANKING_START, rect->top); regmap_write(decoder->regmap, TVP5150_VERT_BLANKING_STOP, - rect.top + rect.height - hmax); + rect->top + rect->height - hmax); regmap_write(decoder->regmap, TVP5150_ACT_VD_CROP_ST_MSB, - rect.left >> TVP5150_CROP_SHIFT); + rect->left >> TVP5150_CROP_SHIFT); regmap_write(decoder->regmap, TVP5150_ACT_VD_CROP_ST_LSB, - rect.left | (1 << TVP5150_CROP_SHIFT)); + rect->left | (1 << TVP5150_CROP_SHIFT)); regmap_write(decoder->regmap, TVP5150_ACT_VD_CROP_STP_MSB, - (rect.left + rect.width - TVP5150_MAX_CROP_LEFT) >> + (rect->left + rect->width - TVP5150_MAX_CROP_LEFT) >> TVP5150_CROP_SHIFT); regmap_write(decoder->regmap, TVP5150_ACT_VD_CROP_STP_LSB, - rect.left + rect.width - TVP5150_MAX_CROP_LEFT); + rect->left + rect->width - TVP5150_MAX_CROP_LEFT); +} + +static int tvp5150_set_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct tvp5150 *decoder = to_tvp5150(sd); + struct v4l2_rect *rect = &sel->r; + struct v4l2_rect *crop; + unsigned int hmax; + + if (sel->target != V4L2_SEL_TGT_CROP) + return -EINVAL; + + dev_dbg_lvl(sd->dev, 1, debug, "%s left=%d, top=%d, width=%d, height=%d\n", + __func__, rect->left, rect->top, rect->width, rect->height); + + /* tvp5150 has some special limits */ + rect->left = clamp(rect->left, 0, TVP5150_MAX_CROP_LEFT); + rect->top = clamp(rect->top, 0, TVP5150_MAX_CROP_TOP); + hmax = tvp5150_get_hmax(sd); + + /* + * alignments: + * - width = 2 due to UYVY colorspace + * - height, image = no special alignment + */ + v4l_bound_align_image(&rect->width, + TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect->left, + TVP5150_H_MAX - rect->left, 1, &rect->height, + hmax - TVP5150_MAX_CROP_TOP - rect->top, + hmax - rect->top, 0, 0); + + if (!IS_ENABLED(CONFIG_VIDEO_V4L2_SUBDEV_API) && + sel->which == V4L2_SUBDEV_FORMAT_TRY) + return 0; + + crop = tvp5150_get_pad_crop(decoder, cfg, sel->pad, sel->which); + if (IS_ERR(crop)) + return PTR_ERR(crop); + + /* + * Update output image size if the selection (crop) rectangle size or + * position has been modified. 
+ */ + if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE && + !v4l2_rect_equal(rect, crop)) + tvp5150_set_hw_selection(sd, rect); - decoder->rect = rect; + *crop = *rect; return 0; } @@ -1075,11 +1160,9 @@ static int tvp5150_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_selection *sel) { struct tvp5150 *decoder = container_of(sd, struct tvp5150, sd); + struct v4l2_rect *crop; v4l2_std_id std; - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - switch (sel->target) { case V4L2_SEL_TGT_CROP_BOUNDS: sel->r.left = 0; @@ -1097,7 +1180,11 @@ static int tvp5150_get_selection(struct v4l2_subdev *sd, sel->r.height = TVP5150_V_MAX_OTHERS; return 0; case V4L2_SEL_TGT_CROP: - sel->r = decoder->rect; + crop = tvp5150_get_pad_crop(decoder, cfg, sel->pad, + sel->which); + if (IS_ERR(crop)) + return PTR_ERR(crop); + sel->r = *crop; return 0; default: return -EINVAL; @@ -1170,30 +1257,148 @@ static int tvp5150_enum_frame_size(struct v4l2_subdev *sd, } /**************************************************************************** - Media entity ops + * Media entity ops ****************************************************************************/ +#if defined(CONFIG_MEDIA_CONTROLLER) +static int tvp5150_set_link(struct media_pad *connector_pad, + struct media_pad *tvp5150_pad, u32 flags) +{ + struct media_link *link; + + link = media_entity_find_link(connector_pad, tvp5150_pad); + if (!link) + return -EINVAL; + + link->flags = flags; + link->reverse->flags = link->flags; + + return 0; +} + +static int tvp5150_disable_all_input_links(struct tvp5150 *decoder) +{ + struct media_pad *connector_pad; + unsigned int i; + int err; + + for (i = 0; i < TVP5150_NUM_PADS - 1; i++) { + connector_pad = media_entity_remote_pad(&decoder->pads[i]); + if (!connector_pad) + continue; + + err = tvp5150_set_link(connector_pad, &decoder->pads[i], 0); + if (err) + return err; + } + + return 0; +} + +static int tvp5150_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, + u32 config); -#ifdef CONFIG_MEDIA_CONTROLLER static int tvp5150_link_setup(struct media_entity *entity, - const struct media_pad *local, + const struct media_pad *tvp5150_pad, const struct media_pad *remote, u32 flags) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); struct tvp5150 *decoder = to_tvp5150(sd); - int i; + struct media_pad *other_tvp5150_pad = + &decoder->pads[tvp5150_pad->index ^ 1]; + struct v4l2_fwnode_connector *v4l2c; + bool is_svideo = false; + unsigned int i; + int err; + + /* + * The TVP5150 state is determined by the enabled sink pad link(s). + * Enabling or disabling the source pad link has no effect. 
+ */ + if (tvp5150_pad->flags & MEDIA_PAD_FL_SOURCE) + return 0; - for (i = 0; i < TVP5150_INPUT_NUM; i++) { - if (remote->entity == &decoder->input_ent[i]) + /* Check if the svideo connector should be enabled */ + for (i = 0; i < decoder->connectors_num; i++) { + if (remote->entity == &decoder->connectors[i].ent) { + v4l2c = &decoder->connectors[i].base; + is_svideo = v4l2c->type == V4L2_CONN_SVIDEO; break; + } } - /* Do nothing for entities that are not input connectors */ - if (i == TVP5150_INPUT_NUM) - return 0; + dev_dbg_lvl(sd->dev, 1, debug, "link setup '%s':%d->'%s':%d[%d]", + remote->entity->name, remote->index, + tvp5150_pad->entity->name, tvp5150_pad->index, + flags & MEDIA_LNK_FL_ENABLED); + if (is_svideo) + dev_dbg_lvl(sd->dev, 1, debug, + "link setup '%s':%d->'%s':%d[%d]", + remote->entity->name, remote->index, + other_tvp5150_pad->entity->name, + other_tvp5150_pad->index, + flags & MEDIA_LNK_FL_ENABLED); - decoder->input = i; + /* + * The TVP5150 has an internal mux which allows the following setup: + * + * comp-connector1 --\ + * |---> AIP1A + * / + * svideo-connector -| + * \ + * |---> AIP1B + * comp-connector2 --/ + * + * We can't rely on user space that the current connector gets disabled + * first before enabling the new connector. Disable all active + * connector links to be on the safe side. + */ + err = tvp5150_disable_all_input_links(decoder); + if (err) + return err; + + tvp5150_s_routing(sd, is_svideo ? TVP5150_SVIDEO : tvp5150_pad->index, + flags & MEDIA_LNK_FL_ENABLED ? TVP5150_NORMAL : + TVP5150_BLACK_SCREEN, 0); + + if (flags & MEDIA_LNK_FL_ENABLED) { + struct v4l2_fwnode_connector_analog *v4l2ca; + u32 new_norm; + + /* + * S-Video connector is conneted to both ports AIP1A and AIP1B. + * Both links must be enabled in one-shot regardless which link + * the user requests. + */ + if (is_svideo) { + err = tvp5150_set_link((struct media_pad *)remote, + other_tvp5150_pad, flags); + if (err) + return err; + } - tvp5150_selmux(sd); + if (!decoder->connectors_num) + return 0; + + /* Update the current connector */ + decoder->cur_connector = + container_of(remote, struct tvp5150_connector, pad); + + /* + * Do nothing if the new connector supports the same tv-norms as + * the old one. + */ + v4l2ca = &decoder->cur_connector->base.connector.analog; + new_norm = decoder->norm & v4l2ca->sdtv_stds; + if (decoder->norm == new_norm) + return 0; + + /* + * Fallback to the new connector tv-norms if we can't find any + * common between the current tv-norm and the new one. + */ + tvp5150_s_std(sd, new_norm ? 
new_norm : v4l2ca->sdtv_stds); + } return 0; } @@ -1202,20 +1407,54 @@ static const struct media_entity_operations tvp5150_sd_media_ops = { .link_setup = tvp5150_link_setup, }; #endif - /**************************************************************************** I2C Command ****************************************************************************/ +static int __maybe_unused tvp5150_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct tvp5150 *decoder = to_tvp5150(sd); + + if (decoder->irq) + /* Disable lock interrupt */ + return regmap_update_bits(decoder->regmap, + TVP5150_INT_ENABLE_REG_A, + TVP5150_INT_A_LOCK, 0); + return 0; +} + +static int __maybe_unused tvp5150_runtime_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct tvp5150 *decoder = to_tvp5150(sd); + + if (decoder->irq) + /* Enable lock interrupt */ + return regmap_update_bits(decoder->regmap, + TVP5150_INT_ENABLE_REG_A, + TVP5150_INT_A_LOCK, + TVP5150_INT_A_LOCK); + return 0; +} static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable) { struct tvp5150 *decoder = to_tvp5150(sd); - unsigned int mask, val = 0, int_val = 0; + unsigned int mask, val = 0; + int ret; mask = TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE | TVP5150_MISC_CTL_CLOCK_OE; if (enable) { + ret = pm_runtime_get_sync(sd->dev); + if (ret < 0) { + pm_runtime_put_noidle(sd->dev); + return ret; + } + tvp5150_enable(sd); /* Enable outputs if decoder is locked */ @@ -1223,15 +1462,13 @@ static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable) val = decoder->lock ? decoder->oe : 0; else val = decoder->oe; - int_val = TVP5150_INT_A_LOCK; + v4l2_subdev_notify_event(&decoder->sd, &tvp5150_ev_fmt); + } else { + pm_runtime_put(sd->dev); } regmap_update_bits(decoder->regmap, TVP5150_MISC_CTL, mask, val); - if (decoder->irq) - /* Enable / Disable lock interrupt */ - regmap_update_bits(decoder->regmap, TVP5150_INT_ENABLE_REG_A, - TVP5150_INT_A_LOCK, int_val); return 0; } @@ -1342,6 +1579,19 @@ static int tvp5150_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_regi } #endif +static int tvp5150_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + switch (sub->type) { + case V4L2_EVENT_SOURCE_CHANGE: + return v4l2_src_change_event_subdev_subscribe(sd, fh, sub); + case V4L2_EVENT_CTRL: + return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub); + default: + return -EINVAL; + } +} + static int tvp5150_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { int status = tvp5150_read(sd, 0x88); @@ -1352,40 +1602,96 @@ static int tvp5150_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) static int tvp5150_registered(struct v4l2_subdev *sd) { -#ifdef CONFIG_MEDIA_CONTROLLER +#if defined(CONFIG_MEDIA_CONTROLLER) struct tvp5150 *decoder = to_tvp5150(sd); - int ret = 0; - int i; - - for (i = 0; i < TVP5150_INPUT_NUM; i++) { - struct media_entity *input = &decoder->input_ent[i]; - struct media_pad *pad = &decoder->input_pad[i]; - - if (!input->name) - continue; + unsigned int i; + int ret; - decoder->input_pad[i].flags = MEDIA_PAD_FL_SOURCE; + /* + * Setup connector pads and links. Enable the link to the first + * available connector per default. 
+ */ + for (i = 0; i < decoder->connectors_num; i++) { + struct media_entity *con = &decoder->connectors[i].ent; + struct media_pad *pad = &decoder->connectors[i].pad; + struct v4l2_fwnode_connector *v4l2c = + &decoder->connectors[i].base; + struct v4l2_connector_link *link = + v4l2_connector_first_link(v4l2c); + unsigned int port = link->fwnode_link.remote_port; + unsigned int flags = i ? 0 : MEDIA_LNK_FL_ENABLED; + bool is_svideo = v4l2c->type == V4L2_CONN_SVIDEO; + + pad->flags = MEDIA_PAD_FL_SOURCE; + ret = media_entity_pads_init(con, 1, pad); + if (ret < 0) + goto err; - ret = media_entity_pads_init(input, 1, pad); + ret = media_device_register_entity(sd->v4l2_dev->mdev, con); if (ret < 0) - return ret; + goto err; - ret = media_device_register_entity(sd->v4l2_dev->mdev, input); + ret = media_create_pad_link(con, 0, &sd->entity, port, flags); if (ret < 0) - return ret; + goto err; - ret = media_create_pad_link(input, 0, &sd->entity, - TVP5150_PAD_IF_INPUT, 0); - if (ret < 0) { - media_device_unregister_entity(input); - return ret; + if (is_svideo) { + /* + * Check tvp5150_link_setup() comments for more + * information. + */ + link = v4l2_connector_last_link(v4l2c); + port = link->fwnode_link.remote_port; + ret = media_create_pad_link(con, 0, &sd->entity, port, + flags); + if (ret < 0) + goto err; + } + + /* Enable default input. */ + if (flags == MEDIA_LNK_FL_ENABLED) { + decoder->input = + is_svideo ? TVP5150_SVIDEO : + port == 0 ? TVP5150_COMPOSITE0 : + TVP5150_COMPOSITE1; + + tvp5150_selmux(sd); + decoder->cur_connector = &decoder->connectors[i]; + tvp5150_s_std(sd, v4l2c->connector.analog.sdtv_stds); } } + + return 0; + +err: + for (i = 0; i < decoder->connectors_num; i++) + media_device_unregister_entity(&decoder->connectors[i].ent); + return ret; #endif return 0; } +static int tvp5150_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + int ret; + + ret = pm_runtime_get_sync(sd->dev); + if (ret < 0) { + pm_runtime_put_noidle(sd->dev); + return ret; + } + + return 0; +} + +static int tvp5150_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + pm_runtime_put(sd->dev); + + return 0; +} + /* ----------------------------------------------------------------------- */ static const struct v4l2_ctrl_ops tvp5150_ctrl_ops = { @@ -1399,6 +1705,8 @@ static const struct v4l2_subdev_core_ops tvp5150_core_ops = { .g_register = tvp5150_g_register, .s_register = tvp5150_s_register, #endif + .subscribe_event = tvp5150_subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, }; static const struct v4l2_subdev_tuner_ops tvp5150_tuner_ops = { @@ -1441,6 +1749,8 @@ static const struct v4l2_subdev_ops tvp5150_ops = { static const struct v4l2_subdev_internal_ops tvp5150_internal_ops = { .registered = tvp5150_registered, + .open = tvp5150_open, + .close = tvp5150_close, }; /**************************************************************************** @@ -1591,102 +1901,211 @@ static int tvp5150_init(struct i2c_client *c) return 0; } -static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np) +#if defined(CONFIG_MEDIA_CONTROLLER) +static int tvp5150_mc_init(struct tvp5150 *decoder) { - struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 }; - struct device_node *ep; -#ifdef CONFIG_MEDIA_CONTROLLER - struct device_node *connectors, *child; - struct media_entity *input; - const char *name; - u32 input_type; -#endif - unsigned int flags; - int ret = 0; + struct v4l2_subdev *sd = &decoder->sd; + unsigned int i; - ep = of_graph_get_next_endpoint(np, NULL); - if 
(!ep) - return -EINVAL; + sd->entity.ops = &tvp5150_sd_media_ops; + sd->entity.function = MEDIA_ENT_F_ATV_DECODER; - ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &bus_cfg); - if (ret) - goto err; + for (i = 0; i < TVP5150_NUM_PADS - 1; i++) { + decoder->pads[i].flags = MEDIA_PAD_FL_SINK; + decoder->pads[i].sig_type = PAD_SIGNAL_ANALOG; + } - flags = bus_cfg.bus.parallel.flags; + decoder->pads[i].flags = MEDIA_PAD_FL_SOURCE; + decoder->pads[i].sig_type = PAD_SIGNAL_DV; - if (bus_cfg.bus_type == V4L2_MBUS_PARALLEL && - !(flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH && - flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH && - flags & V4L2_MBUS_FIELD_EVEN_LOW)) { - ret = -EINVAL; - goto err; - } + return media_entity_pads_init(&sd->entity, TVP5150_NUM_PADS, + decoder->pads); +} - decoder->mbus_type = bus_cfg.bus_type; +#else /* !defined(CONFIG_MEDIA_CONTROLLER) */ -#ifdef CONFIG_MEDIA_CONTROLLER - connectors = of_get_child_by_name(np, "connectors"); +static inline int tvp5150_mc_init(struct tvp5150 *decoder) +{ + return 0; +} +#endif /* defined(CONFIG_MEDIA_CONTROLLER) */ - if (!connectors) - goto err; +static int tvp5150_validate_connectors(struct tvp5150 *decoder) +{ + struct device *dev = decoder->sd.dev; + struct tvp5150_connector *tvpc; + struct v4l2_fwnode_connector *v4l2c; + unsigned int i; + + if (!decoder->connectors_num) { + dev_err(dev, "No valid connector found\n"); + return -ENODEV; + } - for_each_available_child_of_node(connectors, child) { - ret = of_property_read_u32(child, "input", &input_type); - if (ret) { - dev_err(decoder->sd.dev, - "missing type property in node %pOFn\n", - child); - of_node_put(child); - goto err_connector; + for (i = 0; i < decoder->connectors_num; i++) { + struct v4l2_connector_link *link0 = NULL; + struct v4l2_connector_link *link1; + + tvpc = &decoder->connectors[i]; + v4l2c = &tvpc->base; + + if (v4l2c->type == V4L2_CONN_COMPOSITE) { + if (v4l2c->nr_of_links != 1) { + dev_err(dev, "Composite: connector needs 1 link\n"); + return -EINVAL; + } + link0 = v4l2_connector_first_link(v4l2c); + if (!link0) { + dev_err(dev, "Composite: invalid first link\n"); + return -EINVAL; + } + if (link0->fwnode_link.remote_id == 1) { + dev_err(dev, "Composite: invalid endpoint id\n"); + return -EINVAL; + } } - if (input_type >= TVP5150_INPUT_NUM) { - ret = -EINVAL; - of_node_put(child); - goto err_connector; + if (v4l2c->type == V4L2_CONN_SVIDEO) { + if (v4l2c->nr_of_links != 2) { + dev_err(dev, "SVideo: connector needs 2 links\n"); + return -EINVAL; + } + link0 = v4l2_connector_first_link(v4l2c); + if (!link0) { + dev_err(dev, "SVideo: invalid first link\n"); + return -EINVAL; + } + link1 = v4l2_connector_last_link(v4l2c); + if (link0->fwnode_link.remote_port == + link1->fwnode_link.remote_port) { + dev_err(dev, "SVideo: invalid link setup\n"); + return -EINVAL; + } } - input = &decoder->input_ent[input_type]; + if (!(v4l2c->connector.analog.sdtv_stds & TVP5150_STD_MASK)) { + dev_err(dev, "Unsupported tv-norm on connector %s\n", + v4l2c->name); + return -EINVAL; + } + } + + return 0; +} + +static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np) +{ + struct device *dev = decoder->sd.dev; + struct v4l2_fwnode_endpoint bus_cfg = { + .bus_type = V4L2_MBUS_UNKNOWN + }; + struct device_node *ep_np; + struct tvp5150_connector *tvpc; + struct v4l2_fwnode_connector *v4l2c; + unsigned int flags, ep_num; + unsigned int i; + int ret; + + /* At least 1 output and 1 input */ + ep_num = of_graph_get_endpoint_count(np); + if (ep_num < 2 || ep_num > 5) { + dev_err(dev, "At 
least 1 input and 1 output must be connected to the device.\n"); + return -EINVAL; + } - /* Each input connector can only be defined once */ - if (input->name) { - dev_err(decoder->sd.dev, - "input %s with same type already exists\n", - input->name); - of_node_put(child); - ret = -EINVAL; - goto err_connector; + /* Layout if all connectors are used: + * + * tvp-5150 port@0 (AIP1A) + * endpoint@0 -----------> Comp0-Con port + * endpoint@1 --------+--> Svideo-Con port + * tvp-5150 port@1 (AIP1B) | + * endpoint@1 --------+ + * endpoint@0 -----------> Comp1-Con port + * tvp-5150 port@2 + * endpoint (video bitstream output at YOUT[0-7] parallel bus) + */ + for_each_endpoint_of_node(np, ep_np) { + struct fwnode_handle *ep_fwnode = of_fwnode_handle(ep_np); + unsigned int next_connector = decoder->connectors_num; + struct of_endpoint ep; + + of_graph_parse_endpoint(ep_np, &ep); + if (ep.port > 1 || ep.id > 1) { + dev_dbg(dev, "Ignore connector on port@%u/ep@%u\n", + ep.port, ep.id); + continue; } - switch (input_type) { - case TVP5150_COMPOSITE0: - case TVP5150_COMPOSITE1: - input->function = MEDIA_ENT_F_CONN_COMPOSITE; - break; - case TVP5150_SVIDEO: - input->function = MEDIA_ENT_F_CONN_SVIDEO; - break; + tvpc = &decoder->connectors[next_connector]; + v4l2c = &tvpc->base; + + if (ep.port == 0 || (ep.port == 1 && ep.id == 0)) { + ret = v4l2_fwnode_connector_parse(ep_fwnode, v4l2c); + if (ret) + goto err_put; + ret = v4l2_fwnode_connector_add_link(ep_fwnode, v4l2c); + if (ret) + goto err_put; + decoder->connectors_num++; + } else { + /* Adding the 2nd svideo link */ + for (i = 0; i < TVP5150_MAX_CONNECTORS; i++) { + tvpc = &decoder->connectors[i]; + v4l2c = &tvpc->base; + if (v4l2c->type == V4L2_CONN_SVIDEO) + break; + } + + ret = v4l2_fwnode_connector_add_link(ep_fwnode, v4l2c); + if (ret) + goto err_put; } + } - input->flags = MEDIA_ENT_FL_CONNECTOR; + ret = tvp5150_validate_connectors(decoder); + if (ret) + goto err_free; + + for (i = 0; i < decoder->connectors_num; i++) { + tvpc = &decoder->connectors[i]; + v4l2c = &tvpc->base; + tvpc->ent.flags = MEDIA_ENT_FL_CONNECTOR; + tvpc->ent.function = v4l2c->type == V4L2_CONN_SVIDEO ? + MEDIA_ENT_F_CONN_SVIDEO : MEDIA_ENT_F_CONN_COMPOSITE; + tvpc->ent.name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", + v4l2c->name, v4l2c->label ? 
+ v4l2c->label : ""); + } - ret = of_property_read_string(child, "label", &name); - if (ret < 0) { - dev_err(decoder->sd.dev, - "missing label property in node %pOFn\n", - child); - of_node_put(child); - goto err_connector; - } + ep_np = of_graph_get_endpoint_by_regs(np, TVP5150_PAD_VID_OUT, 0); + if (!ep_np) { + dev_err(dev, "Error no output endpoint available\n"); + goto err_free; + } + ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_np), &bus_cfg); + of_node_put(ep_np); + if (ret) + goto err_free; - input->name = name; + flags = bus_cfg.bus.parallel.flags; + if (bus_cfg.bus_type == V4L2_MBUS_PARALLEL && + !(flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH && + flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH && + flags & V4L2_MBUS_FIELD_EVEN_LOW)) { + ret = -EINVAL; + goto err_free; } -err_connector: - of_node_put(connectors); -#endif -err: - of_node_put(ep); + decoder->mbus_type = bus_cfg.bus_type; + + return 0; + +err_put: + of_node_put(ep_np); +err_free: + for (i = 0; i < TVP5150_MAX_CONNECTORS; i++) + v4l2_fwnode_connector_free(&decoder->connectors[i].base); + return ret; } @@ -1701,6 +2120,7 @@ static int tvp5150_probe(struct i2c_client *c) struct v4l2_subdev *sd; struct device_node *np = c->dev.of_node; struct regmap *map; + unsigned int i; int res; /* Check if the adapter supports the needed features */ @@ -1722,6 +2142,9 @@ static int tvp5150_probe(struct i2c_client *c) core->regmap = map; sd = &core->sd; + v4l2_i2c_subdev_init(sd, c, &tvp5150_ops); + sd->internal_ops = &tvp5150_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; if (IS_ENABLED(CONFIG_OF) && np) { res = tvp5150_parse_dt(core, np); @@ -1734,30 +2157,29 @@ static int tvp5150_probe(struct i2c_client *c) core->mbus_type = V4L2_MBUS_BT656; } - v4l2_i2c_subdev_init(sd, c, &tvp5150_ops); - sd->internal_ops = &tvp5150_internal_ops; - sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - -#if defined(CONFIG_MEDIA_CONTROLLER) - core->pads[TVP5150_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK; - core->pads[TVP5150_PAD_IF_INPUT].sig_type = PAD_SIGNAL_ANALOG; - core->pads[TVP5150_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE; - core->pads[TVP5150_PAD_VID_OUT].sig_type = PAD_SIGNAL_DV; - - sd->entity.function = MEDIA_ENT_F_ATV_DECODER; - - res = media_entity_pads_init(&sd->entity, TVP5150_NUM_PADS, core->pads); - if (res < 0) + res = tvp5150_mc_init(core); + if (res) return res; - sd->entity.ops = &tvp5150_sd_media_ops; -#endif - res = tvp5150_detect_version(core); if (res < 0) return res; - core->norm = V4L2_STD_ALL; /* Default is autodetect */ + /* + * Iterate over all available connectors in case they are supported and + * successfully parsed. Fallback to default autodetect in case they + * aren't supported. 
+ */ + for (i = 0; i < core->connectors_num; i++) { + struct v4l2_fwnode_connector *v4l2c; + + v4l2c = &core->connectors[i].base; + core->norm |= v4l2c->connector.analog.sdtv_stds; + } + + if (!core->connectors_num) + core->norm = V4L2_STD_ALL; + core->detected_norm = V4L2_STD_UNKNOWN; core->input = TVP5150_COMPOSITE1; core->enable = true; @@ -1802,6 +2224,11 @@ static int tvp5150_probe(struct i2c_client *c) if (debug > 1) tvp5150_log_status(sd); + + pm_runtime_set_active(&c->dev); + pm_runtime_enable(&c->dev); + pm_runtime_idle(&c->dev); + return 0; err: @@ -1813,18 +2240,32 @@ static int tvp5150_remove(struct i2c_client *c) { struct v4l2_subdev *sd = i2c_get_clientdata(c); struct tvp5150 *decoder = to_tvp5150(sd); + unsigned int i; dev_dbg_lvl(sd->dev, 1, debug, "tvp5150.c: removing tvp5150 adapter on address 0x%x\n", c->addr << 1); + for (i = 0; i < decoder->connectors_num; i++) + v4l2_fwnode_connector_free(&decoder->connectors[i].base); + for (i = 0; i < decoder->connectors_num; i++) + media_device_unregister_entity(&decoder->connectors[i].ent); v4l2_async_unregister_subdev(sd); v4l2_ctrl_handler_free(&decoder->hdl); + pm_runtime_disable(&c->dev); + pm_runtime_set_suspended(&c->dev); + return 0; } /* ----------------------------------------------------------------------- */ +static const struct dev_pm_ops tvp5150_pm_ops = { + SET_RUNTIME_PM_OPS(tvp5150_runtime_suspend, + tvp5150_runtime_resume, + NULL) +}; + static const struct i2c_device_id tvp5150_id[] = { { "tvp5150", 0 }, { } @@ -1843,6 +2284,7 @@ static struct i2c_driver tvp5150_driver = { .driver = { .of_match_table = of_match_ptr(tvp5150_of_match), .name = "tvp5150", + .pm = &tvp5150_pm_ops, }, .probe_new = tvp5150_probe, .remove = tvp5150_remove, diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c index 078141712c88..0465832a4090 100644 --- a/drivers/media/i2c/video-i2c.c +++ b/drivers/media/i2c/video-i2c.c @@ -255,7 +255,7 @@ static int amg88xx_set_power(struct video_i2c_data *data, bool on) return amg88xx_set_power_off(data); } -#if IS_ENABLED(CONFIG_HWMON) +#if IS_REACHABLE(CONFIG_HWMON) static const u32 amg88xx_temp_config[] = { HWMON_T_INPUT, @@ -858,7 +858,7 @@ static int video_i2c_probe(struct i2c_client *client, } } - ret = video_register_device(&data->vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&data->vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) goto error_pm_disable; diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c index 7c429ce98bae..211279c5fd77 100644 --- a/drivers/media/mc/mc-entity.c +++ b/drivers/media/mc/mc-entity.c @@ -639,9 +639,9 @@ int media_get_pad_index(struct media_entity *entity, bool is_sink, return -EINVAL; for (i = 0; i < entity->num_pads; i++) { - if (entity->pads[i].flags == MEDIA_PAD_FL_SINK) + if (entity->pads[i].flags & MEDIA_PAD_FL_SINK) pad_is_sink = true; - else if (entity->pads[i].flags == MEDIA_PAD_FL_SOURCE) + else if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE) pad_is_sink = false; else continue; /* This is an error! 
*/ @@ -662,9 +662,14 @@ media_create_pad_link(struct media_entity *source, u16 source_pad, struct media_link *link; struct media_link *backlink; - BUG_ON(source == NULL || sink == NULL); - BUG_ON(source_pad >= source->num_pads); - BUG_ON(sink_pad >= sink->num_pads); + if (WARN_ON(!source || !sink) || + WARN_ON(source_pad >= source->num_pads) || + WARN_ON(sink_pad >= sink->num_pads)) + return -EINVAL; + if (WARN_ON(!(source->pads[source_pad].flags & MEDIA_PAD_FL_SOURCE))) + return -EINVAL; + if (WARN_ON(!(sink->pads[sink_pad].flags & MEDIA_PAD_FL_SINK))) + return -EINVAL; link = media_add_link(&source->links); if (link == NULL) diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index a359da7773a9..9144f795fb93 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -2964,7 +2964,7 @@ static int bttv_open(struct file *file) dprintk("open dev=%s\n", video_device_node_name(vdev)); - if (vdev->vfl_type == VFL_TYPE_GRABBER) { + if (vdev->vfl_type == VFL_TYPE_VIDEO) { type = V4L2_BUF_TYPE_VIDEO_CAPTURE; } else if (vdev->vfl_type == VFL_TYPE_VBI) { type = V4L2_BUF_TYPE_VBI_CAPTURE; @@ -3905,7 +3905,7 @@ static int bttv_register_video(struct bttv *btv) if (no_overlay <= 0) btv->video_dev.device_caps |= V4L2_CAP_VIDEO_OVERLAY; - if (video_register_device(&btv->video_dev, VFL_TYPE_GRABBER, + if (video_register_device(&btv->video_dev, VFL_TYPE_VIDEO, video_nr[btv->c.nr]) < 0) goto err; pr_info("%d: registered device %s\n", diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c index c5207501d5e0..0ff37496c9ab 100644 --- a/drivers/media/pci/cobalt/cobalt-v4l2.c +++ b/drivers/media/pci/cobalt/cobalt-v4l2.c @@ -1272,7 +1272,7 @@ static int cobalt_node_register(struct cobalt *cobalt, int node) video_set_drvdata(vdev, s); ret = vb2_queue_init(q); if (!s->is_audio && ret == 0) - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); else if (!s->is_dummy) ret = cobalt_alsa_init(s); diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c index b79718519b9b..3178df3c4922 100644 --- a/drivers/media/pci/cx18/cx18-streams.c +++ b/drivers/media/pci/cx18/cx18-streams.c @@ -48,19 +48,19 @@ static struct { } cx18_stream_info[] = { { /* CX18_ENC_STREAM_TYPE_MPG */ "encoder MPEG", - VFL_TYPE_GRABBER, 0, + VFL_TYPE_VIDEO, 0, PCI_DMA_FROMDEVICE, V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_AUDIO | V4L2_CAP_TUNER }, { /* CX18_ENC_STREAM_TYPE_TS */ "TS", - VFL_TYPE_GRABBER, -1, + VFL_TYPE_VIDEO, -1, PCI_DMA_FROMDEVICE, }, { /* CX18_ENC_STREAM_TYPE_YUV */ "encoder YUV", - VFL_TYPE_GRABBER, CX18_V4L2_ENC_YUV_OFFSET, + VFL_TYPE_VIDEO, CX18_V4L2_ENC_YUV_OFFSET, PCI_DMA_FROMDEVICE, V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | V4L2_CAP_AUDIO | V4L2_CAP_TUNER @@ -74,13 +74,13 @@ static struct { }, { /* CX18_ENC_STREAM_TYPE_PCM */ "encoder PCM audio", - VFL_TYPE_GRABBER, CX18_V4L2_ENC_PCM_OFFSET, + VFL_TYPE_VIDEO, CX18_V4L2_ENC_PCM_OFFSET, PCI_DMA_FROMDEVICE, V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE, }, { /* CX18_ENC_STREAM_TYPE_IDX */ "encoder IDX", - VFL_TYPE_GRABBER, -1, + VFL_TYPE_VIDEO, -1, PCI_DMA_FROMDEVICE, }, { /* CX18_ENC_STREAM_TYPE_RAD */ @@ -434,7 +434,7 @@ static int cx18_reg_dev(struct cx18 *cx, int type) name = video_device_node_name(&s->video_dev); switch (vfl_type) { - case VFL_TYPE_GRABBER: + case VFL_TYPE_VIDEO: CX18_INFO("Registered device %s for %s (%d x 
%d.%02d kB)\n", name, s->name, cx->stream_buffers[type], cx->stream_buf_size[type] / 1024, diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c index 2327fe612610..434677bd4ad1 100644 --- a/drivers/media/pci/cx23885/cx23885-417.c +++ b/drivers/media/pci/cx23885/cx23885-417.c @@ -1545,7 +1545,7 @@ int cx23885_417_register(struct cx23885_dev *dev) if (dev->tuner_type != TUNER_ABSENT) dev->v4l_device->device_caps |= V4L2_CAP_TUNER; err = video_register_device(dev->v4l_device, - VFL_TYPE_GRABBER, -1); + VFL_TYPE_VIDEO, -1); if (err < 0) { pr_info("%s: can't register mpeg device\n", dev->name); return err; diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c index 7fc408ee4934..000c108b94fd 100644 --- a/drivers/media/pci/cx23885/cx23885-video.c +++ b/drivers/media/pci/cx23885/cx23885-video.c @@ -1304,7 +1304,7 @@ int cx23885_video_register(struct cx23885_dev *dev) V4L2_CAP_AUDIO | V4L2_CAP_VIDEO_CAPTURE; if (dev->tuner_type != TUNER_ABSENT) dev->video_dev->device_caps |= V4L2_CAP_TUNER; - err = video_register_device(dev->video_dev, VFL_TYPE_GRABBER, + err = video_register_device(dev->video_dev, VFL_TYPE_VIDEO, video_nr[dev->nr]); if (err < 0) { pr_info("%s: can't register video device\n", diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c index a10261da0db6..1b80c990cb94 100644 --- a/drivers/media/pci/cx25821/cx25821-video.c +++ b/drivers/media/pci/cx25821/cx25821-video.c @@ -757,7 +757,7 @@ int cx25821_video_register(struct cx25821_dev *dev) snprintf(vdev->name, sizeof(vdev->name), "%s #%d", dev->name, i); video_set_drvdata(vdev, chan); - err = video_register_device(vdev, VFL_TYPE_GRABBER, + err = video_register_device(vdev, VFL_TYPE_VIDEO, video_nr[dev->nr]); if (err < 0) diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c index d3da7f4297af..fa4ca002ed19 100644 --- a/drivers/media/pci/cx88/cx88-blackbird.c +++ b/drivers/media/pci/cx88/cx88-blackbird.c @@ -1138,7 +1138,7 @@ static int blackbird_register_video(struct cx8802_dev *dev) V4L2_CAP_VIDEO_CAPTURE; if (dev->core->board.tuner_type != UNSET) dev->mpeg_dev.device_caps |= V4L2_CAP_TUNER; - err = video_register_device(&dev->mpeg_dev, VFL_TYPE_GRABBER, -1); + err = video_register_device(&dev->mpeg_dev, VFL_TYPE_VIDEO, -1); if (err < 0) { pr_info("can't register mpeg device\n"); return err; diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index b8abcd550604..6aabc45aa93c 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -1451,7 +1451,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, V4L2_CAP_VIDEO_CAPTURE; if (core->board.tuner_type != UNSET) dev->video_dev.device_caps |= V4L2_CAP_TUNER; - err = video_register_device(&dev->video_dev, VFL_TYPE_GRABBER, + err = video_register_device(&dev->video_dev, VFL_TYPE_VIDEO, video_nr[core->nr]); if (err < 0) { pr_err("can't register video device\n"); diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c index 7480f0d3ad0f..82581aa5a2a3 100644 --- a/drivers/media/pci/dt3155/dt3155.c +++ b/drivers/media/pci/dt3155/dt3155.c @@ -550,7 +550,7 @@ static int dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id) IRQF_SHARED, DT3155_NAME, pd); if (err) goto err_iounmap; - err = video_register_device(&pd->vdev, VFL_TYPE_GRABBER, -1); + err = video_register_device(&pd->vdev, VFL_TYPE_VIDEO, -1); if (err) goto 
err_free_irq; dev_info(&pdev->dev, "/dev/video%i is ready\n", pd->vdev.minor); diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index 1adfdc7ab0db..92f5eadf2c99 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c @@ -1647,7 +1647,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q) vdev->queue = &q->vbq; vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING; video_set_drvdata(vdev, cio2); - r = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + r = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (r) { dev_err(&cio2->pci_dev->dev, "failed to register video device (%d)\n", r); diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c index f7de9118f609..f04ee84bab5f 100644 --- a/drivers/media/pci/ivtv/ivtv-streams.c +++ b/drivers/media/pci/ivtv/ivtv-streams.c @@ -99,7 +99,7 @@ static struct { } ivtv_stream_info[] = { { /* IVTV_ENC_STREAM_TYPE_MPG */ "encoder MPG", - VFL_TYPE_GRABBER, 0, + VFL_TYPE_VIDEO, 0, PCI_DMA_FROMDEVICE, 0, V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE, @@ -107,7 +107,7 @@ static struct { }, { /* IVTV_ENC_STREAM_TYPE_YUV */ "encoder YUV", - VFL_TYPE_GRABBER, IVTV_V4L2_ENC_YUV_OFFSET, + VFL_TYPE_VIDEO, IVTV_V4L2_ENC_YUV_OFFSET, PCI_DMA_FROMDEVICE, 0, V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE, @@ -123,7 +123,7 @@ static struct { }, { /* IVTV_ENC_STREAM_TYPE_PCM */ "encoder PCM", - VFL_TYPE_GRABBER, IVTV_V4L2_ENC_PCM_OFFSET, + VFL_TYPE_VIDEO, IVTV_V4L2_ENC_PCM_OFFSET, PCI_DMA_FROMDEVICE, 0, V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE, &ivtv_v4l2_enc_fops @@ -137,7 +137,7 @@ static struct { }, { /* IVTV_DEC_STREAM_TYPE_MPG */ "decoder MPG", - VFL_TYPE_GRABBER, IVTV_V4L2_DEC_MPG_OFFSET, + VFL_TYPE_VIDEO, IVTV_V4L2_DEC_MPG_OFFSET, PCI_DMA_TODEVICE, 0, V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE, &ivtv_v4l2_dec_fops @@ -158,7 +158,7 @@ static struct { }, { /* IVTV_DEC_STREAM_TYPE_YUV */ "decoder YUV", - VFL_TYPE_GRABBER, IVTV_V4L2_DEC_YUV_OFFSET, + VFL_TYPE_VIDEO, IVTV_V4L2_DEC_YUV_OFFSET, PCI_DMA_TODEVICE, 0, V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE, &ivtv_v4l2_dec_fops @@ -318,7 +318,7 @@ static int ivtv_reg_dev(struct ivtv *itv, int type) name = video_device_node_name(&s->vdev); switch (vfl_type) { - case VFL_TYPE_GRABBER: + case VFL_TYPE_VIDEO: IVTV_INFO("Registered device %s for %s (%d kB)\n", name, s->name, itv->options.kilobytes[type]); break; diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c index 3a4c29bc0ba5..73e064e6f56d 100644 --- a/drivers/media/pci/meye/meye.c +++ b/drivers/media/pci/meye/meye.c @@ -1711,7 +1711,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) v4l2_ctrl_handler_setup(&meye.hdl); meye.vdev.ctrl_handler = &meye.hdl; - if (video_register_device(&meye.vdev, VFL_TYPE_GRABBER, + if (video_register_device(&meye.vdev, VFL_TYPE_VIDEO, video_nr) < 0) { v4l2_err(v4l2_dev, "video_register_device failed\n"); goto outvideoreg; diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c index 2d582c02adbf..e4623ed2f831 100644 --- a/drivers/media/pci/saa7134/saa7134-core.c +++ b/drivers/media/pci/saa7134/saa7134-core.c @@ -1214,7 +1214,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev, if (saa7134_no_overlay <= 0) dev->video_dev->device_caps |= V4L2_CAP_VIDEO_OVERLAY; - err 
= video_register_device(dev->video_dev,VFL_TYPE_GRABBER, + err = video_register_device(dev->video_dev,VFL_TYPE_VIDEO, video_nr[dev->nr]); if (err < 0) { pr_info("%s: can't register video device\n", diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c index cb65d345fd3e..8ad7879bd840 100644 --- a/drivers/media/pci/saa7134/saa7134-empress.c +++ b/drivers/media/pci/saa7134/saa7134-empress.c @@ -291,7 +291,7 @@ static int empress_init(struct saa7134_dev *dev) dev->empress_dev->device_caps |= V4L2_CAP_TUNER; video_set_drvdata(dev->empress_dev, dev); - err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER, + err = video_register_device(dev->empress_dev,VFL_TYPE_VIDEO, empress_nr[dev->nr]); if (err < 0) { pr_info("%s: can't register video device\n", diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c index 342cabf48064..a8ac94fadc14 100644 --- a/drivers/media/pci/saa7134/saa7134-video.c +++ b/drivers/media/pci/saa7134/saa7134-video.c @@ -1008,8 +1008,7 @@ int saa7134_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) */ if ((dmaq == &dev->video_q && !vb2_is_streaming(&dev->vbi_vbq)) || (dmaq == &dev->vbi_q && !vb2_is_streaming(&dev->video_vbq))) - pm_qos_add_request(&dev->qos_request, - PM_QOS_CPU_DMA_LATENCY, 20); + cpu_latency_qos_add_request(&dev->qos_request, 20); dmaq->seq_nr = 0; return 0; @@ -1024,7 +1023,7 @@ void saa7134_vb2_stop_streaming(struct vb2_queue *vq) if ((dmaq == &dev->video_q && !vb2_is_streaming(&dev->vbi_vbq)) || (dmaq == &dev->vbi_q && !vb2_is_streaming(&dev->video_vbq))) - pm_qos_remove_request(&dev->qos_request); + cpu_latency_qos_remove_request(&dev->qos_request); } static const struct vb2_ops vb2_qops = { diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c index f96226930670..2214c74bbbf1 100644 --- a/drivers/media/pci/saa7146/hexium_gemini.c +++ b/drivers/media/pci/saa7146/hexium_gemini.c @@ -289,7 +289,7 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input; vv_data.vid_ops.vidioc_g_input = vidioc_g_input; vv_data.vid_ops.vidioc_s_input = vidioc_s_input; - ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER); + ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_VIDEO); if (ret < 0) { pr_err("cannot register capture v4l2 device. skipping.\n"); saa7146_vv_release(dev); diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c index bf5e55348f15..39d14c179d22 100644 --- a/drivers/media/pci/saa7146/hexium_orion.c +++ b/drivers/media/pci/saa7146/hexium_orion.c @@ -362,7 +362,7 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input; vv_data.vid_ops.vidioc_g_input = vidioc_g_input; vv_data.vid_ops.vidioc_s_input = vidioc_s_input; - if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) { + if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_VIDEO)) { pr_err("cannot register capture v4l2 device. 
skipping.\n"); return -1; } diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c index e6a71c17566d..129a1f8ebe1a 100644 --- a/drivers/media/pci/saa7146/mxb.c +++ b/drivers/media/pci/saa7146/mxb.c @@ -707,7 +707,7 @@ static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data vv_data.vid_ops.vidioc_g_register = vidioc_g_register; vv_data.vid_ops.vidioc_s_register = vidioc_s_register; #endif - if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) { + if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_VIDEO)) { ERR("cannot register capture v4l2 device. skipping.\n"); saa7146_vv_release(dev); return -1; diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c index 3fca7257a720..11e1eb6a6809 100644 --- a/drivers/media/pci/saa7164/saa7164-encoder.c +++ b/drivers/media/pci/saa7164/saa7164-encoder.c @@ -1087,7 +1087,7 @@ int saa7164_encoder_register(struct saa7164_port *port) v4l2_ctrl_handler_setup(hdl); video_set_drvdata(port->v4l_device, port); result = video_register_device(port->v4l_device, - VFL_TYPE_GRABBER, -1); + VFL_TYPE_VIDEO, -1); if (result < 0) { printk(KERN_INFO "%s: can't register mpeg device\n", dev->name); diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c index 476d7f3b32d6..cbf85231b708 100644 --- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c +++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c @@ -1304,7 +1304,7 @@ static struct solo_enc_dev *solo_enc_alloc(struct solo_dev *solo_dev, solo_enc->vfd->queue = &solo_enc->vidq; solo_enc->vfd->lock = &solo_enc->lock; video_set_drvdata(solo_enc->vfd, solo_enc); - ret = video_register_device(solo_enc->vfd, VFL_TYPE_GRABBER, nr); + ret = video_register_device(solo_enc->vfd, VFL_TYPE_VIDEO, nr); if (ret < 0) goto vdev_release; diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c index 78792067e920..54434f3c428d 100644 --- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c +++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c @@ -692,7 +692,7 @@ int solo_v4l2_init(struct solo_dev *solo_dev, unsigned nr) while (erase_off(solo_dev)) /* Do nothing */; - ret = video_register_device(solo_dev->vfd, VFL_TYPE_GRABBER, nr); + ret = video_register_device(solo_dev->vfd, VFL_TYPE_VIDEO, nr); if (ret < 0) goto fail; diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c index fd3de3bb0c89..798574cfad35 100644 --- a/drivers/media/pci/sta2x11/sta2x11_vip.c +++ b/drivers/media/pci/sta2x11/sta2x11_vip.c @@ -1069,7 +1069,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev, vip->video_dev.lock = &vip->v4l_lock; video_set_drvdata(&vip->video_dev, vip); - ret = video_register_device(&vip->video_dev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&vip->video_dev, VFL_TYPE_VIDEO, -1); if (ret) goto vrelease; diff --git a/drivers/media/pci/ttpci/av7110_v4l.c b/drivers/media/pci/ttpci/av7110_v4l.c index f3d6c3cdb872..cabe006658dd 100644 --- a/drivers/media/pci/ttpci/av7110_v4l.c +++ b/drivers/media/pci/ttpci/av7110_v4l.c @@ -831,7 +831,7 @@ int av7110_init_v4l(struct av7110 *av7110) if (FW_VERSION(av7110->arm_app) < 0x2623) vv_data->capabilities &= ~V4L2_CAP_SLICED_VBI_OUTPUT; - if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) { + if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_VIDEO)) { ERR("cannot register capture device. 
skipping\n"); saa7146_vv_release(dev); return -ENODEV; diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c index e2d482af2367..38cac508bd72 100644 --- a/drivers/media/pci/ttpci/budget-av.c +++ b/drivers/media/pci/ttpci/budget-av.c @@ -1470,7 +1470,7 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio vv_data.vid_ops.vidioc_g_input = vidioc_g_input; vv_data.vid_ops.vidioc_s_input = vidioc_s_input; - if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) { + if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_VIDEO))) { /* fixme: proper cleanup here */ ERR("cannot register capture v4l2 device\n"); saa7146_vv_release(dev); diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c index 09732eed7eb4..ec1e06da7e4f 100644 --- a/drivers/media/pci/tw5864/tw5864-video.c +++ b/drivers/media/pci/tw5864/tw5864-video.c @@ -1156,7 +1156,7 @@ static int tw5864_video_input_init(struct tw5864_input *input, int video_nr) input->gop = GOP_SIZE; input->frame_interval = 1; - ret = video_register_device(&input->vdev, VFL_TYPE_GRABBER, video_nr); + ret = video_register_device(&input->vdev, VFL_TYPE_VIDEO, video_nr); if (ret) goto free_v4l2_hdl; diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c index 2fb82d50c53e..10986fcd66a5 100644 --- a/drivers/media/pci/tw68/tw68-video.c +++ b/drivers/media/pci/tw68/tw68-video.c @@ -962,7 +962,7 @@ int tw68_video_init2(struct tw68_dev *dev, int video_nr) dev->vdev.lock = &dev->lock; dev->vdev.queue = &dev->vidq; video_set_drvdata(&dev->vdev, dev); - return video_register_device(&dev->vdev, VFL_TYPE_GRABBER, video_nr); + return video_register_device(&dev->vdev, VFL_TYPE_VIDEO, video_nr); } /* diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c index 9be8c6e4fb69..1ced2b0ddb24 100644 --- a/drivers/media/pci/tw686x/tw686x-video.c +++ b/drivers/media/pci/tw686x/tw686x-video.c @@ -1282,7 +1282,7 @@ int tw686x_video_init(struct tw686x_dev *dev) vc->device = vdev; video_set_drvdata(vdev, vc); - err = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + err = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (err < 0) goto error; vc->num = vdev->num; diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index f65e98d3adf2..e01bbb9dd1c1 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -507,6 +507,18 @@ config VIDEO_SUN8I_DEINTERLACE capability found on some SoCs, like H3. To compile this driver as a module choose m here. +config VIDEO_SUN8I_ROTATE + tristate "Allwinner DE2 rotation driver" + depends on VIDEO_DEV && VIDEO_V4L2 + depends on ARCH_SUNXI || COMPILE_TEST + depends on COMMON_CLK && OF + depends on PM + select VIDEOBUF2_DMA_CONTIG + select V4L2_MEM2MEM_DEV + help + Support for the Allwinner DE2 rotation unit. + To compile this driver as a module choose m here. + endif # V4L_MEM2MEM_DRIVERS # TI VIDEO PORT Helper Modules @@ -610,49 +622,49 @@ config CEC_GPIO between compatible devices. config VIDEO_SAMSUNG_S5P_CEC - tristate "Samsung S5P CEC driver" - depends on ARCH_EXYNOS || COMPILE_TEST - select CEC_CORE - select CEC_NOTIFIER - help - This is a driver for Samsung S5P HDMI CEC interface. It uses the - generic CEC framework interface. - CEC bus is present in the HDMI connector and enables communication - between compatible devices. 
+ tristate "Samsung S5P CEC driver" + depends on ARCH_EXYNOS || COMPILE_TEST + select CEC_CORE + select CEC_NOTIFIER + help + This is a driver for Samsung S5P HDMI CEC interface. It uses the + generic CEC framework interface. + CEC bus is present in the HDMI connector and enables communication + between compatible devices. config VIDEO_STI_HDMI_CEC - tristate "STMicroelectronics STiH4xx HDMI CEC driver" - depends on ARCH_STI || COMPILE_TEST - select CEC_CORE - select CEC_NOTIFIER - help - This is a driver for STIH4xx HDMI CEC interface. It uses the - generic CEC framework interface. - CEC bus is present in the HDMI connector and enables communication - between compatible devices. + tristate "STMicroelectronics STiH4xx HDMI CEC driver" + depends on ARCH_STI || COMPILE_TEST + select CEC_CORE + select CEC_NOTIFIER + help + This is a driver for STIH4xx HDMI CEC interface. It uses the + generic CEC framework interface. + CEC bus is present in the HDMI connector and enables communication + between compatible devices. config VIDEO_STM32_HDMI_CEC - tristate "STMicroelectronics STM32 HDMI CEC driver" - depends on ARCH_STM32 || COMPILE_TEST - select REGMAP - select REGMAP_MMIO - select CEC_CORE - help - This is a driver for STM32 interface. It uses the - generic CEC framework interface. - CEC bus is present in the HDMI connector and enables communication - between compatible devices. + tristate "STMicroelectronics STM32 HDMI CEC driver" + depends on ARCH_STM32 || COMPILE_TEST + select REGMAP + select REGMAP_MMIO + select CEC_CORE + help + This is a driver for STM32 interface. It uses the + generic CEC framework interface. + CEC bus is present in the HDMI connector and enables communication + between compatible devices. config VIDEO_TEGRA_HDMI_CEC - tristate "Tegra HDMI CEC driver" - depends on ARCH_TEGRA || COMPILE_TEST - select CEC_CORE - select CEC_NOTIFIER - help - This is a driver for the Tegra HDMI CEC interface. It uses the - generic CEC framework interface. - The CEC bus is present in the HDMI connector and enables communication - between compatible devices. + tristate "Tegra HDMI CEC driver" + depends on ARCH_TEGRA || COMPILE_TEST + select CEC_CORE + select CEC_NOTIFIER + help + This is a driver for the Tegra HDMI CEC interface. It uses the + generic CEC framework interface. + The CEC bus is present in the HDMI connector and enables communication + between compatible devices. 
config VIDEO_SECO_CEC tristate "SECO Boards HDMI CEC driver" diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c index 09104304bd06..66079cc41f38 100644 --- a/drivers/media/platform/am437x/am437x-vpfe.c +++ b/drivers/media/platform/am437x/am437x-vpfe.c @@ -285,6 +285,7 @@ vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc, max_data = ccdc_data_size_max_bit(ccdcparam->data_sz); if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 || + ccdcparam->data_sz > VPFE_CCDC_DATA_8BITS || max_gamma > max_data) { vpfe_dbg(1, vpfe, "Invalid data line select\n"); return -EINVAL; @@ -324,7 +325,7 @@ static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc) static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev) { - struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc); + struct vpfe_device *vpfe = to_vpfe(ccdc); u32 dma_cntl, pcr; pcr = vpfe_reg_read(ccdc, VPFE_PCR); @@ -348,7 +349,7 @@ static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev) static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params) { - struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc); + struct vpfe_device *vpfe = to_vpfe(ccdc); struct vpfe_ccdc_config_params_raw raw_params; int x; @@ -504,7 +505,7 @@ vpfe_ccdc_config_black_compense(struct vpfe_ccdc *ccdc, */ static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc) { - struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc); + struct vpfe_device *vpfe = to_vpfe(ccdc); struct vpfe_ccdc_config_params_raw *config_params = &ccdc->ccdc_cfg.bayer.config_params; struct ccdc_params_raw *params = &ccdc->ccdc_cfg.bayer; @@ -609,7 +610,7 @@ static inline enum ccdc_buftype vpfe_ccdc_get_buftype(struct vpfe_ccdc *ccdc) static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt) { - struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc); + struct vpfe_device *vpfe = to_vpfe(ccdc); vpfe_dbg(1, vpfe, "%s: if_type: %d, pixfmt:%s\n", __func__, ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt)); @@ -741,7 +742,7 @@ static inline void vpfe_set_sdr_addr(struct vpfe_ccdc *ccdc, unsigned long addr) static int vpfe_ccdc_set_hw_if_params(struct vpfe_ccdc *ccdc, struct vpfe_hw_if_param *params) { - struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc); + struct vpfe_device *vpfe = to_vpfe(ccdc); ccdc->ccdc_cfg.if_type = params->if_type; @@ -2267,7 +2268,7 @@ static int vpfe_probe_complete(struct vpfe_device *vpfe) vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; video_set_drvdata(vdev, vpfe); - err = video_register_device(&vpfe->video_dev, VFL_TYPE_GRABBER, -1); + err = video_register_device(&vpfe->video_dev, VFL_TYPE_VIDEO, -1); if (err) { vpfe_err(vpfe, "Unable to register video device.\n"); diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c index d8593cb2ae84..7d98db1d9b52 100644 --- a/drivers/media/platform/aspeed-video.c +++ b/drivers/media/platform/aspeed-video.c @@ -1,4 +1,6 @@ -// SPDX-License-Identifier: GPL-2.0+ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright 2020 IBM Corp. 
+// Copyright (c) 2019-2020 Intel Corporation #include <linux/atomic.h> #include <linux/bitfield.h> @@ -72,11 +74,8 @@ #define VE_SEQ_CTRL_CAP_BUSY BIT(16) #define VE_SEQ_CTRL_COMP_BUSY BIT(18) -#ifdef CONFIG_MACH_ASPEED_G5 -#define VE_SEQ_CTRL_JPEG_MODE BIT(13) /* AST2500 */ -#else -#define VE_SEQ_CTRL_JPEG_MODE BIT(8) /* AST2400 */ -#endif /* CONFIG_MACH_ASPEED_G5 */ +#define AST2500_VE_SEQ_CTRL_JPEG_MODE BIT(13) +#define AST2400_VE_SEQ_CTRL_JPEG_MODE BIT(8) #define VE_CTRL 0x008 #define VE_CTRL_HSYNC_POL BIT(0) @@ -133,7 +132,8 @@ #define VE_COMP_CTRL_HQ_DCT_CHR GENMASK(26, 22) #define VE_COMP_CTRL_HQ_DCT_LUM GENMASK(31, 27) -#define VE_OFFSET_COMP_STREAM 0x078 +#define AST2400_VE_COMP_SIZE_READ_BACK 0x078 +#define AST2600_VE_COMP_SIZE_READ_BACK 0x084 #define VE_SRC_LR_EDGE_DET 0x090 #define VE_SRC_LR_EDGE_DET_LEFT GENMASK(11, 0) @@ -220,6 +220,9 @@ struct aspeed_video { struct video_device vdev; struct mutex video_lock; /* v4l2 and videobuf2 lock */ + u32 jpeg_mode; + u32 comp_size_read; + wait_queue_head_t wait; spinlock_t lock; /* buffer list lock */ struct delayed_work res_work; @@ -243,6 +246,26 @@ struct aspeed_video { #define to_aspeed_video(x) container_of((x), struct aspeed_video, v4l2_dev) +struct aspeed_video_config { + u32 jpeg_mode; + u32 comp_size_read; +}; + +static const struct aspeed_video_config ast2400_config = { + .jpeg_mode = AST2400_VE_SEQ_CTRL_JPEG_MODE, + .comp_size_read = AST2400_VE_COMP_SIZE_READ_BACK, +}; + +static const struct aspeed_video_config ast2500_config = { + .jpeg_mode = AST2500_VE_SEQ_CTRL_JPEG_MODE, + .comp_size_read = AST2400_VE_COMP_SIZE_READ_BACK, +}; + +static const struct aspeed_video_config ast2600_config = { + .jpeg_mode = AST2500_VE_SEQ_CTRL_JPEG_MODE, + .comp_size_read = AST2600_VE_COMP_SIZE_READ_BACK, +}; + static const u32 aspeed_video_jpeg_header[ASPEED_VIDEO_JPEG_HEADER_SIZE] = { 0xe0ffd8ff, 0x464a1000, 0x01004649, 0x60000101, 0x00006000, 0x0f00feff, 0x00002d05, 0x00000000, 0x00000000, 0x00dbff00 @@ -572,7 +595,7 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg) if (sts & VE_INTERRUPT_COMP_COMPLETE) { struct aspeed_video_buffer *buf; u32 frame_size = aspeed_video_read(video, - VE_OFFSET_COMP_STREAM); + video->comp_size_read); spin_lock(&video->lock); clear_bit(VIDEO_FRAME_INPRG, &video->flags); @@ -907,7 +930,7 @@ static void aspeed_video_init_regs(struct aspeed_video *video) FIELD_PREP(VE_COMP_CTRL_DCT_LUM, video->jpeg_quality) | FIELD_PREP(VE_COMP_CTRL_DCT_CHR, video->jpeg_quality | 0x10); u32 ctrl = VE_CTRL_AUTO_OR_CURSOR; - u32 seq_ctrl = VE_SEQ_CTRL_JPEG_MODE; + u32 seq_ctrl = video->jpeg_mode; if (video->frame_rate) ctrl |= FIELD_PREP(VE_CTRL_FRC, video->frame_rate); @@ -1565,14 +1588,14 @@ static int aspeed_video_setup_video(struct aspeed_video *video) V4L2_CAP_STREAMING; vdev->v4l2_dev = v4l2_dev; strscpy(vdev->name, DEVICE_NAME, sizeof(vdev->name)); - vdev->vfl_type = VFL_TYPE_GRABBER; + vdev->vfl_type = VFL_TYPE_VIDEO; vdev->vfl_dir = VFL_DIR_RX; vdev->release = video_device_release_empty; vdev->ioctl_ops = &aspeed_video_ioctl_ops; vdev->lock = &video->video_lock; video_set_drvdata(vdev, video); - rc = video_register_device(vdev, VFL_TYPE_GRABBER, 0); + rc = video_register_device(vdev, VFL_TYPE_VIDEO, 0); if (rc) { vb2_queue_release(vbq); v4l2_ctrl_handler_free(&video->ctrl_handler); @@ -1653,16 +1676,37 @@ err_unprepare_eclk: return rc; } +static const struct of_device_id aspeed_video_of_match[] = { + { .compatible = "aspeed,ast2400-video-engine", .data = &ast2400_config }, + { .compatible = 
"aspeed,ast2500-video-engine", .data = &ast2500_config }, + { .compatible = "aspeed,ast2600-video-engine", .data = &ast2600_config }, + {} +}; +MODULE_DEVICE_TABLE(of, aspeed_video_of_match); + static int aspeed_video_probe(struct platform_device *pdev) { + const struct aspeed_video_config *config; + const struct of_device_id *match; + struct aspeed_video *video; int rc; - struct resource *res; - struct aspeed_video *video = - devm_kzalloc(&pdev->dev, sizeof(*video), GFP_KERNEL); + video = devm_kzalloc(&pdev->dev, sizeof(*video), GFP_KERNEL); if (!video) return -ENOMEM; + video->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(video->base)) + return PTR_ERR(video->base); + + match = of_match_node(aspeed_video_of_match, pdev->dev.of_node); + if (!match) + return -EINVAL; + + config = match->data; + video->jpeg_mode = config->jpeg_mode; + video->comp_size_read = config->comp_size_read; + video->frame_rate = 30; video->dev = &pdev->dev; spin_lock_init(&video->lock); @@ -1671,13 +1715,6 @@ static int aspeed_video_probe(struct platform_device *pdev) INIT_DELAYED_WORK(&video->res_work, aspeed_video_resolution_work); INIT_LIST_HEAD(&video->buffers); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - video->base = devm_ioremap_resource(video->dev, res); - - if (IS_ERR(video->base)) - return PTR_ERR(video->base); - rc = aspeed_video_init(video); if (rc) return rc; @@ -1716,13 +1753,6 @@ static int aspeed_video_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id aspeed_video_of_match[] = { - { .compatible = "aspeed,ast2400-video-engine" }, - { .compatible = "aspeed,ast2500-video-engine" }, - {} -}; -MODULE_DEVICE_TABLE(of, aspeed_video_of_match); - static struct platform_driver aspeed_video_driver = { .driver = { .name = DEVICE_NAME, diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c index d7669a03e98e..a6e9797a0ec9 100644 --- a/drivers/media/platform/atmel/atmel-isc-base.c +++ b/drivers/media/platform/atmel/atmel-isc-base.c @@ -22,6 +22,7 @@ #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/videodev2.h> +#include <linux/atmel-isc-media.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> @@ -224,10 +225,35 @@ const u32 isc_gamma_table[GAMMA_MAX + 1][GAMMA_ENTRIES] = { (((mbus_code) == MEDIA_BUS_FMT_Y10_1X10) | \ (((mbus_code) == MEDIA_BUS_FMT_Y8_1X8))) +#define ISC_CTRL_ISC_TO_V4L2(x) ((x) == ISC_WB_O_ZERO_VAL ? 0 : (x)) +#define ISC_CTRL_V4L2_TO_ISC(x) ((x) ? (x) : ISC_WB_O_ZERO_VAL) + +static inline void isc_update_v4l2_ctrls(struct isc_device *isc) +{ + struct isc_ctrls *ctrls = &isc->ctrls; + + /* In here we set the v4l2 controls w.r.t. 
our pipeline config */ + v4l2_ctrl_s_ctrl(isc->r_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_R]); + v4l2_ctrl_s_ctrl(isc->b_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_B]); + v4l2_ctrl_s_ctrl(isc->gr_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_GR]); + v4l2_ctrl_s_ctrl(isc->gb_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_GB]); + + v4l2_ctrl_s_ctrl(isc->r_off_ctrl, + ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_R])); + v4l2_ctrl_s_ctrl(isc->b_off_ctrl, + ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_B])); + v4l2_ctrl_s_ctrl(isc->gr_off_ctrl, + ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_GR])); + v4l2_ctrl_s_ctrl(isc->gb_off_ctrl, + ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_GB])); +} + static inline void isc_update_awb_ctrls(struct isc_device *isc) { struct isc_ctrls *ctrls = &isc->ctrls; + /* In here we set our actual hw pipeline config */ + regmap_write(isc->regmap, ISC_WB_O_RGR, (ISC_WB_O_ZERO_VAL - (ctrls->offset[ISC_HIS_CFG_MODE_R])) | ((ISC_WB_O_ZERO_VAL - ctrls->offset[ISC_HIS_CFG_MODE_GR]) << 16)); @@ -662,11 +688,9 @@ static void isc_set_pipeline(struct isc_device *isc, u32 pipeline) bay_cfg = isc->config.sd_format->cfa_baycfg; - if (ctrls->awb == ISC_WB_NONE) - isc_reset_awb_ctrls(isc); - regmap_write(regmap, ISC_WB_CFG, bay_cfg); isc_update_awb_ctrls(isc); + isc_update_v4l2_ctrls(isc); regmap_write(regmap, ISC_CFA_CFG, bay_cfg | ISC_CFA_CFG_EITPOL); @@ -1396,6 +1420,7 @@ static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f) isc->try_config.sd_format != isc->config.sd_format) { isc->ctrls.hist_stat = HIST_INIT; isc_reset_awb_ctrls(isc); + isc_update_v4l2_ctrls(isc); } /* make the try configuration active */ isc->config = isc->try_config; @@ -1814,10 +1839,6 @@ static void isc_awb_work(struct work_struct *w) ctrls->hist_id = hist_id; baysel = isc->config.sd_format->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT; - /* if no more auto white balance, reset controls. 
*/ - if (ctrls->awb == ISC_WB_NONE) - isc_reset_awb_ctrls(isc); - pm_runtime_get_sync(isc->dev); /* @@ -1842,6 +1863,8 @@ static void isc_awb_work(struct work_struct *w) if (ctrls->awb == ISC_WB_ONETIME) { v4l2_info(&isc->v4l2_dev, "Completed one time white-balance adjustment.\n"); + /* update the v4l2 controls values */ + isc_update_v4l2_ctrls(isc); ctrls->awb = ISC_WB_NONE; } } @@ -1873,6 +1896,27 @@ static int isc_s_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_GAMMA: ctrls->gamma_index = ctrl->val; break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct v4l2_ctrl_ops isc_ctrl_ops = { + .s_ctrl = isc_s_ctrl, +}; + +static int isc_s_awb_ctrl(struct v4l2_ctrl *ctrl) +{ + struct isc_device *isc = container_of(ctrl->handler, + struct isc_device, ctrls.handler); + struct isc_ctrls *ctrls = &isc->ctrls; + + if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE) + return 0; + + switch (ctrl->id) { case V4L2_CID_AUTO_WHITE_BALANCE: if (ctrl->val == 1) ctrls->awb = ISC_WB_AUTO; @@ -1883,36 +1927,142 @@ static int isc_s_ctrl(struct v4l2_ctrl *ctrl) if (!isc->config.sd_format) break; - if (ctrls->hist_stat != HIST_ENABLED) - isc_reset_awb_ctrls(isc); + /* configure the controls with new values from v4l2 */ + if (ctrl->cluster[ISC_CTRL_R_GAIN]->is_new) + ctrls->gain[ISC_HIS_CFG_MODE_R] = isc->r_gain_ctrl->val; + if (ctrl->cluster[ISC_CTRL_B_GAIN]->is_new) + ctrls->gain[ISC_HIS_CFG_MODE_B] = isc->b_gain_ctrl->val; + if (ctrl->cluster[ISC_CTRL_GR_GAIN]->is_new) + ctrls->gain[ISC_HIS_CFG_MODE_GR] = isc->gr_gain_ctrl->val; + if (ctrl->cluster[ISC_CTRL_GB_GAIN]->is_new) + ctrls->gain[ISC_HIS_CFG_MODE_GB] = isc->gb_gain_ctrl->val; + + if (ctrl->cluster[ISC_CTRL_R_OFF]->is_new) + ctrls->offset[ISC_HIS_CFG_MODE_R] = + ISC_CTRL_V4L2_TO_ISC(isc->r_off_ctrl->val); + if (ctrl->cluster[ISC_CTRL_B_OFF]->is_new) + ctrls->offset[ISC_HIS_CFG_MODE_B] = + ISC_CTRL_V4L2_TO_ISC(isc->b_off_ctrl->val); + if (ctrl->cluster[ISC_CTRL_GR_OFF]->is_new) + ctrls->offset[ISC_HIS_CFG_MODE_GR] = + ISC_CTRL_V4L2_TO_ISC(isc->gr_off_ctrl->val); + if (ctrl->cluster[ISC_CTRL_GB_OFF]->is_new) + ctrls->offset[ISC_HIS_CFG_MODE_GB] = + ISC_CTRL_V4L2_TO_ISC(isc->gb_off_ctrl->val); - if (isc->ctrls.awb == ISC_WB_AUTO && + isc_update_awb_ctrls(isc); + + if (vb2_is_streaming(&isc->vb2_vidq)) { + /* + * If we are streaming, we can update profile to + * have the new settings in place. + */ + isc_update_profile(isc); + } else { + /* + * The auto cluster will activate automatically this + * control. This has to be deactivated when not + * streaming. + */ + v4l2_ctrl_activate(isc->do_wb_ctrl, false); + } + + /* if we have autowhitebalance on, start histogram procedure */ + if (ctrls->awb == ISC_WB_AUTO && vb2_is_streaming(&isc->vb2_vidq) && ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code)) isc_set_histogram(isc, true); - break; - case V4L2_CID_DO_WHITE_BALANCE: - /* if AWB is enabled, do nothing */ - if (ctrls->awb == ISC_WB_AUTO) - return 0; + /* + * for one time whitebalance adjustment, check the button, + * if it's pressed, perform the one time operation. 
+ */ + if (ctrls->awb == ISC_WB_NONE && + ctrl->cluster[ISC_CTRL_DO_WB]->is_new && + !(ctrl->cluster[ISC_CTRL_DO_WB]->flags & + V4L2_CTRL_FLAG_INACTIVE)) { + ctrls->awb = ISC_WB_ONETIME; + isc_set_histogram(isc, true); + v4l2_dbg(1, debug, &isc->v4l2_dev, + "One time white-balance started.\n"); + } + return 0; + } + return 0; +} - ctrls->awb = ISC_WB_ONETIME; - isc_set_histogram(isc, true); - v4l2_dbg(1, debug, &isc->v4l2_dev, - "One time white-balance started.\n"); +static int isc_g_volatile_awb_ctrl(struct v4l2_ctrl *ctrl) +{ + struct isc_device *isc = container_of(ctrl->handler, + struct isc_device, ctrls.handler); + struct isc_ctrls *ctrls = &isc->ctrls; + + switch (ctrl->id) { + /* being a cluster, this id will be called for every control */ + case V4L2_CID_AUTO_WHITE_BALANCE: + ctrl->cluster[ISC_CTRL_R_GAIN]->val = + ctrls->gain[ISC_HIS_CFG_MODE_R]; + ctrl->cluster[ISC_CTRL_B_GAIN]->val = + ctrls->gain[ISC_HIS_CFG_MODE_B]; + ctrl->cluster[ISC_CTRL_GR_GAIN]->val = + ctrls->gain[ISC_HIS_CFG_MODE_GR]; + ctrl->cluster[ISC_CTRL_GB_GAIN]->val = + ctrls->gain[ISC_HIS_CFG_MODE_GB]; + + ctrl->cluster[ISC_CTRL_R_OFF]->val = + ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_R]); + ctrl->cluster[ISC_CTRL_B_OFF]->val = + ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_B]); + ctrl->cluster[ISC_CTRL_GR_OFF]->val = + ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_GR]); + ctrl->cluster[ISC_CTRL_GB_OFF]->val = + ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_GB]); break; - default: - return -EINVAL; } - return 0; } -static const struct v4l2_ctrl_ops isc_ctrl_ops = { - .s_ctrl = isc_s_ctrl, +static const struct v4l2_ctrl_ops isc_awb_ops = { + .s_ctrl = isc_s_awb_ctrl, + .g_volatile_ctrl = isc_g_volatile_awb_ctrl, }; +#define ISC_CTRL_OFF(_name, _id, _name_str) \ + static const struct v4l2_ctrl_config _name = { \ + .ops = &isc_awb_ops, \ + .id = _id, \ + .name = _name_str, \ + .type = V4L2_CTRL_TYPE_INTEGER, \ + .flags = V4L2_CTRL_FLAG_SLIDER, \ + .min = -4095, \ + .max = 4095, \ + .step = 1, \ + .def = 0, \ + } + +ISC_CTRL_OFF(isc_r_off_ctrl, ISC_CID_R_OFFSET, "Red Component Offset"); +ISC_CTRL_OFF(isc_b_off_ctrl, ISC_CID_B_OFFSET, "Blue Component Offset"); +ISC_CTRL_OFF(isc_gr_off_ctrl, ISC_CID_GR_OFFSET, "Green Red Component Offset"); +ISC_CTRL_OFF(isc_gb_off_ctrl, ISC_CID_GB_OFFSET, "Green Blue Component Offset"); + +#define ISC_CTRL_GAIN(_name, _id, _name_str) \ + static const struct v4l2_ctrl_config _name = { \ + .ops = &isc_awb_ops, \ + .id = _id, \ + .name = _name_str, \ + .type = V4L2_CTRL_TYPE_INTEGER, \ + .flags = V4L2_CTRL_FLAG_SLIDER, \ + .min = 0, \ + .max = 8191, \ + .step = 1, \ + .def = 512, \ + } + +ISC_CTRL_GAIN(isc_r_gain_ctrl, ISC_CID_R_GAIN, "Red Component Gain"); +ISC_CTRL_GAIN(isc_b_gain_ctrl, ISC_CID_B_GAIN, "Blue Component Gain"); +ISC_CTRL_GAIN(isc_gr_gain_ctrl, ISC_CID_GR_GAIN, "Green Red Component Gain"); +ISC_CTRL_GAIN(isc_gb_gain_ctrl, ISC_CID_GB_GAIN, "Green Blue Component Gain"); + static int isc_ctrl_init(struct isc_device *isc) { const struct v4l2_ctrl_ops *ops = &isc_ctrl_ops; @@ -1923,7 +2073,7 @@ static int isc_ctrl_init(struct isc_device *isc) ctrls->hist_stat = HIST_INIT; isc_reset_awb_ctrls(isc); - ret = v4l2_ctrl_handler_init(hdl, 5); + ret = v4l2_ctrl_handler_init(hdl, 13); if (ret < 0) return ret; @@ -1933,10 +2083,13 @@ static int isc_ctrl_init(struct isc_device *isc) v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, -1024, 1023, 1, 0); v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 256); v4l2_ctrl_new_std(hdl, ops, 
V4L2_CID_GAMMA, 0, GAMMA_MAX, 1, 2); - v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1); + isc->awb_ctrl = v4l2_ctrl_new_std(hdl, &isc_awb_ops, + V4L2_CID_AUTO_WHITE_BALANCE, + 0, 1, 1, 1); /* do_white_balance is a button, so min,max,step,default are ignored */ - isc->do_wb_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_DO_WHITE_BALANCE, + isc->do_wb_ctrl = v4l2_ctrl_new_std(hdl, &isc_awb_ops, + V4L2_CID_DO_WHITE_BALANCE, 0, 0, 0, 0); if (!isc->do_wb_ctrl) { @@ -1947,6 +2100,21 @@ static int isc_ctrl_init(struct isc_device *isc) v4l2_ctrl_activate(isc->do_wb_ctrl, false); + isc->r_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_r_gain_ctrl, NULL); + isc->b_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_b_gain_ctrl, NULL); + isc->gr_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gr_gain_ctrl, NULL); + isc->gb_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gb_gain_ctrl, NULL); + isc->r_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_r_off_ctrl, NULL); + isc->b_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_b_off_ctrl, NULL); + isc->gr_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gr_off_ctrl, NULL); + isc->gb_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gb_off_ctrl, NULL); + + /* + * The cluster is in auto mode with autowhitebalance enabled + * and manual mode otherwise. + */ + v4l2_ctrl_auto_cluster(10, &isc->awb_ctrl, 0, true); + v4l2_ctrl_handler_setup(hdl); return 0; @@ -2143,7 +2311,7 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier) vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE; video_set_drvdata(vdev, isc); - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) { v4l2_err(&isc->v4l2_dev, "video_register_device failed: %d\n", ret); diff --git a/drivers/media/platform/atmel/atmel-isc.h b/drivers/media/platform/atmel/atmel-isc.h index bfaed2fad2b5..fc56a745c7d1 100644 --- a/drivers/media/platform/atmel/atmel-isc.h +++ b/drivers/media/platform/atmel/atmel-isc.h @@ -213,7 +213,6 @@ struct isc_device { struct fmt_config try_config; struct isc_ctrls ctrls; - struct v4l2_ctrl *do_wb_ctrl; struct work_struct awb_work; struct mutex lock; /* serialize access to file operations */ @@ -223,6 +222,28 @@ struct isc_device { struct isc_subdev_entity *current_subdev; struct list_head subdev_entities; + + struct { +#define ISC_CTRL_DO_WB 1 +#define ISC_CTRL_R_GAIN 2 +#define ISC_CTRL_B_GAIN 3 +#define ISC_CTRL_GR_GAIN 4 +#define ISC_CTRL_GB_GAIN 5 +#define ISC_CTRL_R_OFF 6 +#define ISC_CTRL_B_OFF 7 +#define ISC_CTRL_GR_OFF 8 +#define ISC_CTRL_GB_OFF 9 + struct v4l2_ctrl *awb_ctrl; + struct v4l2_ctrl *do_wb_ctrl; + struct v4l2_ctrl *r_gain_ctrl; + struct v4l2_ctrl *b_gain_ctrl; + struct v4l2_ctrl *gr_gain_ctrl; + struct v4l2_ctrl *gb_gain_ctrl; + struct v4l2_ctrl *r_off_ctrl; + struct v4l2_ctrl *b_off_ctrl; + struct v4l2_ctrl *gr_off_ctrl; + struct v4l2_ctrl *gb_off_ctrl; + }; }; #define GAMMA_MAX 2 diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c index 963dfd6e750e..d74aa73f26be 100644 --- a/drivers/media/platform/atmel/atmel-isi.c +++ b/drivers/media/platform/atmel/atmel-isi.c @@ -1094,7 +1094,7 @@ static int isi_graph_notify_complete(struct v4l2_async_notifier *notifier) return ret; } - ret = video_register_device(isi->vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(isi->vdev, VFL_TYPE_VIDEO, -1); if (ret) { dev_err(isi->dev, "Failed to register video device\n"); return ret; diff --git a/drivers/media/platform/coda/coda-common.c 
b/drivers/media/platform/coda/coda-common.c index acff10ad257a..d0d093dd8f7c 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -2726,7 +2726,7 @@ static int coda_register_device(struct coda_dev *dev, int i) v4l2_disable_ioctl(vfd, VIDIOC_G_CROP); v4l2_disable_ioctl(vfd, VIDIOC_S_CROP); - ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (!ret) v4l2_info(&dev->v4l2_dev, "%s registered as %s\n", type == CODA_INST_ENCODER ? "encoder" : "decoder", diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c index b49378b18e5d..c98edb67cfb2 100644 --- a/drivers/media/platform/davinci/isif.c +++ b/drivers/media/platform/davinci/isif.c @@ -29,7 +29,7 @@ #include "ccdc_hw_device.h" /* Defaults for module configuration parameters */ -static struct isif_config_params_raw isif_config_defaults = { +static const struct isif_config_params_raw isif_config_defaults = { .linearize = { .en = 0, .corr_shft = ISIF_NO_SHIFT, diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c index ae419958e420..38d3088d4d38 100644 --- a/drivers/media/platform/davinci/vpbe_display.c +++ b/drivers/media/platform/davinci/vpbe_display.c @@ -1339,7 +1339,7 @@ static int register_device(struct vpbe_layer *vpbe_display_layer, vpbe_display_layer->video_dev.queue = &vpbe_display_layer->buffer_queue; err = video_register_device(&vpbe_display_layer->video_dev, - VFL_TYPE_GRABBER, + VFL_TYPE_VIDEO, -1); if (err) return -ENODEV; diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index 9b1d9643589b..f9f7dd17c57c 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -880,7 +880,7 @@ static int vpfe_enum_fmt_vid_cap(struct file *file, void *priv, /* Fill in the information about format */ pix_fmt = vpfe_lookup_pix_format(pix); if (pix_fmt) { - fmt->pixelformat = fmt->pixelformat; + fmt->pixelformat = pix_fmt->pixelformat; return 0; } return -EINVAL; @@ -1780,7 +1780,7 @@ static int vpfe_probe(struct platform_device *pdev) "video_dev=%p\n", &vpfe_dev->video_dev); vpfe_dev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = video_register_device(&vpfe_dev->video_dev, - VFL_TYPE_GRABBER, -1); + VFL_TYPE_VIDEO, -1); if (ret) { v4l2_err(pdev->dev.driver, diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index 71f4fe882d13..d9ec439faefa 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -1466,7 +1466,7 @@ static int vpif_probe_complete(void) vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; video_set_drvdata(&ch->video_dev, ch); err = video_register_device(vdev, - VFL_TYPE_GRABBER, (j ? 1 : 0)); + VFL_TYPE_VIDEO, (j ? 1 : 0)); if (err) goto probe_out; } diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index abbdbac08e6f..ead14c49d4f5 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c @@ -1214,7 +1214,7 @@ static int vpif_probe_complete(void) vdev->lock = &common->lock; vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; video_set_drvdata(&ch->video_dev, ch); - err = video_register_device(vdev, VFL_TYPE_GRABBER, + err = video_register_device(vdev, VFL_TYPE_VIDEO, (j ? 
3 : 2)); if (err < 0) goto probe_out; diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c index 35a1d0d6dd66..e2c162635f72 100644 --- a/drivers/media/platform/exynos-gsc/gsc-m2m.c +++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c @@ -771,7 +771,7 @@ int gsc_register_m2m_device(struct gsc_dev *gsc) return PTR_ERR(gsc->m2m.m2m_dev); } - ret = video_register_device(&gsc->vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&gsc->vdev, VFL_TYPE_VIDEO, -1); if (ret) { dev_err(&pdev->dev, "%s(): failed to register video device\n", __func__); diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig index 989cb34f19b1..be4effcbfe7b 100644 --- a/drivers/media/platform/exynos4-is/Kconfig +++ b/drivers/media/platform/exynos4-is/Kconfig @@ -13,7 +13,7 @@ config VIDEO_SAMSUNG_EXYNOS4_IS if VIDEO_SAMSUNG_EXYNOS4_IS config VIDEO_EXYNOS4_IS_COMMON - tristate + tristate config VIDEO_S5P_FIMC tristate "S5P/EXYNOS4 FIMC/CAMIF camera interface driver" diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c index 121d609ff856..705f182330ca 100644 --- a/drivers/media/platform/exynos4-is/fimc-capture.c +++ b/drivers/media/platform/exynos4-is/fimc-capture.c @@ -1808,7 +1808,7 @@ static int fimc_register_capture_device(struct fimc_dev *fimc, if (ret) goto err_me_cleanup; - ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1); if (ret) goto err_ctrl_free; diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c index d2cbcdca0463..15f443fa7208 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp-video.c +++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c @@ -619,7 +619,7 @@ int fimc_isp_video_device_register(struct fimc_isp *isp, video_set_drvdata(vdev, isp); - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) { media_entity_cleanup(&vdev->entity); return ret; diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c index e87c6a09205b..394e0818f2d5 100644 --- a/drivers/media/platform/exynos4-is/fimc-lite.c +++ b/drivers/media/platform/exynos4-is/fimc-lite.c @@ -1297,7 +1297,7 @@ static int fimc_lite_subdev_registered(struct v4l2_subdev *sd) video_set_drvdata(vfd, fimc); fimc->ve.pipe = v4l2_get_subdev_hostdata(sd); - ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1); if (ret < 0) { media_entity_cleanup(&vfd->entity); fimc->ve.pipe = NULL; @@ -1614,6 +1614,9 @@ static int fimc_lite_remove(struct platform_device *pdev) struct fimc_lite *fimc = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; + if (!pm_runtime_enabled(dev)) + clk_disable_unprepare(fimc->clock); + pm_runtime_disable(dev); pm_runtime_set_suspended(dev); fimc_lite_unregister_capture_subdev(fimc); diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c index c70c2cbe3eb1..4acb179556c4 100644 --- a/drivers/media/platform/exynos4-is/fimc-m2m.c +++ b/drivers/media/platform/exynos4-is/fimc-m2m.c @@ -746,7 +746,7 @@ int fimc_register_m2m_device(struct fimc_dev *fimc, if (ret) goto err_me; - ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1); if (ret) goto err_vd; diff --git 
a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c index 81a8faedbba6..84633a3b8475 100644 --- a/drivers/media/platform/fsl-viu.c +++ b/drivers/media/platform/fsl-viu.c @@ -1486,7 +1486,7 @@ static int viu_of_probe(struct platform_device *op) mutex_lock(&viu_dev->lock); - ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(viu_dev->vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) { video_device_release(viu_dev->vdev); goto err_unlock; diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c index 38d942322302..08d76eb05ed1 100644 --- a/drivers/media/platform/imx-pxp.c +++ b/drivers/media/platform/imx-pxp.c @@ -1709,7 +1709,7 @@ static int pxp_probe(struct platform_device *pdev) goto err_v4l2; } - ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto err_m2m; diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c index 9ad24c86c5ab..1f89e71cdccf 100644 --- a/drivers/media/platform/m2m-deinterlace.c +++ b/drivers/media/platform/m2m-deinterlace.c @@ -953,7 +953,7 @@ static int deinterlace_probe(struct platform_device *pdev) vfd->lock = &pcdev->dev_mutex; vfd->v4l2_dev = &pcdev->v4l2_dev; - ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&pcdev->v4l2_dev, "Failed to register video device\n"); goto unreg_dev; diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c index 803baf97f06e..09775b6624c6 100644 --- a/drivers/media/platform/marvell-ccic/mcam-core.c +++ b/drivers/media/platform/marvell-ccic/mcam-core.c @@ -1802,7 +1802,7 @@ static int mccic_notify_bound(struct v4l2_async_notifier *notifier, cam->vdev.lock = &cam->s_mutex; cam->vdev.queue = &cam->vb_queue; video_set_drvdata(&cam->vdev, cam); - ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1); if (ret) { cam->sensor = NULL; goto out; diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c index ee802fc3bcdf..f82a81a3bdee 100644 --- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c +++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c @@ -1150,7 +1150,7 @@ static int mtk_jpeg_probe(struct platform_device *pdev) jpeg->dec_vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE; - ret = video_register_device(jpeg->dec_vdev, VFL_TYPE_GRABBER, 3); + ret = video_register_device(jpeg->dec_vdev, VFL_TYPE_VIDEO, 3); if (ret) { v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n"); goto err_dec_vdev_register; diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c index 9afe8161a8c0..14991685adb7 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c @@ -110,6 +110,12 @@ int mtk_mdp_comp_init(struct device *dev, struct device_node *node, for (i = 0; i < ARRAY_SIZE(comp->clk); i++) { comp->clk[i] = of_clk_get(node, i); + if (IS_ERR(comp->clk[i])) { + if (PTR_ERR(comp->clk[i]) != -EPROBE_DEFER) + dev_err(dev, "Failed to get clock\n"); + + return PTR_ERR(comp->clk[i]); + } /* Only RDMA needs two clocks */ if (comp->type != MTK_MDP_RDMA) diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c 
b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c index 7c9e2d69e21a..821f2cf325f0 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c @@ -1229,7 +1229,7 @@ int mtk_mdp_register_m2m_device(struct mtk_mdp_dev *mdp) goto err_m2m_init; } - ret = video_register_device(mdp->vdev, VFL_TYPE_GRABBER, 2); + ret = video_register_device(mdp->vdev, VFL_TYPE_VIDEO, 2); if (ret) { dev_err(dev, "failed to register video device\n"); goto err_vdev_register; diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c index 6720d11f50cf..b065ccd06914 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c @@ -15,7 +15,7 @@ static inline struct mtk_mdp_ctx *vpu_to_ctx(struct mtk_mdp_vpu *vpu) return container_of(vpu, struct mtk_mdp_ctx, vpu); } -static void mtk_mdp_vpu_handle_init_ack(struct mdp_ipi_comm_ack *msg) +static void mtk_mdp_vpu_handle_init_ack(const struct mdp_ipi_comm_ack *msg) { struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *) (unsigned long)msg->ap_inst; @@ -26,10 +26,11 @@ static void mtk_mdp_vpu_handle_init_ack(struct mdp_ipi_comm_ack *msg) vpu->inst_addr = msg->vpu_inst_addr; } -static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len, void *priv) +static void mtk_mdp_vpu_ipi_handler(const void *data, unsigned int len, + void *priv) { - unsigned int msg_id = *(unsigned int *)data; - struct mdp_ipi_comm_ack *msg = (struct mdp_ipi_comm_ack *)data; + const struct mdp_ipi_comm_ack *msg = data; + unsigned int msg_id = msg->msg_id; struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *) (unsigned long)msg->ap_inst; struct mtk_mdp_ctx *ctx; diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c index 100ae8c5e702..97a1b6664c20 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c @@ -331,7 +331,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev) goto err_event_workq; } - ret = video_register_device(vfd_dec, VFL_TYPE_GRABBER, 0); + ret = video_register_device(vfd_dec, VFL_TYPE_VIDEO, 0); if (ret) { mtk_v4l2_err("Failed to register video device"); goto err_dec_reg; diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c index 1d82aa2b6017..4d31f1ed113f 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c @@ -356,7 +356,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev) goto err_event_workq; } - ret = video_register_device(vfd_enc, VFL_TYPE_GRABBER, 1); + ret = video_register_device(vfd_enc, VFL_TYPE_VIDEO, 1); if (ret) { mtk_v4l2_err("Failed to register video device"); goto err_enc_reg; diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c index 24c1f0bf2147..257a5b5ad212 100644 --- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c @@ -110,7 +110,11 @@ struct vp9_sf_ref_fb { * @buf_len_sz_c : size used to store cbcr plane ufo info (AP-R, VPU-W) * @profile : profile sparsed from vpu (AP-R, VPU-W) - * @show_frame : display this frame or not (AP-R, VPU-W) + * @show_frame : [BIT(0)] display this frame or not (AP-R, VPU-W) + * [BIT(1)] reset segment data or not (AP-R, VPU-W) + * [BIT(2)] trig decoder hardware or not 
(AP-R, VPU-W) + * [BIT(3)] ask VPU to set bits(0~4) accordingly (AP-W, VPU-R) + * [BIT(4)] do not reset segment data before every frame (AP-R, VPU-W) * @show_existing_frame : inform this frame is show existing frame * (AP-R, VPU-W) * @frm_to_show_idx : index to show frame (AP-R, VPU-W) @@ -494,12 +498,12 @@ static void vp9_swap_frm_bufs(struct vdec_vp9_inst *inst) frm_to_show->fb->base_y.size); } if (!vp9_is_sf_ref_fb(inst, inst->cur_fb)) { - if (vsi->show_frame) + if (vsi->show_frame & BIT(0)) vp9_add_to_fb_disp_list(inst, inst->cur_fb); } } else { if (!vp9_is_sf_ref_fb(inst, inst->cur_fb)) { - if (vsi->show_frame) + if (vsi->show_frame & BIT(0)) vp9_add_to_fb_disp_list(inst, frm_to_show->fb); } } @@ -800,6 +804,9 @@ static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx) } inst->vsi = (struct vdec_vp9_vsi *)inst->vpu.vsi; + + inst->vsi->show_frame |= BIT(3); + init_all_fb_lists(inst); ctx->drv_handle = inst; @@ -870,13 +877,27 @@ static int vdec_vp9_decode(void *h_vdec, struct mtk_vcodec_mem *bs, vsi->sf_frm_sz[idx]); } } - memset(inst->seg_id_buf.va, 0, inst->seg_id_buf.size); + + if (!(vsi->show_frame & BIT(4))) + memset(inst->seg_id_buf.va, 0, inst->seg_id_buf.size); + ret = vpu_dec_start(&inst->vpu, data, 3); if (ret) { mtk_vcodec_err(inst, "vpu_dec_start failed"); goto DECODE_ERROR; } + if (vsi->show_frame & BIT(1)) { + memset(inst->seg_id_buf.va, 0, inst->seg_id_buf.size); + + if (vsi->show_frame & BIT(2)) { + if (vpu_dec_start(&inst->vpu, NULL, 0)) { + mtk_vcodec_err(inst, "vpu trig decoder failed"); + goto DECODE_ERROR; + } + } + } + ret = validate_vsi_array_indexes(inst, vsi); if (ret) { mtk_vcodec_err(inst, "Invalid values from VPU."); diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c index 70abfd4cd4b9..948a12fd9d46 100644 --- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c @@ -9,7 +9,7 @@ #include "vdec_ipi_msg.h" #include "vdec_vpu_if.h" -static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg) +static void handle_init_ack_msg(const struct vdec_vpu_ipi_init_ack *msg) { struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *) (unsigned long)msg->ap_inst_addr; @@ -34,9 +34,9 @@ static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg) * This function runs in interrupt context and it means there's an IPI MSG * from VPU. 
*/ -static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv) +static void vpu_dec_ipi_handler(const void *data, unsigned int len, void *priv) { - struct vdec_vpu_ipi_ack *msg = data; + const struct vdec_vpu_ipi_ack *msg = data; struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *) (unsigned long)msg->ap_inst_addr; diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c index 3e931b0ed096..9540709c1905 100644 --- a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c +++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c @@ -8,26 +8,26 @@ #include "venc_ipi_msg.h" #include "venc_vpu_if.h" -static void handle_enc_init_msg(struct venc_vpu_inst *vpu, void *data) +static void handle_enc_init_msg(struct venc_vpu_inst *vpu, const void *data) { - struct venc_vpu_ipi_msg_init *msg = data; + const struct venc_vpu_ipi_msg_init *msg = data; vpu->inst_addr = msg->vpu_inst_addr; vpu->vsi = vpu_mapping_dm_addr(vpu->dev, msg->vpu_inst_addr); } -static void handle_enc_encode_msg(struct venc_vpu_inst *vpu, void *data) +static void handle_enc_encode_msg(struct venc_vpu_inst *vpu, const void *data) { - struct venc_vpu_ipi_msg_enc *msg = data; + const struct venc_vpu_ipi_msg_enc *msg = data; vpu->state = msg->state; vpu->bs_size = msg->bs_size; vpu->is_key_frm = msg->is_key_frm; } -static void vpu_enc_ipi_handler(void *data, unsigned int len, void *priv) +static void vpu_enc_ipi_handler(const void *data, unsigned int len, void *priv) { - struct venc_vpu_ipi_msg_common *msg = data; + const struct venc_vpu_ipi_msg_common *msg = data; struct venc_vpu_inst *vpu = (struct venc_vpu_inst *)(unsigned long)msg->venc_inst; diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c index a768707abb94..d30c08983f56 100644 --- a/drivers/media/platform/mtk-vpu/mtk_vpu.c +++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c @@ -46,6 +46,8 @@ /* binary firmware name */ #define VPU_P_FW "vpu_p.bin" #define VPU_D_FW "vpu_d.bin" +#define VPU_P_FW_NEW "mediatek/mt8173/vpu_p.bin" +#define VPU_D_FW_NEW "mediatek/mt8173/vpu_d.bin" #define VPU_RESET 0x0 #define VPU_TCM_CFG 0x0008 @@ -203,8 +205,8 @@ struct mtk_vpu { struct vpu_run run; struct vpu_wdt wdt; struct vpu_ipi_desc ipi_desc[IPI_MAX]; - struct share_obj *recv_buf; - struct share_obj *send_buf; + struct share_obj __iomem *recv_buf; + struct share_obj __iomem *send_buf; struct device *dev; struct clk *clk; bool fw_loaded; @@ -292,7 +294,7 @@ int vpu_ipi_send(struct platform_device *pdev, unsigned int len) { struct mtk_vpu *vpu = platform_get_drvdata(pdev); - struct share_obj *send_obj = vpu->send_buf; + struct share_obj __iomem *send_obj = vpu->send_buf; unsigned long timeout; int ret = 0; @@ -325,9 +327,9 @@ int vpu_ipi_send(struct platform_device *pdev, } } while (vpu_cfg_readl(vpu, HOST_TO_VPU)); - memcpy((void *)send_obj->share_buf, buf, len); - send_obj->len = len; - send_obj->id = id; + memcpy_toio(send_obj->share_buf, buf, len); + writel(len, &send_obj->len); + writel(id, &send_obj->id); vpu->ipi_id_ack[id] = false; /* send the command to VPU */ @@ -477,16 +479,24 @@ static int load_requested_vpu(struct mtk_vpu *vpu, size_t tcm_size = fw_type ? VPU_DTCM_SIZE : VPU_PTCM_SIZE; size_t fw_size = fw_type ? VPU_D_FW_SIZE : VPU_P_FW_SIZE; char *fw_name = fw_type ? VPU_D_FW : VPU_P_FW; + char *fw_new_name = fw_type ? 
VPU_D_FW_NEW : VPU_P_FW_NEW; const struct firmware *vpu_fw; size_t dl_size = 0; size_t extra_fw_size = 0; void *dest; int ret; - ret = request_firmware(&vpu_fw, fw_name, vpu->dev); + ret = request_firmware(&vpu_fw, fw_new_name, vpu->dev); if (ret < 0) { - dev_err(vpu->dev, "Failed to load %s, %d\n", fw_name, ret); - return ret; + dev_info(vpu->dev, "Failed to load %s, %d, retry\n", + fw_new_name, ret); + + ret = request_firmware(&vpu_fw, fw_name, vpu->dev); + if (ret < 0) { + dev_err(vpu->dev, "Failed to load %s, %d\n", fw_name, + ret); + return ret; + } } dl_size = vpu_fw->size; if (dl_size > fw_size) { @@ -600,10 +610,10 @@ OUT_LOAD_FW: } EXPORT_SYMBOL_GPL(vpu_load_firmware); -static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv) +static void vpu_init_ipi_handler(const void *data, unsigned int len, void *priv) { - struct mtk_vpu *vpu = (struct mtk_vpu *)priv; - struct vpu_run *run = (struct vpu_run *)data; + struct mtk_vpu *vpu = priv; + const struct vpu_run *run = data; vpu->run.signaled = run->signaled; strscpy(vpu->run.fw_ver, run->fw_ver, sizeof(vpu->run.fw_ver)); @@ -700,19 +710,21 @@ static int vpu_alloc_ext_mem(struct mtk_vpu *vpu, u32 fw_type) static void vpu_ipi_handler(struct mtk_vpu *vpu) { - struct share_obj *rcv_obj = vpu->recv_buf; + struct share_obj __iomem *rcv_obj = vpu->recv_buf; struct vpu_ipi_desc *ipi_desc = vpu->ipi_desc; - - if (rcv_obj->id < IPI_MAX && ipi_desc[rcv_obj->id].handler) { - ipi_desc[rcv_obj->id].handler(rcv_obj->share_buf, - rcv_obj->len, - ipi_desc[rcv_obj->id].priv); - if (rcv_obj->id > IPI_VPU_INIT) { - vpu->ipi_id_ack[rcv_obj->id] = true; + unsigned char data[SHARE_BUF_SIZE]; + s32 id = readl(&rcv_obj->id); + + memcpy_fromio(data, rcv_obj->share_buf, sizeof(data)); + if (id < IPI_MAX && ipi_desc[id].handler) { + ipi_desc[id].handler(data, readl(&rcv_obj->len), + ipi_desc[id].priv); + if (id > IPI_VPU_INIT) { + vpu->ipi_id_ack[id] = true; wake_up(&vpu->ack_wq); } } else { - dev_err(vpu->dev, "No such ipi id = %d\n", rcv_obj->id); + dev_err(vpu->dev, "No such ipi id = %d\n", id); } } @@ -722,11 +734,10 @@ static int vpu_ipi_init(struct mtk_vpu *vpu) vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST); /* shared buffer initialization */ - vpu->recv_buf = (__force struct share_obj *)(vpu->reg.tcm + - VPU_DTCM_OFFSET); + vpu->recv_buf = vpu->reg.tcm + VPU_DTCM_OFFSET; vpu->send_buf = vpu->recv_buf + 1; - memset(vpu->recv_buf, 0, sizeof(struct share_obj)); - memset(vpu->send_buf, 0, sizeof(struct share_obj)); + memset_io(vpu->recv_buf, 0, sizeof(struct share_obj)); + memset_io(vpu->send_buf, 0, sizeof(struct share_obj)); return 0; } diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.h b/drivers/media/platform/mtk-vpu/mtk_vpu.h index d4453b4bcee9..ee7c552ce928 100644 --- a/drivers/media/platform/mtk-vpu/mtk_vpu.h +++ b/drivers/media/platform/mtk-vpu/mtk_vpu.h @@ -15,7 +15,7 @@ * VPU interfaces with other blocks by share memory and interrupt. 
**/ -typedef void (*ipi_handler_t) (void *data, +typedef void (*ipi_handler_t) (const void *data, unsigned int len, void *priv); diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c index 27779b75df54..df78df59da45 100644 --- a/drivers/media/platform/mx2_emmaprp.c +++ b/drivers/media/platform/mx2_emmaprp.c @@ -866,7 +866,7 @@ static int emmaprp_probe(struct platform_device *pdev) goto rel_vdev; } - ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&pcdev->v4l2_dev, "Failed to register video device\n"); goto rel_m2m; diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c index 513b99bf963b..21193f0b7f61 100644 --- a/drivers/media/platform/omap/omap_vout.c +++ b/drivers/media/platform/omap/omap_vout.c @@ -1500,7 +1500,7 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev) /* Register the Video device with V4L2 */ vfd = vout->vfd; - if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) { + if (video_register_device(vfd, VFL_TYPE_VIDEO, -1) < 0) { dev_err(&pdev->dev, ": Could not register Video for Linux device\n"); vfd->minor = -1; diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c index 471ae7cdb813..0fbb2aa6dd2c 100644 --- a/drivers/media/platform/omap3isp/ispccdc.c +++ b/drivers/media/platform/omap3isp/ispccdc.c @@ -1312,6 +1312,10 @@ static void __ccdc_enable(struct isp_ccdc_device *ccdc, int enable) { struct isp_device *isp = to_isp_device(ccdc); + /* Avoid restarting the CCDC when streaming is stopping. */ + if (enable && ccdc->stopping & CCDC_STOP_REQUEST) + return; + isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR, ISPCCDC_PCR_EN, enable ? ISPCCDC_PCR_EN : 0); diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index ee183c35ff3b..6f769c527fae 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c @@ -1311,7 +1311,7 @@ static int isp_video_open(struct file *file) goto done; } - ret = v4l2_pipeline_pm_use(&video->video.entity, 1); + ret = v4l2_pipeline_pm_get(&video->video.entity); if (ret < 0) { omap3isp_put(video->isp); goto done; @@ -1363,7 +1363,7 @@ static int isp_video_release(struct file *file) vb2_queue_release(&handle->queue); mutex_unlock(&video->queue_lock); - v4l2_pipeline_pm_use(&video->video.entity, 0); + v4l2_pipeline_pm_put(&video->video.entity); /* Release the file handle. 
*/ v4l2_fh_del(vfh); @@ -1453,7 +1453,7 @@ int omap3isp_video_init(struct isp_video *video, const char *name) video->video.fops = &isp_video_fops; snprintf(video->video.name, sizeof(video->video.name), "OMAP3 ISP %s %s", name, direction); - video->video.vfl_type = VFL_TYPE_GRABBER; + video->video.vfl_type = VFL_TYPE_VIDEO; video->video.release = video_device_release_empty; video->video.ioctl_ops = &isp_video_ioctl_ops; if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) @@ -1484,7 +1484,7 @@ int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev) video->video.v4l2_dev = vdev; - ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1); if (ret < 0) dev_err(video->isp->dev, "%s: could not register video device (%d)\n", diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c index 43ae645d866b..70c85a2a10f5 100644 --- a/drivers/media/platform/pxa_camera.c +++ b/drivers/media/platform/pxa_camera.c @@ -2191,7 +2191,7 @@ static int pxa_camera_sensor_bound(struct v4l2_async_notifier *notifier, if (err) goto out_sensor_poweroff; - err = video_register_device(&pcdev->vdev, VFL_TYPE_GRABBER, -1); + err = video_register_device(&pcdev->vdev, VFL_TYPE_VIDEO, -1); if (err) { v4l2_err(v4l2_dev, "register video device failed: %d\n", err); pcdev->sensor = NULL; @@ -2440,23 +2440,23 @@ static int pxa_camera_probe(struct platform_device *pdev) pcdev->base = base; /* request dma */ - pcdev->dma_chans[0] = dma_request_slave_channel(&pdev->dev, "CI_Y"); - if (!pcdev->dma_chans[0]) { + pcdev->dma_chans[0] = dma_request_chan(&pdev->dev, "CI_Y"); + if (IS_ERR(pcdev->dma_chans[0])) { dev_err(&pdev->dev, "Can't request DMA for Y\n"); - return -ENODEV; + return PTR_ERR(pcdev->dma_chans[0]); } - pcdev->dma_chans[1] = dma_request_slave_channel(&pdev->dev, "CI_U"); - if (!pcdev->dma_chans[1]) { - dev_err(&pdev->dev, "Can't request DMA for Y\n"); - err = -ENODEV; + pcdev->dma_chans[1] = dma_request_chan(&pdev->dev, "CI_U"); + if (IS_ERR(pcdev->dma_chans[1])) { + dev_err(&pdev->dev, "Can't request DMA for U\n"); + err = PTR_ERR(pcdev->dma_chans[1]); goto exit_free_dma_y; } - pcdev->dma_chans[2] = dma_request_slave_channel(&pdev->dev, "CI_V"); - if (!pcdev->dma_chans[2]) { + pcdev->dma_chans[2] = dma_request_chan(&pdev->dev, "CI_V"); + if (IS_ERR(pcdev->dma_chans[2])) { dev_err(&pdev->dev, "Can't request DMA for V\n"); - err = -ENODEV; + err = PTR_ERR(pcdev->dma_chans[2]); goto exit_free_dma_u; } diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c index 1d50dfbbb762..cdbd6dba1122 100644 --- a/drivers/media/platform/qcom/camss/camss-video.c +++ b/drivers/media/platform/qcom/camss/camss-video.c @@ -745,7 +745,7 @@ static int video_open(struct file *file) file->private_data = vfh; - ret = v4l2_pipeline_pm_use(&vdev->entity, 1); + ret = v4l2_pipeline_pm_get(&vdev->entity); if (ret < 0) { dev_err(video->camss->dev, "Failed to power up pipeline: %d\n", ret); @@ -771,7 +771,7 @@ static int video_release(struct file *file) vb2_fop_release(file); - v4l2_pipeline_pm_use(&vdev->entity, 0); + v4l2_pipeline_pm_put(&vdev->entity); file->private_data = NULL; @@ -921,7 +921,7 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev, vdev->lock = &video->lock; strscpy(vdev->name, name, sizeof(vdev->name)); - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) { 
dev_err(v4l2_dev->dev, "Failed to register video device: %d\n", ret); diff --git a/drivers/media/platform/qcom/venus/Makefile b/drivers/media/platform/qcom/venus/Makefile index b44b11b03e12..64af0bc1edae 100644 --- a/drivers/media/platform/qcom/venus/Makefile +++ b/drivers/media/platform/qcom/venus/Makefile @@ -3,7 +3,7 @@ venus-core-objs += core.o helpers.o firmware.o \ hfi_venus.o hfi_msgs.o hfi_cmds.o hfi.o \ - hfi_parser.o + hfi_parser.o pm_helpers.o venus-dec-objs += vdec.o vdec_ctrls.o venus-enc-objs += venc.o venc_ctrls.o diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index 07312a2fab24..194b10b98767 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -3,7 +3,6 @@ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. * Copyright (C) 2017 Linaro Ltd. */ -#include <linux/clk.h> #include <linux/init.h> #include <linux/interconnect.h> #include <linux/ioctl.h> @@ -19,9 +18,8 @@ #include <media/v4l2-ioctl.h> #include "core.h" -#include "vdec.h" -#include "venc.h" #include "firmware.h" +#include "pm_helpers.h" static void venus_event_notify(struct venus_core *core, u32 event) { @@ -100,50 +98,6 @@ static void venus_sys_error_handler(struct work_struct *work) mutex_unlock(&core->lock); } -static int venus_clks_get(struct venus_core *core) -{ - const struct venus_resources *res = core->res; - struct device *dev = core->dev; - unsigned int i; - - for (i = 0; i < res->clks_num; i++) { - core->clks[i] = devm_clk_get(dev, res->clks[i]); - if (IS_ERR(core->clks[i])) - return PTR_ERR(core->clks[i]); - } - - return 0; -} - -static int venus_clks_enable(struct venus_core *core) -{ - const struct venus_resources *res = core->res; - unsigned int i; - int ret; - - for (i = 0; i < res->clks_num; i++) { - ret = clk_prepare_enable(core->clks[i]); - if (ret) - goto err; - } - - return 0; -err: - while (i--) - clk_disable_unprepare(core->clks[i]); - - return ret; -} - -static void venus_clks_disable(struct venus_core *core) -{ - const struct venus_resources *res = core->res; - unsigned int i = res->clks_num; - - while (i--) - clk_disable_unprepare(core->clks[i]); -} - static u32 to_v4l2_codec_type(u32 codec) { switch (codec) { @@ -256,9 +210,15 @@ static int venus_probe(struct platform_device *pdev) if (!core->res) return -ENODEV; - ret = venus_clks_get(core); - if (ret) - return ret; + core->pm_ops = venus_pm_get(core->res->hfi_version); + if (!core->pm_ops) + return -ENODEV; + + if (core->pm_ops->core_get) { + ret = core->pm_ops->core_get(dev); + if (ret) + return ret; + } ret = dma_set_mask_and_coherent(dev, core->res->dma_mask); if (ret) @@ -350,6 +310,7 @@ err_runtime_disable: static int venus_remove(struct platform_device *pdev) { struct venus_core *core = platform_get_drvdata(pdev); + const struct venus_pm_ops *pm_ops = core->pm_ops; struct device *dev = core->dev; int ret; @@ -368,6 +329,9 @@ static int venus_remove(struct platform_device *pdev) pm_runtime_put_sync(dev); pm_runtime_disable(dev); + if (pm_ops->core_put) + pm_ops->core_put(dev); + icc_put(core->video_path); icc_put(core->cpucfg_path); @@ -379,11 +343,15 @@ static int venus_remove(struct platform_device *pdev) static __maybe_unused int venus_runtime_suspend(struct device *dev) { struct venus_core *core = dev_get_drvdata(dev); + const struct venus_pm_ops *pm_ops = core->pm_ops; int ret; ret = hfi_core_suspend(core); + if (ret) + return ret; - venus_clks_disable(core); + if (pm_ops->core_power) + ret = 
pm_ops->core_power(dev, POWER_OFF); return ret; } @@ -391,21 +359,16 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev) static __maybe_unused int venus_runtime_resume(struct device *dev) { struct venus_core *core = dev_get_drvdata(dev); + const struct venus_pm_ops *pm_ops = core->pm_ops; int ret; - ret = venus_clks_enable(core); - if (ret) - return ret; - - ret = hfi_core_resume(core, false); - if (ret) - goto err_clks_disable; - - return 0; + if (pm_ops->core_power) { + ret = pm_ops->core_power(dev, POWER_ON); + if (ret) + return ret; + } -err_clks_disable: - venus_clks_disable(core); - return ret; + return hfi_core_resume(core, false); } static const struct dev_pm_ops venus_pm_ops = { @@ -463,6 +426,9 @@ static const struct venus_resources msm8996_res = { .reg_tbl_size = ARRAY_SIZE(msm8996_reg_preset), .clks = {"core", "iface", "bus", "mbus" }, .clks_num = 4, + .vcodec0_clks = { "core" }, + .vcodec1_clks = { "core" }, + .vcodec_clks_num = 1, .max_load = 2563200, .hfi_version = HFI_VERSION_3XX, .vmem_id = VIDC_RESOURCE_NONE, @@ -517,6 +483,35 @@ static const struct venus_resources sdm845_res = { .codec_freq_data_size = ARRAY_SIZE(sdm845_codec_freq_data), .clks = {"core", "iface", "bus" }, .clks_num = 3, + .vcodec0_clks = { "core", "bus" }, + .vcodec1_clks = { "core", "bus" }, + .vcodec_clks_num = 2, + .max_load = 3110400, /* 4096x2160@90 */ + .hfi_version = HFI_VERSION_4XX, + .vmem_id = VIDC_RESOURCE_NONE, + .vmem_size = 0, + .vmem_addr = 0, + .dma_mask = 0xe0000000 - 1, + .fwname = "qcom/venus-5.2/venus.mdt", +}; + +static const struct venus_resources sdm845_res_v2 = { + .freq_tbl = sdm845_freq_table, + .freq_tbl_size = ARRAY_SIZE(sdm845_freq_table), + .bw_tbl_enc = sdm845_bw_table_enc, + .bw_tbl_enc_size = ARRAY_SIZE(sdm845_bw_table_enc), + .bw_tbl_dec = sdm845_bw_table_dec, + .bw_tbl_dec_size = ARRAY_SIZE(sdm845_bw_table_dec), + .codec_freq_data = sdm845_codec_freq_data, + .codec_freq_data_size = ARRAY_SIZE(sdm845_codec_freq_data), + .clks = {"core", "iface", "bus" }, + .clks_num = 3, + .vcodec0_clks = { "vcodec0_core", "vcodec0_bus" }, + .vcodec1_clks = { "vcodec1_core", "vcodec1_bus" }, + .vcodec_clks_num = 2, + .vcodec_pmdomains = { "venus", "vcodec0", "vcodec1" }, + .vcodec_pmdomains_num = 3, + .vcodec_num = 2, .max_load = 3110400, /* 4096x2160@90 */ .hfi_version = HFI_VERSION_4XX, .vmem_id = VIDC_RESOURCE_NONE, @@ -526,10 +521,56 @@ static const struct venus_resources sdm845_res = { .fwname = "qcom/venus-5.2/venus.mdt", }; +static const struct freq_tbl sc7180_freq_table[] = { + { 0, 500000000 }, + { 0, 434000000 }, + { 0, 340000000 }, + { 0, 270000000 }, + { 0, 150000000 }, +}; + +static const struct bw_tbl sc7180_bw_table_enc[] = { + { 972000, 750000, 0, 0, 0 }, /* 3840x2160@30 */ + { 489600, 451000, 0, 0, 0 }, /* 1920x1080@60 */ + { 244800, 234000, 0, 0, 0 }, /* 1920x1080@30 */ +}; + +static const struct bw_tbl sc7180_bw_table_dec[] = { + { 1036800, 1386000, 0, 1875000, 0 }, /* 4096x2160@30 */ + { 489600, 865000, 0, 1146000, 0 }, /* 1920x1080@60 */ + { 244800, 530000, 0, 583000, 0 }, /* 1920x1080@30 */ +}; + +static const struct venus_resources sc7180_res = { + .freq_tbl = sc7180_freq_table, + .freq_tbl_size = ARRAY_SIZE(sc7180_freq_table), + .bw_tbl_enc = sc7180_bw_table_enc, + .bw_tbl_enc_size = ARRAY_SIZE(sc7180_bw_table_enc), + .bw_tbl_dec = sc7180_bw_table_dec, + .bw_tbl_dec_size = ARRAY_SIZE(sc7180_bw_table_dec), + .codec_freq_data = sdm845_codec_freq_data, + .codec_freq_data_size = ARRAY_SIZE(sdm845_codec_freq_data), + .clks = {"core", "iface", "bus" 
}, + .clks_num = 3, + .vcodec0_clks = { "vcodec0_core", "vcodec0_bus" }, + .vcodec_clks_num = 2, + .vcodec_pmdomains = { "venus", "vcodec0" }, + .vcodec_pmdomains_num = 2, + .vcodec_num = 1, + .hfi_version = HFI_VERSION_4XX, + .vmem_id = VIDC_RESOURCE_NONE, + .vmem_size = 0, + .vmem_addr = 0, + .dma_mask = 0xe0000000 - 1, + .fwname = "qcom/venus-5.4/venus.mdt", +}; + static const struct of_device_id venus_dt_match[] = { { .compatible = "qcom,msm8916-venus", .data = &msm8916_res, }, { .compatible = "qcom,msm8996-venus", .data = &msm8996_res, }, { .compatible = "qcom,sdm845-venus", .data = &sdm845_res, }, + { .compatible = "qcom,sdm845-venus-v2", .data = &sdm845_res_v2, }, + { .compatible = "qcom,sc7180-venus", .data = &sc7180_res, }, { } }; MODULE_DEVICE_TABLE(of, venus_dt_match); diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index 11585fb3cae3..bd3ac6a4501f 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h @@ -14,7 +14,9 @@ #include "hfi.h" -#define VIDC_CLKS_NUM_MAX 4 +#define VIDC_CLKS_NUM_MAX 4 +#define VIDC_VCODEC_CLKS_NUM_MAX 2 +#define VIDC_PMDOMAINS_NUM_MAX 3 struct freq_tbl { unsigned int load; @@ -55,6 +57,12 @@ struct venus_resources { unsigned int codec_freq_data_size; const char * const clks[VIDC_CLKS_NUM_MAX]; unsigned int clks_num; + const char * const vcodec0_clks[VIDC_VCODEC_CLKS_NUM_MAX]; + const char * const vcodec1_clks[VIDC_VCODEC_CLKS_NUM_MAX]; + unsigned int vcodec_clks_num; + const char * const vcodec_pmdomains[VIDC_PMDOMAINS_NUM_MAX]; + unsigned int vcodec_pmdomains_num; + unsigned int vcodec_num; enum hfi_version hfi_version; u32 max_load; unsigned int vmem_id; @@ -100,10 +108,10 @@ struct venus_caps { * @base: IO memory base address * @irq: Venus irq * @clks: an array of struct clk pointers - * @core0_clk: a struct clk pointer for core0 - * @core1_clk: a struct clk pointer for core1 - * @core0_bus_clk: a struct clk pointer for core0 bus clock - * @core1_bus_clk: a struct clk pointer for core1 bus clock + * @vcodec0_clks: an array of vcodec0 struct clk pointers + * @vcodec1_clks: an array of vcodec1 struct clk pointers + * @pd_dl_venus: pmdomain device-link for venus domain + * @pmdomains: an array of pmdomains struct device pointers * @vdev_dec: a reference to video device structure for decoder instances * @vdev_enc: a reference to video device structure for encoder instances * @v4l2_dev: a holder for v4l2 device structure @@ -132,12 +140,12 @@ struct venus_core { void __iomem *base; int irq; struct clk *clks[VIDC_CLKS_NUM_MAX]; - struct clk *core0_clk; - struct clk *core1_clk; - struct clk *core0_bus_clk; - struct clk *core1_bus_clk; + struct clk *vcodec0_clks[VIDC_VCODEC_CLKS_NUM_MAX]; + struct clk *vcodec1_clks[VIDC_VCODEC_CLKS_NUM_MAX]; struct icc_path *video_path; struct icc_path *cpucfg_path; + struct device_link *pd_dl_venus; + struct device *pmdomains[VIDC_PMDOMAINS_NUM_MAX]; struct video_device *vdev_dec; struct video_device *vdev_enc; struct v4l2_device v4l2_dev; @@ -159,6 +167,7 @@ struct venus_core { unsigned int error; bool sys_error; const struct hfi_core_ops *core_ops; + const struct venus_pm_ops *pm_ops; unsigned long enc_codecs; unsigned long dec_codecs; unsigned int max_sessions_supported; @@ -172,6 +181,8 @@ struct venus_core { struct delayed_work work; struct venus_caps caps[MAX_CODEC_NUM]; unsigned int codecs_count; + unsigned int core0_usage_count; + unsigned int core1_usage_count; }; struct vdec_controls { @@ -187,6 +198,7 @@ struct 
venc_controls { u32 bitrate_mode; u32 bitrate; u32 bitrate_peak; + u32 rc_enable; u32 h264_i_period; u32 h264_entropy_mode; @@ -344,6 +356,7 @@ struct venus_inst { unsigned int subscriptions; int buf_count; struct venus_ts_metadata tss[VIDEO_MAX_FRAME]; + unsigned long payloads[VIDEO_MAX_FRAME]; u64 fps; struct v4l2_fract timeperframe; const struct venus_format *fmt_out; @@ -370,6 +383,8 @@ struct venus_inst { const struct hfi_inst_ops *ops; u32 session_type; union hfi_get_property hprop; + unsigned int core_acquired: 1; + unsigned int bit_depth; }; #define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX) diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c index d3d1748a7ef6..8801a6a7543d 100644 --- a/drivers/media/platform/qcom/venus/firmware.c +++ b/drivers/media/platform/qcom/venus/firmware.c @@ -44,8 +44,14 @@ static void venus_reset_cpu(struct venus_core *core) int venus_set_hw_state(struct venus_core *core, bool resume) { - if (core->use_tz) - return qcom_scm_set_remote_state(resume, 0); + int ret; + + if (core->use_tz) { + ret = qcom_scm_set_remote_state(resume, 0); + if (resume && ret == -EINVAL) + ret = 0; + return ret; + } if (resume) venus_reset_cpu(core); @@ -100,8 +106,7 @@ static int venus_load_fw(struct venus_core *core, const char *fwname, mem_va = memremap(r.start, *mem_size, MEMREMAP_WC); if (!mem_va) { - dev_err(dev, "unable to map memory region: %pa+%zx\n", - &r.start, *mem_size); + dev_err(dev, "unable to map memory region: %pR\n", &r); ret = -ENOMEM; goto err_release_fw; } diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c index a172f1ac0b35..bcc603804041 100644 --- a/drivers/media/platform/qcom/venus/helpers.c +++ b/drivers/media/platform/qcom/venus/helpers.c @@ -3,12 +3,8 @@ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. * Copyright (C) 2017 Linaro Ltd. 
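The new pm_ops pointer in struct venus_core is how the rest of the driver reaches the version-specific power code: venus_pm_get() returns one of three callback tables keyed by HFI version, and every hook in the table is optional, so call sites check for NULL before dispatching. A minimal standalone sketch of that dispatch pattern follows; pm_ops, pm_get and v4_core_power are illustrative stand-ins, not the driver's actual symbols.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the per-version venus_pm_ops table. */
struct pm_ops {
	int (*core_get)(void *dev);            /* optional */
	int (*core_power)(void *dev, int on);  /* optional */
};

static int v4_core_power(void *dev, int on)
{
	(void)dev;
	printf("core power %s\n", on ? "on" : "off");
	return 0;
}

static const struct pm_ops pm_ops_v1 = { 0 };
static const struct pm_ops pm_ops_v4 = { .core_power = v4_core_power };

/* Pick a table by IP generation; unknown versions fall back to v1. */
static const struct pm_ops *pm_get(int version)
{
	return version >= 4 ? &pm_ops_v4 : &pm_ops_v1;
}

int main(void)
{
	const struct pm_ops *ops = pm_get(4);

	/* Every callback is optional, so each call site checks for NULL. */
	if (ops->core_get && ops->core_get(NULL))
		return 1;
	if (ops->core_power)
		ops->core_power(NULL, 1);
	return 0;
}

The same NULL-guarded calls appear in venus_probe() and in the runtime suspend/resume handlers earlier in this patch.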
*/ -#include <linux/clk.h> -#include <linux/iopoll.h> -#include <linux/interconnect.h> #include <linux/list.h> #include <linux/mutex.h> -#include <linux/pm_runtime.h> #include <linux/slab.h> #include <media/videobuf2-dma-sg.h> #include <media/v4l2-mem2mem.h> @@ -17,7 +13,7 @@ #include "core.h" #include "helpers.h" #include "hfi_helper.h" -#include "hfi_venus_io.h" +#include "pm_helpers.h" struct intbuf { struct list_head list; @@ -360,266 +356,6 @@ err: } EXPORT_SYMBOL_GPL(venus_helper_intbufs_realloc); -static u32 load_per_instance(struct venus_inst *inst) -{ - u32 mbs; - - if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP)) - return 0; - - mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16); - - return mbs * inst->fps; -} - -static u32 load_per_type(struct venus_core *core, u32 session_type) -{ - struct venus_inst *inst = NULL; - u32 mbs_per_sec = 0; - - mutex_lock(&core->lock); - list_for_each_entry(inst, &core->instances, list) { - if (inst->session_type != session_type) - continue; - - mbs_per_sec += load_per_instance(inst); - } - mutex_unlock(&core->lock); - - return mbs_per_sec; -} - -static void mbs_to_bw(struct venus_inst *inst, u32 mbs, u32 *avg, u32 *peak) -{ - const struct venus_resources *res = inst->core->res; - const struct bw_tbl *bw_tbl; - unsigned int num_rows, i; - - *avg = 0; - *peak = 0; - - if (mbs == 0) - return; - - if (inst->session_type == VIDC_SESSION_TYPE_ENC) { - num_rows = res->bw_tbl_enc_size; - bw_tbl = res->bw_tbl_enc; - } else if (inst->session_type == VIDC_SESSION_TYPE_DEC) { - num_rows = res->bw_tbl_dec_size; - bw_tbl = res->bw_tbl_dec; - } else { - return; - } - - if (!bw_tbl || num_rows == 0) - return; - - for (i = 0; i < num_rows; i++) { - if (mbs > bw_tbl[i].mbs_per_sec) - break; - - if (inst->dpb_fmt & HFI_COLOR_FORMAT_10_BIT_BASE) { - *avg = bw_tbl[i].avg_10bit; - *peak = bw_tbl[i].peak_10bit; - } else { - *avg = bw_tbl[i].avg; - *peak = bw_tbl[i].peak; - } - } -} - -static int load_scale_bw(struct venus_core *core) -{ - struct venus_inst *inst = NULL; - u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0; - - mutex_lock(&core->lock); - list_for_each_entry(inst, &core->instances, list) { - mbs_per_sec = load_per_instance(inst); - mbs_to_bw(inst, mbs_per_sec, &avg, &peak); - total_avg += avg; - total_peak += peak; - } - mutex_unlock(&core->lock); - - dev_dbg(core->dev, "total: avg_bw: %u, peak_bw: %u\n", - total_avg, total_peak); - - return icc_set_bw(core->video_path, total_avg, total_peak); -} - -static int set_clk_freq(struct venus_core *core, unsigned long freq) -{ - struct clk *clk = core->clks[0]; - int ret; - - ret = clk_set_rate(clk, freq); - if (ret) - return ret; - - ret = clk_set_rate(core->core0_clk, freq); - if (ret) - return ret; - - ret = clk_set_rate(core->core1_clk, freq); - if (ret) - return ret; - - return 0; -} - -static int scale_clocks(struct venus_inst *inst) -{ - struct venus_core *core = inst->core; - const struct freq_tbl *table = core->res->freq_tbl; - unsigned int num_rows = core->res->freq_tbl_size; - unsigned long freq = table[0].freq; - struct device *dev = core->dev; - u32 mbs_per_sec; - unsigned int i; - int ret; - - mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) + - load_per_type(core, VIDC_SESSION_TYPE_DEC); - - if (mbs_per_sec > core->res->max_load) - dev_warn(dev, "HW is overloaded, needed: %d max: %d\n", - mbs_per_sec, core->res->max_load); - - if (!mbs_per_sec && num_rows > 1) { - freq = table[num_rows - 1].freq; - goto set_freq; - } - - for (i = 0; i < 
num_rows; i++) { - if (mbs_per_sec > table[i].load) - break; - freq = table[i].freq; - } - -set_freq: - - ret = set_clk_freq(core, freq); - if (ret) { - dev_err(dev, "failed to set clock rate %lu (%d)\n", - freq, ret); - return ret; - } - - ret = load_scale_bw(core); - if (ret) { - dev_err(dev, "failed to set bandwidth (%d)\n", - ret); - return ret; - } - - return 0; -} - -static unsigned long calculate_inst_freq(struct venus_inst *inst, - unsigned long filled_len) -{ - unsigned long vpp_freq = 0, vsp_freq = 0; - u32 fps = (u32)inst->fps; - u32 mbs_per_sec; - - mbs_per_sec = load_per_instance(inst) / fps; - - vpp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vpp_freq; - /* 21 / 20 is overhead factor */ - vpp_freq += vpp_freq / 20; - vsp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vsp_freq; - - /* 10 / 7 is overhead factor */ - if (inst->session_type == VIDC_SESSION_TYPE_ENC) - vsp_freq += (inst->controls.enc.bitrate * 10) / 7; - else - vsp_freq += ((fps * filled_len * 8) * 10) / 7; - - return max(vpp_freq, vsp_freq); -} - -static int scale_clocks_v4(struct venus_inst *inst) -{ - struct venus_core *core = inst->core; - const struct freq_tbl *table = core->res->freq_tbl; - unsigned int num_rows = core->res->freq_tbl_size; - struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx; - struct device *dev = core->dev; - unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0; - unsigned long filled_len = 0; - struct venus_buffer *buf, *n; - struct vb2_buffer *vb; - int i, ret; - - v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) { - vb = &buf->vb.vb2_buf; - filled_len = max(filled_len, vb2_get_plane_payload(vb, 0)); - } - - if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len) - return 0; - - freq = calculate_inst_freq(inst, filled_len); - inst->clk_data.freq = freq; - - mutex_lock(&core->lock); - list_for_each_entry(inst, &core->instances, list) { - if (inst->clk_data.core_id == VIDC_CORE_ID_1) { - freq_core1 += inst->clk_data.freq; - } else if (inst->clk_data.core_id == VIDC_CORE_ID_2) { - freq_core2 += inst->clk_data.freq; - } else if (inst->clk_data.core_id == VIDC_CORE_ID_3) { - freq_core1 += inst->clk_data.freq; - freq_core2 += inst->clk_data.freq; - } - } - mutex_unlock(&core->lock); - - freq = max(freq_core1, freq_core2); - - if (freq >= table[0].freq) { - freq = table[0].freq; - dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n", - freq, table[0].freq); - goto set_freq; - } - - for (i = num_rows - 1 ; i >= 0; i--) { - if (freq <= table[i].freq) { - freq = table[i].freq; - break; - } - } - -set_freq: - - ret = set_clk_freq(core, freq); - if (ret) { - dev_err(dev, "failed to set clock rate %lu (%d)\n", - freq, ret); - return ret; - } - - ret = load_scale_bw(core); - if (ret) { - dev_err(dev, "failed to set bandwidth (%d)\n", - ret); - return ret; - } - - return 0; -} - -int venus_helper_load_scale_clocks(struct venus_inst *inst) -{ - if (IS_V4(inst->core)) - return scale_clocks_v4(inst); - - return scale_clocks(inst); -} -EXPORT_SYMBOL_GPL(venus_helper_load_scale_clocks); - static void fill_buffer_desc(const struct venus_buffer *buf, struct hfi_buffer_desc *bd, bool response) { @@ -723,7 +459,7 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf) if (inst->session_type == VIDC_SESSION_TYPE_DEC) put_ts_metadata(inst, vbuf); - venus_helper_load_scale_clocks(inst); + venus_pm_load_scale(inst); } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { if (inst->session_type == VIDC_SESSION_TYPE_ENC) fdata.buffer_type = HFI_BUFFER_OUTPUT; @@ -890,6 
+626,78 @@ static u32 get_framesize_raw_nv12_ubwc(u32 width, u32 height) max(extradata, y_stride * 48), SZ_4K); } +static u32 get_framesize_raw_p010(u32 width, u32 height) +{ + u32 y_plane, uv_plane, y_stride, uv_stride, y_sclines, uv_sclines; + + y_stride = ALIGN(width * 2, 256); + uv_stride = ALIGN(width * 2, 256); + y_sclines = ALIGN(height, 32); + uv_sclines = ALIGN((height + 1) >> 1, 16); + y_plane = y_stride * y_sclines; + uv_plane = uv_stride * uv_sclines; + + return ALIGN((y_plane + uv_plane), SZ_4K); +} + +static u32 get_framesize_raw_p010_ubwc(u32 width, u32 height) +{ + u32 y_stride, uv_stride, y_sclines, uv_sclines; + u32 y_ubwc_plane, uv_ubwc_plane; + u32 y_meta_stride, y_meta_scanlines; + u32 uv_meta_stride, uv_meta_scanlines; + u32 y_meta_plane, uv_meta_plane; + u32 size; + + y_stride = ALIGN(width * 2, 256); + uv_stride = ALIGN(width * 2, 256); + y_sclines = ALIGN(height, 16); + uv_sclines = ALIGN((height + 1) >> 1, 16); + + y_ubwc_plane = ALIGN(y_stride * y_sclines, SZ_4K); + uv_ubwc_plane = ALIGN(uv_stride * uv_sclines, SZ_4K); + y_meta_stride = ALIGN(DIV_ROUND_UP(width, 32), 64); + y_meta_scanlines = ALIGN(DIV_ROUND_UP(height, 4), 16); + y_meta_plane = ALIGN(y_meta_stride * y_meta_scanlines, SZ_4K); + uv_meta_stride = ALIGN(DIV_ROUND_UP((width + 1) >> 1, 16), 64); + uv_meta_scanlines = ALIGN(DIV_ROUND_UP((height + 1) >> 1, 4), 16); + uv_meta_plane = ALIGN(uv_meta_stride * uv_meta_scanlines, SZ_4K); + + size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane + uv_meta_plane; + + return ALIGN(size, SZ_4K); +} + +static u32 get_framesize_raw_yuv420_tp10_ubwc(u32 width, u32 height) +{ + u32 y_stride, uv_stride, y_sclines, uv_sclines; + u32 y_ubwc_plane, uv_ubwc_plane; + u32 y_meta_stride, y_meta_scanlines; + u32 uv_meta_stride, uv_meta_scanlines; + u32 y_meta_plane, uv_meta_plane; + u32 extradata = SZ_16K; + u32 size; + + y_stride = ALIGN(ALIGN(width, 192) * 4 / 3, 256); + uv_stride = ALIGN(ALIGN(width, 192) * 4 / 3, 256); + y_sclines = ALIGN(height, 16); + uv_sclines = ALIGN((height + 1) >> 1, 16); + + y_ubwc_plane = ALIGN(y_stride * y_sclines, SZ_4K); + uv_ubwc_plane = ALIGN(uv_stride * uv_sclines, SZ_4K); + y_meta_stride = ALIGN(DIV_ROUND_UP(width, 48), 64); + y_meta_scanlines = ALIGN(DIV_ROUND_UP(height, 4), 16); + y_meta_plane = ALIGN(y_meta_stride * y_meta_scanlines, SZ_4K); + uv_meta_stride = ALIGN(DIV_ROUND_UP((width + 1) >> 1, 24), 64); + uv_meta_scanlines = ALIGN(DIV_ROUND_UP((height + 1) >> 1, 4), 16); + uv_meta_plane = ALIGN(uv_meta_stride * uv_meta_scanlines, SZ_4K); + + size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane + uv_meta_plane; + size += max(extradata + SZ_8K, y_stride * 48); + + return ALIGN(size, SZ_4K); +} + u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height) { switch (hfi_fmt) { @@ -898,6 +706,12 @@ u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height) return get_framesize_raw_nv12(width, height); case HFI_COLOR_FORMAT_NV12_UBWC: return get_framesize_raw_nv12_ubwc(width, height); + case HFI_COLOR_FORMAT_P010: + return get_framesize_raw_p010(width, height); + case HFI_COLOR_FORMAT_P010_UBWC: + return get_framesize_raw_p010_ubwc(width, height); + case HFI_COLOR_FORMAT_YUV420_TP10_UBWC: + return get_framesize_raw_yuv420_tp10_ubwc(width, height); default: return 0; } @@ -987,21 +801,6 @@ int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode) } EXPORT_SYMBOL_GPL(venus_helper_set_work_mode); -int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage) -{ - const u32 ptype = 
HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE; - struct hfi_videocores_usage_type cu; - - inst->clk_data.core_id = usage; - if (!IS_V4(inst->core)) - return 0; - - cu.video_core_enable_mask = usage; - - return hfi_session_set_property(inst, ptype, &cu); -} -EXPORT_SYMBOL_GPL(venus_helper_set_core_usage); - int venus_helper_init_codec_freq_data(struct venus_inst *inst) { const struct codec_freq_data *data; @@ -1289,6 +1088,15 @@ int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb) } EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare); +static void cache_payload(struct venus_inst *inst, struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + unsigned int idx = vbuf->vb2_buf.index; + + if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->payloads[idx] = vb2_get_plane_payload(vb, 0); +} + void venus_helper_vb2_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); @@ -1300,6 +1108,8 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb) v4l2_m2m_buf_queue(m2m_ctx, vbuf); + cache_payload(inst, vb); + if (inst->session_type == VIDC_SESSION_TYPE_ENC && !(inst->streamon_out && inst->streamon_cap)) goto unlock; @@ -1354,7 +1164,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q) venus_helper_free_dpb_bufs(inst); - venus_helper_load_scale_clocks(inst); + venus_pm_load_scale(inst); INIT_LIST_HEAD(&inst->registeredbufs); } @@ -1365,6 +1175,8 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q) else inst->streamon_cap = 0; + venus_pm_release_core(inst); + mutex_unlock(&inst->lock); } EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming); @@ -1417,7 +1229,7 @@ int venus_helper_vb2_start_streaming(struct venus_inst *inst) if (ret) goto err_bufs_free; - venus_helper_load_scale_clocks(inst); + venus_pm_load_scale(inst); ret = hfi_session_load_res(inst); if (ret) @@ -1512,6 +1324,27 @@ int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt, if (!caps) return -EINVAL; + if (inst->bit_depth == VIDC_BITDEPTH_10 && + inst->session_type == VIDC_SESSION_TYPE_DEC) { + found_ubwc = + find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, + HFI_COLOR_FORMAT_YUV420_TP10_UBWC); + found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, + HFI_COLOR_FORMAT_NV12); + if (found_ubwc && found) { + /* + * Hard-code DPB buffers to be 10bit UBWC and decoder + * output buffers in 8bit NV12 until V4L2 is able to + * expose compressed/tiled formats to applications. 
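The raw-format size helpers added in this hunk all follow the same shape: align the luma and chroma strides and scanline counts, sum the plane sizes (plus metadata planes for the UBWC variants), and round the total up to 4 KiB. A rough standalone check of the P010 rule is sketched below; the local ALIGN macro and framesize_p010() are illustrative stand-ins mirroring get_framesize_raw_p010() above, not kernel code.

#include <stdio.h>

#define SZ_4K 4096u
/* Round x up to a multiple of a (a must be a power of two). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Mirrors the P010 rule above: 2 bytes per sample, 256-byte strides,
 * 32-/16-line plane heights, total rounded up to 4 KiB. */
static unsigned int framesize_p010(unsigned int width, unsigned int height)
{
	unsigned int y_stride = ALIGN(width * 2, 256u);
	unsigned int uv_stride = ALIGN(width * 2, 256u);
	unsigned int y_sclines = ALIGN(height, 32u);
	unsigned int uv_sclines = ALIGN((height + 1) >> 1, 16u);

	return ALIGN(y_stride * y_sclines + uv_stride * uv_sclines, SZ_4K);
}

int main(void)
{
	/* 1920x1080: 3840 * 1088 + 3840 * 544 = 6266880 bytes (4 KiB aligned). */
	printf("%u\n", framesize_p010(1920, 1080));
	return 0;
}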
+ */ + *out_fmt = HFI_COLOR_FORMAT_YUV420_TP10_UBWC; + *out2_fmt = HFI_COLOR_FORMAT_NV12; + return 0; + } + + return -EINVAL; + } + if (ubwc) { ubwc_fmt = fmt | HFI_COLOR_FORMAT_UBWC_BASE; found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, @@ -1542,52 +1375,3 @@ int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt, return -EINVAL; } EXPORT_SYMBOL_GPL(venus_helper_get_out_fmts); - -int venus_helper_power_enable(struct venus_core *core, u32 session_type, - bool enable) -{ - void __iomem *ctrl, *stat; - u32 val; - int ret; - - if (!IS_V3(core) && !IS_V4(core)) - return 0; - - if (IS_V3(core)) { - if (session_type == VIDC_SESSION_TYPE_DEC) - ctrl = core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL; - else - ctrl = core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL; - if (enable) - writel(0, ctrl); - else - writel(1, ctrl); - - return 0; - } - - if (session_type == VIDC_SESSION_TYPE_DEC) { - ctrl = core->base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL; - stat = core->base + WRAPPER_VCODEC0_MMCC_POWER_STATUS; - } else { - ctrl = core->base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL; - stat = core->base + WRAPPER_VCODEC1_MMCC_POWER_STATUS; - } - - if (enable) { - writel(0, ctrl); - - ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100); - if (ret) - return ret; - } else { - writel(1, ctrl); - - ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100); - if (ret) - return ret; - } - - return 0; -} -EXPORT_SYMBOL_GPL(venus_helper_power_enable); diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h index 34dcd0c13f06..b64875564064 100644 --- a/drivers/media/platform/qcom/venus/helpers.h +++ b/drivers/media/platform/qcom/venus/helpers.h @@ -34,7 +34,6 @@ int venus_helper_set_output_resolution(struct venus_inst *inst, u32 buftype); int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode); int venus_helper_init_codec_freq_data(struct venus_inst *inst); -int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage); int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs, unsigned int output_bufs, unsigned int output2_bufs); @@ -53,14 +52,11 @@ int venus_helper_get_out_fmts(struct venus_inst *inst, u32 fmt, u32 *out_fmt, u32 *out2_fmt, bool ubwc); int venus_helper_alloc_dpb_bufs(struct venus_inst *inst); int venus_helper_free_dpb_bufs(struct venus_inst *inst); -int venus_helper_power_enable(struct venus_core *core, u32 session_type, - bool enable); int venus_helper_intbufs_alloc(struct venus_inst *inst); int venus_helper_intbufs_free(struct venus_inst *inst); int venus_helper_intbufs_realloc(struct venus_inst *inst); int venus_helper_queue_dpb_bufs(struct venus_inst *inst); int venus_helper_unregister_bufs(struct venus_inst *inst); -int venus_helper_load_scale_clocks(struct venus_inst *inst); int venus_helper_process_initial_cap_bufs(struct venus_inst *inst); int venus_helper_process_initial_out_bufs(struct venus_inst *inst); void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us, diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c index 4f645076abfb..c67e412f8201 100644 --- a/drivers/media/platform/qcom/venus/hfi_cmds.c +++ b/drivers/media/platform/qcom/venus/hfi_cmds.c @@ -1207,6 +1207,8 @@ pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt, case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE: case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER: case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE: + case 
HFI_PROPERTY_PARAM_VENC_SESSION_QP: + case HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE: /* not implemented on Venus 4xx */ return -ENOTSUPP; default: diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h index b70551e296b7..f6613df1d16b 100644 --- a/drivers/media/platform/qcom/venus/hfi_helper.h +++ b/drivers/media/platform/qcom/venus/hfi_helper.h @@ -550,6 +550,7 @@ struct hfi_bitrate { #define HFI_CAPABILITY_LCU_SIZE 0x14 #define HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS 0x15 #define HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE 0x16 +#define HFI_CAPABILITY_MAX_VIDEOCORES 0x2b struct hfi_capability { u32 capability_type; @@ -792,6 +793,9 @@ struct hfi_h264_vui_timing_info { u32 time_scale; }; +#define VIDC_BITDEPTH_8 0x00000 +#define VIDC_BITDEPTH_10 0x20002 + struct hfi_bit_depth { u32 buffer_type; u32 bit_depth; @@ -840,8 +844,10 @@ struct hfi_extradata_input_crop { #define HFI_COLOR_FORMAT_10_BIT_BASE 0x4000 #define HFI_COLOR_FORMAT_YUV420_TP10 0x4002 +#define HFI_COLOR_FORMAT_P010 0x4003 #define HFI_COLOR_FORMAT_NV12_UBWC 0x8002 #define HFI_COLOR_FORMAT_YUV420_TP10_UBWC 0xc002 +#define HFI_COLOR_FORMAT_P010_UBWC 0xc003 #define HFI_COLOR_FORMAT_RGBA8888_UBWC 0x8010 struct hfi_uncompressed_format_select { diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c index 2293d936e49c..7f515a4b9bd1 100644 --- a/drivers/media/platform/qcom/venus/hfi_parser.c +++ b/drivers/media/platform/qcom/venus/hfi_parser.c @@ -181,6 +181,7 @@ static void parse_codecs(struct venus_core *core, void *data) if (IS_V1(core)) { core->dec_codecs &= ~HFI_VIDEO_CODEC_HEVC; core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK; + core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC; } } diff --git a/drivers/media/platform/qcom/venus/hfi_parser.h b/drivers/media/platform/qcom/venus/hfi_parser.h index 3e931c747e19..264e6dd2415f 100644 --- a/drivers/media/platform/qcom/venus/hfi_parser.h +++ b/drivers/media/platform/qcom/venus/hfi_parser.h @@ -107,4 +107,9 @@ static inline u32 frate_step(struct venus_inst *inst) return cap_step(inst, HFI_CAPABILITY_FRAMERATE); } +static inline u32 core_num_max(struct venus_inst *inst) +{ + return cap_max(inst, HFI_CAPABILITY_MAX_VIDEOCORES); +} + #endif diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c new file mode 100644 index 000000000000..abf93158857b --- /dev/null +++ b/drivers/media/platform/qcom/venus/pm_helpers.c @@ -0,0 +1,959 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Linaro Ltd. 
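The new pm_helpers.c keeps the clock handles in fixed-size arrays and always enables them in order, unwinding in reverse order if any clock fails, as core_clks_enable() and vcodec_clks_enable() below show. A compact standalone sketch of that enable/unwind idiom: clk_on() and clk_off() here are printf stand-ins for clk_prepare_enable()/clk_disable_unprepare(), and the forced failure is only to exercise the error path.

#include <stdio.h>

#define NUM_CLKS 3

/* printf stand-ins for clk_prepare_enable()/clk_disable_unprepare(). */
static int clk_on(int i)
{
	printf("enable clk %d\n", i);
	return i == 2 ? -1 : 0;	/* pretend the last clock fails */
}

static void clk_off(int i)
{
	printf("disable clk %d\n", i);
}

/* Enable every clock in order; on failure unwind the ones already
 * enabled, in reverse order, mirroring core_clks_enable() below. */
static int clks_enable(void)
{
	int i, ret;

	for (i = 0; i < NUM_CLKS; i++) {
		ret = clk_on(i);
		if (ret)
			goto err;
	}
	return 0;
err:
	while (i--)
		clk_off(i);
	return ret;
}

int main(void)
{
	return clks_enable() ? 1 : 0;
}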
+ * + * Author: Stanimir Varbanov <stanimir.varbanov@linaro.org> + */ +#include <linux/clk.h> +#include <linux/interconnect.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/types.h> +#include <media/v4l2-mem2mem.h> + +#include "core.h" +#include "hfi_parser.h" +#include "hfi_venus_io.h" +#include "pm_helpers.h" + +static bool legacy_binding; + +static int core_clks_get(struct venus_core *core) +{ + const struct venus_resources *res = core->res; + struct device *dev = core->dev; + unsigned int i; + + for (i = 0; i < res->clks_num; i++) { + core->clks[i] = devm_clk_get(dev, res->clks[i]); + if (IS_ERR(core->clks[i])) + return PTR_ERR(core->clks[i]); + } + + return 0; +} + +static int core_clks_enable(struct venus_core *core) +{ + const struct venus_resources *res = core->res; + unsigned int i; + int ret; + + for (i = 0; i < res->clks_num; i++) { + ret = clk_prepare_enable(core->clks[i]); + if (ret) + goto err; + } + + return 0; +err: + while (i--) + clk_disable_unprepare(core->clks[i]); + + return ret; +} + +static void core_clks_disable(struct venus_core *core) +{ + const struct venus_resources *res = core->res; + unsigned int i = res->clks_num; + + while (i--) + clk_disable_unprepare(core->clks[i]); +} + +static int core_clks_set_rate(struct venus_core *core, unsigned long freq) +{ + struct clk *clk = core->clks[0]; + int ret; + + ret = clk_set_rate(clk, freq); + if (ret) + return ret; + + ret = clk_set_rate(core->vcodec0_clks[0], freq); + if (ret) + return ret; + + ret = clk_set_rate(core->vcodec1_clks[0], freq); + if (ret) + return ret; + + return 0; +} + +static int vcodec_clks_get(struct venus_core *core, struct device *dev, + struct clk **clks, const char * const *id) +{ + const struct venus_resources *res = core->res; + unsigned int i; + + for (i = 0; i < res->vcodec_clks_num; i++) { + if (!id[i]) + continue; + clks[i] = devm_clk_get(dev, id[i]); + if (IS_ERR(clks[i])) + return PTR_ERR(clks[i]); + } + + return 0; +} + +static int vcodec_clks_enable(struct venus_core *core, struct clk **clks) +{ + const struct venus_resources *res = core->res; + unsigned int i; + int ret; + + for (i = 0; i < res->vcodec_clks_num; i++) { + ret = clk_prepare_enable(clks[i]); + if (ret) + goto err; + } + + return 0; +err: + while (i--) + clk_disable_unprepare(clks[i]); + + return ret; +} + +static void vcodec_clks_disable(struct venus_core *core, struct clk **clks) +{ + const struct venus_resources *res = core->res; + unsigned int i = res->vcodec_clks_num; + + while (i--) + clk_disable_unprepare(clks[i]); +} + +static u32 load_per_instance(struct venus_inst *inst) +{ + u32 mbs; + + if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP)) + return 0; + + mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16); + + return mbs * inst->fps; +} + +static u32 load_per_type(struct venus_core *core, u32 session_type) +{ + struct venus_inst *inst = NULL; + u32 mbs_per_sec = 0; + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + if (inst->session_type != session_type) + continue; + + mbs_per_sec += load_per_instance(inst); + } + mutex_unlock(&core->lock); + + return mbs_per_sec; +} + +static void mbs_to_bw(struct venus_inst *inst, u32 mbs, u32 *avg, u32 *peak) +{ + const struct venus_resources *res = inst->core->res; + const struct bw_tbl *bw_tbl; + unsigned int num_rows, i; + + *avg = 0; + *peak = 0; + + if (mbs == 0) + return; + + if (inst->session_type == 
VIDC_SESSION_TYPE_ENC) { + num_rows = res->bw_tbl_enc_size; + bw_tbl = res->bw_tbl_enc; + } else if (inst->session_type == VIDC_SESSION_TYPE_DEC) { + num_rows = res->bw_tbl_dec_size; + bw_tbl = res->bw_tbl_dec; + } else { + return; + } + + if (!bw_tbl || num_rows == 0) + return; + + for (i = 0; i < num_rows; i++) { + if (mbs > bw_tbl[i].mbs_per_sec) + break; + + if (inst->dpb_fmt & HFI_COLOR_FORMAT_10_BIT_BASE) { + *avg = bw_tbl[i].avg_10bit; + *peak = bw_tbl[i].peak_10bit; + } else { + *avg = bw_tbl[i].avg; + *peak = bw_tbl[i].peak; + } + } +} + +static int load_scale_bw(struct venus_core *core) +{ + struct venus_inst *inst = NULL; + u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0; + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + mbs_per_sec = load_per_instance(inst); + mbs_to_bw(inst, mbs_per_sec, &avg, &peak); + total_avg += avg; + total_peak += peak; + } + mutex_unlock(&core->lock); + + dev_dbg(core->dev, "total: avg_bw: %u, peak_bw: %u\n", + total_avg, total_peak); + + return icc_set_bw(core->video_path, total_avg, total_peak); +} + +static int load_scale_v1(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + const struct freq_tbl *table = core->res->freq_tbl; + unsigned int num_rows = core->res->freq_tbl_size; + unsigned long freq = table[0].freq; + struct device *dev = core->dev; + u32 mbs_per_sec; + unsigned int i; + int ret; + + mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) + + load_per_type(core, VIDC_SESSION_TYPE_DEC); + + if (mbs_per_sec > core->res->max_load) + dev_warn(dev, "HW is overloaded, needed: %d max: %d\n", + mbs_per_sec, core->res->max_load); + + if (!mbs_per_sec && num_rows > 1) { + freq = table[num_rows - 1].freq; + goto set_freq; + } + + for (i = 0; i < num_rows; i++) { + if (mbs_per_sec > table[i].load) + break; + freq = table[i].freq; + } + +set_freq: + + ret = core_clks_set_rate(core, freq); + if (ret) { + dev_err(dev, "failed to set clock rate %lu (%d)\n", + freq, ret); + return ret; + } + + ret = load_scale_bw(core); + if (ret) { + dev_err(dev, "failed to set bandwidth (%d)\n", + ret); + return ret; + } + + return 0; +} + +static int core_get_v1(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + + return core_clks_get(core); +} + +static int core_power_v1(struct device *dev, int on) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret = 0; + + if (on == POWER_ON) + ret = core_clks_enable(core); + else + core_clks_disable(core); + + return ret; +} + +static const struct venus_pm_ops pm_ops_v1 = { + .core_get = core_get_v1, + .core_power = core_power_v1, + .load_scale = load_scale_v1, +}; + +static void +vcodec_control_v3(struct venus_core *core, u32 session_type, bool enable) +{ + void __iomem *ctrl; + + if (session_type == VIDC_SESSION_TYPE_DEC) + ctrl = core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL; + else + ctrl = core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL; + + if (enable) + writel(0, ctrl); + else + writel(1, ctrl); +} + +static int vdec_get_v3(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + + return vcodec_clks_get(core, dev, core->vcodec0_clks, + core->res->vcodec0_clks); +} + +static int vdec_power_v3(struct device *dev, int on) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret = 0; + + vcodec_control_v3(core, VIDC_SESSION_TYPE_DEC, true); + + if (on == POWER_ON) + ret = vcodec_clks_enable(core, core->vcodec0_clks); + else + vcodec_clks_disable(core, core->vcodec0_clks); + + vcodec_control_v3(core, 
VIDC_SESSION_TYPE_DEC, false); + + return ret; +} + +static int venc_get_v3(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + + return vcodec_clks_get(core, dev, core->vcodec1_clks, + core->res->vcodec1_clks); +} + +static int venc_power_v3(struct device *dev, int on) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret = 0; + + vcodec_control_v3(core, VIDC_SESSION_TYPE_ENC, true); + + if (on == POWER_ON) + ret = vcodec_clks_enable(core, core->vcodec1_clks); + else + vcodec_clks_disable(core, core->vcodec1_clks); + + vcodec_control_v3(core, VIDC_SESSION_TYPE_ENC, false); + + return ret; +} + +static const struct venus_pm_ops pm_ops_v3 = { + .core_get = core_get_v1, + .core_power = core_power_v1, + .vdec_get = vdec_get_v3, + .vdec_power = vdec_power_v3, + .venc_get = venc_get_v3, + .venc_power = venc_power_v3, + .load_scale = load_scale_v1, +}; + +static int vcodec_control_v4(struct venus_core *core, u32 coreid, bool enable) +{ + void __iomem *ctrl, *stat; + u32 val; + int ret; + + if (coreid == VIDC_CORE_ID_1) { + ctrl = core->base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL; + stat = core->base + WRAPPER_VCODEC0_MMCC_POWER_STATUS; + } else { + ctrl = core->base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL; + stat = core->base + WRAPPER_VCODEC1_MMCC_POWER_STATUS; + } + + if (enable) { + writel(0, ctrl); + + ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100); + if (ret) + return ret; + } else { + writel(1, ctrl); + + ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100); + if (ret) + return ret; + } + + return 0; +} + +static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask) +{ + int ret; + + if (coreid_mask & VIDC_CORE_ID_1) { + ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true); + if (ret) + return ret; + + vcodec_clks_disable(core, core->vcodec0_clks); + + ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false); + if (ret) + return ret; + + ret = pm_runtime_put_sync(core->pmdomains[1]); + if (ret < 0) + return ret; + } + + if (coreid_mask & VIDC_CORE_ID_2) { + ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true); + if (ret) + return ret; + + vcodec_clks_disable(core, core->vcodec1_clks); + + ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false); + if (ret) + return ret; + + ret = pm_runtime_put_sync(core->pmdomains[2]); + if (ret < 0) + return ret; + } + + return 0; +} + +static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask) +{ + int ret; + + if (coreid_mask & VIDC_CORE_ID_1) { + ret = pm_runtime_get_sync(core->pmdomains[1]); + if (ret < 0) + return ret; + + ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true); + if (ret) + return ret; + + ret = vcodec_clks_enable(core, core->vcodec0_clks); + if (ret) + return ret; + + ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false); + if (ret < 0) + return ret; + } + + if (coreid_mask & VIDC_CORE_ID_2) { + ret = pm_runtime_get_sync(core->pmdomains[2]); + if (ret < 0) + return ret; + + ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true); + if (ret) + return ret; + + ret = vcodec_clks_enable(core, core->vcodec1_clks); + if (ret) + return ret; + + ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false); + if (ret < 0) + return ret; + } + + return 0; +} + +static void +min_loaded_core(struct venus_inst *inst, u32 *min_coreid, u32 *min_load) +{ + u32 mbs_per_sec, load, core1_load = 0, core2_load = 0; + u32 cores_max = core_num_max(inst); + struct venus_core *core = inst->core; + struct venus_inst *inst_pos; + unsigned long vpp_freq; + u32 coreid; + + mutex_lock(&core->lock); + + 
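min_loaded_core() and decide_core() below spread sessions across the two vcodec cores using a simple load metric: 16x16 macroblocks per frame times frame rate (load_per_instance()), weighted by the codec's vpp cycle cost. The bandwidth-table keys earlier in this patch (244800, 489600, 972000, ...) are exactly these macroblock rates for common resolutions. A standalone sketch of the arithmetic; ALIGN16 and mbs_per_sec() are illustrative names, not driver symbols.

#include <stdio.h>

/* 16-pixel round-up, matching the ALIGN(x, 16) in load_per_instance(). */
#define ALIGN16(x) (((x) + 15u) & ~15u)

/* Per-instance load in 16x16 macroblocks per second. */
static unsigned int mbs_per_sec(unsigned int w, unsigned int h, unsigned int fps)
{
	return (ALIGN16(w) / 16) * (ALIGN16(h) / 16) * fps;
}

int main(void)
{
	/* 244800, 489600 and 972000 -- the keys used by the bw_tbl rows above. */
	printf("%u %u %u\n",
	       mbs_per_sec(1920, 1080, 30),
	       mbs_per_sec(1920, 1080, 60),
	       mbs_per_sec(3840, 2160, 30));
	return 0;
}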
list_for_each_entry(inst_pos, &core->instances, list) { + if (inst_pos == inst) + continue; + vpp_freq = inst_pos->clk_data.codec_freq_data->vpp_freq; + coreid = inst_pos->clk_data.core_id; + + mbs_per_sec = load_per_instance(inst_pos); + load = mbs_per_sec * vpp_freq; + + if ((coreid & VIDC_CORE_ID_3) == VIDC_CORE_ID_3) { + core1_load += load / 2; + core2_load += load / 2; + } else if (coreid & VIDC_CORE_ID_1) { + core1_load += load; + } else if (coreid & VIDC_CORE_ID_2) { + core2_load += load; + } + } + + *min_coreid = core1_load <= core2_load ? + VIDC_CORE_ID_1 : VIDC_CORE_ID_2; + *min_load = min(core1_load, core2_load); + + if (cores_max < VIDC_CORE_ID_2 || core->res->vcodec_num < 2) { + *min_coreid = VIDC_CORE_ID_1; + *min_load = core1_load; + } + + mutex_unlock(&core->lock); +} + +static int decide_core(struct venus_inst *inst) +{ + const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE; + struct venus_core *core = inst->core; + u32 min_coreid, min_load, inst_load; + struct hfi_videocores_usage_type cu; + unsigned long max_freq; + + if (legacy_binding) { + if (inst->session_type == VIDC_SESSION_TYPE_DEC) + cu.video_core_enable_mask = VIDC_CORE_ID_1; + else + cu.video_core_enable_mask = VIDC_CORE_ID_2; + + goto done; + } + + if (inst->clk_data.core_id != VIDC_CORE_ID_DEFAULT) + return 0; + + inst_load = load_per_instance(inst); + inst_load *= inst->clk_data.codec_freq_data->vpp_freq; + max_freq = core->res->freq_tbl[0].freq; + + min_loaded_core(inst, &min_coreid, &min_load); + + if ((inst_load + min_load) > max_freq) { + dev_warn(core->dev, "HW is overloaded, needed: %u max: %lu\n", + inst_load, max_freq); + return -EINVAL; + } + + inst->clk_data.core_id = min_coreid; + cu.video_core_enable_mask = min_coreid; + +done: + return hfi_session_set_property(inst, ptype, &cu); +} + +static int acquire_core(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + unsigned int coreid_mask = 0; + + if (inst->core_acquired) + return 0; + + inst->core_acquired = true; + + if (inst->clk_data.core_id & VIDC_CORE_ID_1) { + if (core->core0_usage_count++) + return 0; + + coreid_mask = VIDC_CORE_ID_1; + } + + if (inst->clk_data.core_id & VIDC_CORE_ID_2) { + if (core->core1_usage_count++) + return 0; + + coreid_mask |= VIDC_CORE_ID_2; + } + + return poweron_coreid(core, coreid_mask); +} + +static int release_core(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + unsigned int coreid_mask = 0; + int ret; + + if (!inst->core_acquired) + return 0; + + if (inst->clk_data.core_id & VIDC_CORE_ID_1) { + if (--core->core0_usage_count) + goto done; + + coreid_mask = VIDC_CORE_ID_1; + } + + if (inst->clk_data.core_id & VIDC_CORE_ID_2) { + if (--core->core1_usage_count) + goto done; + + coreid_mask |= VIDC_CORE_ID_2; + } + + ret = poweroff_coreid(core, coreid_mask); + if (ret) + return ret; + +done: + inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT; + inst->core_acquired = false; + return 0; +} + +static int coreid_power_v4(struct venus_inst *inst, int on) +{ + struct venus_core *core = inst->core; + int ret; + + if (legacy_binding) + return 0; + + if (on == POWER_ON) { + ret = decide_core(inst); + if (ret) + return ret; + + mutex_lock(&core->lock); + ret = acquire_core(inst); + mutex_unlock(&core->lock); + } else { + mutex_lock(&core->lock); + ret = release_core(inst); + mutex_unlock(&core->lock); + } + + return ret; +} + +static int vdec_get_v4(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + + if (!legacy_binding) + return 0; + + return 
vcodec_clks_get(core, dev, core->vcodec0_clks, + core->res->vcodec0_clks); +} + +static void vdec_put_v4(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + unsigned int i; + + if (!legacy_binding) + return; + + for (i = 0; i < core->res->vcodec_clks_num; i++) + core->vcodec0_clks[i] = NULL; +} + +static int vdec_power_v4(struct device *dev, int on) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret; + + if (!legacy_binding) + return 0; + + ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true); + if (ret) + return ret; + + if (on == POWER_ON) + ret = vcodec_clks_enable(core, core->vcodec0_clks); + else + vcodec_clks_disable(core, core->vcodec0_clks); + + vcodec_control_v4(core, VIDC_CORE_ID_1, false); + + return ret; +} + +static int venc_get_v4(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + + if (!legacy_binding) + return 0; + + return vcodec_clks_get(core, dev, core->vcodec1_clks, + core->res->vcodec1_clks); +} + +static void venc_put_v4(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + unsigned int i; + + if (!legacy_binding) + return; + + for (i = 0; i < core->res->vcodec_clks_num; i++) + core->vcodec1_clks[i] = NULL; +} + +static int venc_power_v4(struct device *dev, int on) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret; + + if (!legacy_binding) + return 0; + + ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true); + if (ret) + return ret; + + if (on == POWER_ON) + ret = vcodec_clks_enable(core, core->vcodec1_clks); + else + vcodec_clks_disable(core, core->vcodec1_clks); + + vcodec_control_v4(core, VIDC_CORE_ID_2, false); + + return ret; +} + +static int vcodec_domains_get(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + const struct venus_resources *res = core->res; + struct device *pd; + unsigned int i; + + if (!res->vcodec_pmdomains_num) + return -ENODEV; + + for (i = 0; i < res->vcodec_pmdomains_num; i++) { + pd = dev_pm_domain_attach_by_name(dev, + res->vcodec_pmdomains[i]); + if (IS_ERR(pd)) + return PTR_ERR(pd); + core->pmdomains[i] = pd; + } + + core->pd_dl_venus = device_link_add(dev, core->pmdomains[0], + DL_FLAG_PM_RUNTIME | + DL_FLAG_STATELESS | + DL_FLAG_RPM_ACTIVE); + if (!core->pd_dl_venus) + return -ENODEV; + + return 0; +} + +static void vcodec_domains_put(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + const struct venus_resources *res = core->res; + unsigned int i; + + if (!res->vcodec_pmdomains_num) + return; + + if (core->pd_dl_venus) + device_link_del(core->pd_dl_venus); + + for (i = 0; i < res->vcodec_pmdomains_num; i++) { + if (IS_ERR_OR_NULL(core->pmdomains[i])) + continue; + dev_pm_domain_detach(core->pmdomains[i], true); + } +} + +static int core_get_v4(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + const struct venus_resources *res = core->res; + int ret; + + ret = core_clks_get(core); + if (ret) + return ret; + + if (!res->vcodec_pmdomains_num) + legacy_binding = true; + + dev_info(dev, "%s legacy binding\n", legacy_binding ? 
"" : "non"); + + ret = vcodec_clks_get(core, dev, core->vcodec0_clks, res->vcodec0_clks); + if (ret) + return ret; + + ret = vcodec_clks_get(core, dev, core->vcodec1_clks, res->vcodec1_clks); + if (ret) + return ret; + + if (legacy_binding) + return 0; + + ret = vcodec_domains_get(dev); + if (ret) + return ret; + + return 0; +} + +static void core_put_v4(struct device *dev) +{ + if (legacy_binding) + return; + + vcodec_domains_put(dev); +} + +static int core_power_v4(struct device *dev, int on) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret = 0; + + if (on == POWER_ON) + ret = core_clks_enable(core); + else + core_clks_disable(core); + + return ret; +} + +static unsigned long calculate_inst_freq(struct venus_inst *inst, + unsigned long filled_len) +{ + unsigned long vpp_freq = 0, vsp_freq = 0; + u32 fps = (u32)inst->fps; + u32 mbs_per_sec; + + mbs_per_sec = load_per_instance(inst) / fps; + + vpp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vpp_freq; + /* 21 / 20 is overhead factor */ + vpp_freq += vpp_freq / 20; + vsp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vsp_freq; + + /* 10 / 7 is overhead factor */ + if (inst->session_type == VIDC_SESSION_TYPE_ENC) + vsp_freq += (inst->controls.enc.bitrate * 10) / 7; + else + vsp_freq += ((fps * filled_len * 8) * 10) / 7; + + return max(vpp_freq, vsp_freq); +} + +static int load_scale_v4(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + const struct freq_tbl *table = core->res->freq_tbl; + unsigned int num_rows = core->res->freq_tbl_size; + struct device *dev = core->dev; + unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0; + unsigned long filled_len = 0; + int i, ret; + + for (i = 0; i < inst->num_input_bufs; i++) + filled_len = max(filled_len, inst->payloads[i]); + + if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len) + return 0; + + freq = calculate_inst_freq(inst, filled_len); + inst->clk_data.freq = freq; + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + if (inst->clk_data.core_id == VIDC_CORE_ID_1) { + freq_core1 += inst->clk_data.freq; + } else if (inst->clk_data.core_id == VIDC_CORE_ID_2) { + freq_core2 += inst->clk_data.freq; + } else if (inst->clk_data.core_id == VIDC_CORE_ID_3) { + freq_core1 += inst->clk_data.freq; + freq_core2 += inst->clk_data.freq; + } + } + mutex_unlock(&core->lock); + + freq = max(freq_core1, freq_core2); + + if (freq >= table[0].freq) { + freq = table[0].freq; + dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n", + freq, table[0].freq); + goto set_freq; + } + + for (i = num_rows - 1 ; i >= 0; i--) { + if (freq <= table[i].freq) { + freq = table[i].freq; + break; + } + } + +set_freq: + + ret = core_clks_set_rate(core, freq); + if (ret) { + dev_err(dev, "failed to set clock rate %lu (%d)\n", + freq, ret); + return ret; + } + + ret = load_scale_bw(core); + if (ret) { + dev_err(dev, "failed to set bandwidth (%d)\n", + ret); + return ret; + } + + return 0; +} + +static const struct venus_pm_ops pm_ops_v4 = { + .core_get = core_get_v4, + .core_put = core_put_v4, + .core_power = core_power_v4, + .vdec_get = vdec_get_v4, + .vdec_put = vdec_put_v4, + .vdec_power = vdec_power_v4, + .venc_get = venc_get_v4, + .venc_put = venc_put_v4, + .venc_power = venc_power_v4, + .coreid_power = coreid_power_v4, + .load_scale = load_scale_v4, +}; + +const struct venus_pm_ops *venus_pm_get(enum hfi_version version) +{ + switch (version) { + case HFI_VERSION_1XX: + default: + return &pm_ops_v1; + case HFI_VERSION_3XX: + return 
&pm_ops_v3; + case HFI_VERSION_4XX: + return &pm_ops_v4; + } + + return NULL; +} diff --git a/drivers/media/platform/qcom/venus/pm_helpers.h b/drivers/media/platform/qcom/venus/pm_helpers.h new file mode 100644 index 000000000000..aa2f6afa2354 --- /dev/null +++ b/drivers/media/platform/qcom/venus/pm_helpers.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2019 Linaro Ltd. */ +#ifndef __VENUS_PM_HELPERS_H__ +#define __VENUS_PM_HELPERS_H__ + +struct device; + +#define POWER_ON 1 +#define POWER_OFF 0 + +struct venus_pm_ops { + int (*core_get)(struct device *dev); + void (*core_put)(struct device *dev); + int (*core_power)(struct device *dev, int on); + + int (*vdec_get)(struct device *dev); + void (*vdec_put)(struct device *dev); + int (*vdec_power)(struct device *dev, int on); + + int (*venc_get)(struct device *dev); + void (*venc_put)(struct device *dev); + int (*venc_power)(struct device *dev, int on); + + int (*coreid_power)(struct venus_inst *inst, int on); + + int (*load_scale)(struct venus_inst *inst); +}; + +const struct venus_pm_ops *venus_pm_get(enum hfi_version version); + +static inline int venus_pm_load_scale(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + + if (!core->pm_ops || !core->pm_ops->load_scale) + return 0; + + return core->pm_ops->load_scale(inst); +} + +static inline int venus_pm_acquire_core(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret = 0; + + if (pm_ops && pm_ops->coreid_power) + ret = pm_ops->coreid_power(inst, POWER_ON); + + return ret; +} + +static inline int venus_pm_release_core(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret = 0; + + if (pm_ops && pm_ops->coreid_power) + ret = pm_ops->coreid_power(inst, POWER_OFF); + + return ret; +} + +#endif diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index 8feaf5daece9..4ed2628585a1 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -20,6 +20,7 @@ #include "core.h" #include "helpers.h" #include "vdec.h" +#include "pm_helpers.h" /* * Three resons to keep MPLANE formats (despite that the number of planes @@ -578,10 +579,6 @@ static int vdec_output_conf(struct venus_inst *inst) if (ret) return ret; - ret = venus_helper_set_core_usage(inst, VIDC_CORE_ID_1); - if (ret) - return ret; - if (core->res->hfi_version == HFI_VERSION_1XX) { ptype = HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER; ret = hfi_session_set_property(inst, ptype, &en); @@ -868,7 +865,7 @@ reconfigure: if (ret) goto free_dpb_bufs; - venus_helper_load_scale_clocks(inst); + venus_pm_load_scale(inst); ret = hfi_session_continue(inst); if (ret) @@ -950,6 +947,10 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count) mutex_lock(&inst->lock); + ret = venus_pm_acquire_core(inst); + if (ret) + goto error; + if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) ret = vdec_start_capture(inst); else @@ -1076,7 +1077,8 @@ static void vdec_session_release(struct venus_inst *inst) hfi_session_abort(inst); venus_helper_free_dpb_bufs(inst); - venus_helper_load_scale_clocks(inst); + venus_pm_load_scale(inst); + venus_pm_release_core(inst); INIT_LIST_HEAD(&inst->registeredbufs); mutex_unlock(&inst->lock); @@ -1191,6 +1193,9 @@ static void vdec_event_change(struct venus_inst *inst, inst->out_width = ev_data->width; inst->out_height = 
ev_data->height; + if (inst->bit_depth != ev_data->bit_depth) + inst->bit_depth = ev_data->bit_depth; + dev_dbg(dev, "event %s sufficient resources (%ux%u)\n", sufficient ? "" : "not", ev_data->width, ev_data->height); @@ -1336,6 +1341,9 @@ static int vdec_open(struct file *file) inst->num_output_bufs = 1; inst->codec_state = VENUS_DEC_STATE_DEINIT; inst->buf_count = 0; + inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT; + inst->core_acquired = false; + inst->bit_depth = VIDC_BITDEPTH_8; init_waitqueue_head(&inst->reconf_wait); venus_helper_init_instance(inst); @@ -1432,20 +1440,14 @@ static int vdec_probe(struct platform_device *pdev) if (!core) return -EPROBE_DEFER; - if (IS_V3(core) || IS_V4(core)) { - core->core0_clk = devm_clk_get(dev, "core"); - if (IS_ERR(core->core0_clk)) - return PTR_ERR(core->core0_clk); - } + platform_set_drvdata(pdev, core); - if (IS_V4(core)) { - core->core0_bus_clk = devm_clk_get(dev, "bus"); - if (IS_ERR(core->core0_bus_clk)) - return PTR_ERR(core->core0_bus_clk); + if (core->pm_ops->vdec_get) { + ret = core->pm_ops->vdec_get(dev); + if (ret) + return ret; } - platform_set_drvdata(pdev, core); - vdev = video_device_alloc(); if (!vdev) return -ENOMEM; @@ -1458,7 +1460,7 @@ static int vdec_probe(struct platform_device *pdev) vdev->v4l2_dev = &core->v4l2_dev; vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING; - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret) goto err_vdev_release; @@ -1482,57 +1484,33 @@ static int vdec_remove(struct platform_device *pdev) video_unregister_device(core->vdev_dec); pm_runtime_disable(core->dev_dec); + if (core->pm_ops->vdec_put) + core->pm_ops->vdec_put(core->dev_dec); + return 0; } static __maybe_unused int vdec_runtime_suspend(struct device *dev) { struct venus_core *core = dev_get_drvdata(dev); - int ret; - - if (IS_V1(core)) - return 0; - - ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, true); - if (ret) - return ret; - - if (IS_V4(core)) - clk_disable_unprepare(core->core0_bus_clk); + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret = 0; - clk_disable_unprepare(core->core0_clk); + if (pm_ops->vdec_power) + ret = pm_ops->vdec_power(dev, POWER_OFF); - return venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false); + return ret; } static __maybe_unused int vdec_runtime_resume(struct device *dev) { struct venus_core *core = dev_get_drvdata(dev); - int ret; - - if (IS_V1(core)) - return 0; - - ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, true); - if (ret) - return ret; - - ret = clk_prepare_enable(core->core0_clk); - if (ret) - goto err_power_disable; - - if (IS_V4(core)) - ret = clk_prepare_enable(core->core0_bus_clk); - - if (ret) - goto err_unprepare_core0; + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret = 0; - return venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false); + if (pm_ops->vdec_power) + ret = pm_ops->vdec_power(dev, POWER_ON); -err_unprepare_core0: - clk_disable_unprepare(core->core0_clk); -err_power_disable: - venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false); return ret; } diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c index 453edf966d4f..9981a2a27c90 100644 --- a/drivers/media/platform/qcom/venus/venc.c +++ b/drivers/media/platform/qcom/venus/venc.c @@ -20,6 +20,7 @@ #include "core.h" #include "helpers.h" #include "venc.h" +#include "pm_helpers.h" #define NUM_B_FRAMES_MAX 4 @@ -655,10 +656,6 @@ 
static int venc_set_properties(struct venus_inst *inst) if (ret) return ret; - ret = venus_helper_set_core_usage(inst, VIDC_CORE_ID_2); - if (ret) - return ret; - ptype = HFI_PROPERTY_CONFIG_FRAME_RATE; frate.buffer_type = HFI_BUFFER_OUTPUT; frate.framerate = inst->fps * (1 << 16); @@ -731,7 +728,9 @@ static int venc_set_properties(struct venus_inst *inst) if (ret) return ret; - if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) + if (!ctr->rc_enable) + rate_control = HFI_RATE_CONTROL_OFF; + else if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) rate_control = HFI_RATE_CONTROL_VBR_CFR; else rate_control = HFI_RATE_CONTROL_CBR_CFR; @@ -991,6 +990,10 @@ static int venc_start_streaming(struct vb2_queue *q, unsigned int count) if (ret) goto bufs_done; + ret = venus_pm_acquire_core(inst); + if (ret) + goto deinit_sess; + ret = venc_set_properties(inst); if (ret) goto deinit_sess; @@ -1159,6 +1162,8 @@ static int venc_open(struct file *file) inst->core = core; inst->session_type = VIDC_SESSION_TYPE_ENC; + inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT; + inst->core_acquired = false; venus_helper_init_instance(inst); @@ -1255,20 +1260,14 @@ static int venc_probe(struct platform_device *pdev) if (!core) return -EPROBE_DEFER; - if (IS_V3(core) || IS_V4(core)) { - core->core1_clk = devm_clk_get(dev, "core"); - if (IS_ERR(core->core1_clk)) - return PTR_ERR(core->core1_clk); - } + platform_set_drvdata(pdev, core); - if (IS_V4(core)) { - core->core1_bus_clk = devm_clk_get(dev, "bus"); - if (IS_ERR(core->core1_bus_clk)) - return PTR_ERR(core->core1_bus_clk); + if (core->pm_ops->venc_get) { + ret = core->pm_ops->venc_get(dev); + if (ret) + return ret; } - platform_set_drvdata(pdev, core); - vdev = video_device_alloc(); if (!vdev) return -ENOMEM; @@ -1281,7 +1280,7 @@ static int venc_probe(struct platform_device *pdev) vdev->v4l2_dev = &core->v4l2_dev; vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING; - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret) goto err_vdev_release; @@ -1305,57 +1304,33 @@ static int venc_remove(struct platform_device *pdev) video_unregister_device(core->vdev_enc); pm_runtime_disable(core->dev_enc); + if (core->pm_ops->venc_put) + core->pm_ops->venc_put(core->dev_enc); + return 0; } static __maybe_unused int venc_runtime_suspend(struct device *dev) { struct venus_core *core = dev_get_drvdata(dev); - int ret; - - if (IS_V1(core)) - return 0; - - ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, true); - if (ret) - return ret; - - if (IS_V4(core)) - clk_disable_unprepare(core->core1_bus_clk); + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret = 0; - clk_disable_unprepare(core->core1_clk); + if (pm_ops->venc_power) + ret = pm_ops->venc_power(dev, POWER_OFF); - return venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false); + return ret; } static __maybe_unused int venc_runtime_resume(struct device *dev) { struct venus_core *core = dev_get_drvdata(dev); - int ret; - - if (IS_V1(core)) - return 0; - - ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, true); - if (ret) - return ret; - - ret = clk_prepare_enable(core->core1_clk); - if (ret) - goto err_power_disable; - - if (IS_V4(core)) - ret = clk_prepare_enable(core->core1_bus_clk); - - if (ret) - goto err_unprepare_core1; + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret = 0; - return venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false); + if (pm_ops->venc_power) + ret 
= pm_ops->venc_power(dev, POWER_ON); -err_unprepare_core1: - clk_disable_unprepare(core->core1_clk); -err_power_disable: - venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false); return ret; } diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c index 877c0b3299e9..8362dde7949e 100644 --- a/drivers/media/platform/qcom/venus/venc_ctrls.c +++ b/drivers/media/platform/qcom/venus/venc_ctrls.c @@ -199,6 +199,9 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) } mutex_unlock(&inst->lock); break; + case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: + ctr->rc_enable = ctrl->val; + break; default: return -EINVAL; } @@ -214,7 +217,7 @@ int venc_ctrl_init(struct venus_inst *inst) { int ret; - ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 30); + ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 31); if (ret) return ret; @@ -351,6 +354,9 @@ int venc_ctrl_init(struct venus_inst *inst) v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME, 0, 0, 0, 0); + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 0, 1, 1, 1); + ret = inst->ctrl_handler.error; if (ret) goto err; diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c index cf9029efeb04..1a30cd036371 100644 --- a/drivers/media/platform/rcar-vin/rcar-dma.c +++ b/drivers/media/platform/rcar-vin/rcar-dma.c @@ -535,7 +535,7 @@ static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin) /* Set scaling coefficient */ crop_height = vin->crop.height; - if (V4L2_FIELD_IS_INTERLACED(vin->format.field)) + if (V4L2_FIELD_HAS_BOTH(vin->format.field)) crop_height *= 2; ys = 0; @@ -564,7 +564,7 @@ static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin) rvin_write(vin, 0, VNSLPOC_REG); rvin_write(vin, vin->format.width - 1, VNEPPOC_REG); - if (V4L2_FIELD_IS_INTERLACED(vin->format.field)) + if (V4L2_FIELD_HAS_BOTH(vin->format.field)) rvin_write(vin, vin->format.height / 2 - 1, VNELPOC_REG); else rvin_write(vin, vin->format.height - 1, VNELPOC_REG); @@ -626,6 +626,8 @@ static int rvin_setup(struct rvin_dev *vin) case V4L2_FIELD_INTERLACED_BT: vnmc = VNMC_IM_FULL | VNMC_FOC; break; + case V4L2_FIELD_SEQ_TB: + case V4L2_FIELD_SEQ_BT: case V4L2_FIELD_NONE: vnmc = VNMC_IM_ODD_EVEN; progressive = true; @@ -842,27 +844,52 @@ static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot) struct rvin_buffer *buf; struct vb2_v4l2_buffer *vbuf; dma_addr_t phys_addr; + int prev; /* A already populated slot shall never be overwritten. */ - if (WARN_ON(vin->queue_buf[slot] != NULL)) + if (WARN_ON(vin->buf_hw[slot].buffer)) return; - vin_dbg(vin, "Filling HW slot: %d\n", slot); - - if (list_empty(&vin->buf_list)) { - vin->queue_buf[slot] = NULL; + prev = (slot == 0 ? 
HW_BUFFER_NUM : slot) - 1; + + if (vin->buf_hw[prev].type == HALF_TOP) { + vbuf = vin->buf_hw[prev].buffer; + vin->buf_hw[slot].buffer = vbuf; + vin->buf_hw[slot].type = HALF_BOTTOM; + switch (vin->format.pixelformat) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV16: + phys_addr = vin->buf_hw[prev].phys + + vin->format.sizeimage / 4; + break; + default: + phys_addr = vin->buf_hw[prev].phys + + vin->format.sizeimage / 2; + break; + } + } else if (list_empty(&vin->buf_list)) { + vin->buf_hw[slot].buffer = NULL; + vin->buf_hw[slot].type = FULL; phys_addr = vin->scratch_phys; } else { /* Keep track of buffer we give to HW */ buf = list_entry(vin->buf_list.next, struct rvin_buffer, list); vbuf = &buf->vb; list_del_init(to_buf_list(vbuf)); - vin->queue_buf[slot] = vbuf; + vin->buf_hw[slot].buffer = vbuf; + + vin->buf_hw[slot].type = + V4L2_FIELD_IS_SEQUENTIAL(vin->format.field) ? + HALF_TOP : FULL; /* Setup DMA */ phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0); } + vin_dbg(vin, "Filling HW slot: %d type: %d buffer: %p\n", + slot, vin->buf_hw[slot].type, vin->buf_hw[slot].buffer); + + vin->buf_hw[slot].phys = phys_addr; rvin_set_slot_addr(vin, slot, phys_addr); } @@ -870,6 +897,11 @@ static int rvin_capture_start(struct rvin_dev *vin) { int slot, ret; + for (slot = 0; slot < HW_BUFFER_NUM; slot++) { + vin->buf_hw[slot].buffer = NULL; + vin->buf_hw[slot].type = FULL; + } + for (slot = 0; slot < HW_BUFFER_NUM; slot++) rvin_fill_hw_slot(vin, slot); @@ -953,13 +985,24 @@ static irqreturn_t rvin_irq(int irq, void *data) } /* Capture frame */ - if (vin->queue_buf[slot]) { - vin->queue_buf[slot]->field = rvin_get_active_field(vin, vnms); - vin->queue_buf[slot]->sequence = vin->sequence; - vin->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns(); - vb2_buffer_done(&vin->queue_buf[slot]->vb2_buf, + if (vin->buf_hw[slot].buffer) { + /* + * Nothing to do but refill the hardware slot if + * capture only filled first half of vb2 buffer. + */ + if (vin->buf_hw[slot].type == HALF_TOP) { + vin->buf_hw[slot].buffer = NULL; + rvin_fill_hw_slot(vin, slot); + goto done; + } + + vin->buf_hw[slot].buffer->field = + rvin_get_active_field(vin, vnms); + vin->buf_hw[slot].buffer->sequence = vin->sequence; + vin->buf_hw[slot].buffer->vb2_buf.timestamp = ktime_get_ns(); + vb2_buffer_done(&vin->buf_hw[slot].buffer->vb2_buf, VB2_BUF_STATE_DONE); - vin->queue_buf[slot] = NULL; + vin->buf_hw[slot].buffer = NULL; } else { /* Scratch buffer was used, dropping frame. 
*/ vin_dbg(vin, "Dropping frame %u\n", vin->sequence); @@ -980,14 +1023,22 @@ static void return_all_buffers(struct rvin_dev *vin, enum vb2_buffer_state state) { struct rvin_buffer *buf, *node; - int i; + struct vb2_v4l2_buffer *freed[HW_BUFFER_NUM]; + unsigned int i, n; for (i = 0; i < HW_BUFFER_NUM; i++) { - if (vin->queue_buf[i]) { - vb2_buffer_done(&vin->queue_buf[i]->vb2_buf, - state); - vin->queue_buf[i] = NULL; + freed[i] = vin->buf_hw[i].buffer; + vin->buf_hw[i].buffer = NULL; + + for (n = 0; n < i; n++) { + if (freed[i] == freed[n]) { + freed[i] = NULL; + break; + } } + + if (freed[i]) + vb2_buffer_done(&freed[i]->vb2_buf, state); } list_for_each_entry_safe(buf, node, &vin->buf_list, list) { @@ -1291,7 +1342,7 @@ int rvin_dma_register(struct rvin_dev *vin, int irq) vin->state = STOPPED; for (i = 0; i < HW_BUFFER_NUM; i++) - vin->queue_buf[i] = NULL; + vin->buf_hw[i].buffer = NULL; /* buffer queue */ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c index 5ff565e76bca..5151a3cd8a6e 100644 --- a/drivers/media/platform/rcar-vin/rcar-v4l2.c +++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c @@ -73,11 +73,22 @@ const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin, { int i; - if (vin->info->model == RCAR_M1 && pixelformat == V4L2_PIX_FMT_XBGR32) - return NULL; - - if (pixelformat == V4L2_PIX_FMT_NV12 && !vin->info->nv12) - return NULL; + switch (pixelformat) { + case V4L2_PIX_FMT_XBGR32: + if (vin->info->model == RCAR_M1) + return NULL; + break; + case V4L2_PIX_FMT_NV12: + /* + * If NV12 is supported it's only supported on channels 0, 1, 4, + * 5, 8, 9, 12 and 13. + */ + if (!vin->info->nv12 || !(BIT(vin->id) & 0x3333)) + return NULL; + break; + default: + break; + } for (i = 0; i < ARRAY_SIZE(rvin_formats); i++) if (rvin_formats[i].fourcc == pixelformat) @@ -107,6 +118,9 @@ static u32 rvin_format_bytesperline(struct rvin_dev *vin, break; } + if (V4L2_FIELD_IS_SEQUENTIAL(pix->field)) + align = 0x80; + return ALIGN(pix->width, align) * fmt->bpp; } @@ -137,6 +151,8 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix) case V4L2_FIELD_INTERLACED_BT: case V4L2_FIELD_INTERLACED: case V4L2_FIELD_ALTERNATE: + case V4L2_FIELD_SEQ_TB: + case V4L2_FIELD_SEQ_BT: break; default: pix->field = RVIN_DEFAULT_FIELD; @@ -826,7 +842,7 @@ static int rvin_open(struct file *file) goto err_unlock; if (vin->info->use_mc) - ret = v4l2_pipeline_pm_use(&vin->vdev.entity, 1); + ret = v4l2_pipeline_pm_get(&vin->vdev.entity); else if (v4l2_fh_is_singular_file(file)) ret = rvin_power_parallel(vin, true); @@ -842,7 +858,7 @@ static int rvin_open(struct file *file) return 0; err_power: if (vin->info->use_mc) - v4l2_pipeline_pm_use(&vin->vdev.entity, 0); + v4l2_pipeline_pm_put(&vin->vdev.entity); else if (v4l2_fh_is_singular_file(file)) rvin_power_parallel(vin, false); err_open: @@ -870,7 +886,7 @@ static int rvin_release(struct file *file) ret = _vb2_fop_release(file, NULL); if (vin->info->use_mc) { - v4l2_pipeline_pm_use(&vin->vdev.entity, 0); + v4l2_pipeline_pm_put(&vin->vdev.entity); } else { if (fh_singular) rvin_power_parallel(vin, false); @@ -953,7 +969,7 @@ int rvin_v4l2_register(struct rvin_dev *vin) rvin_format_align(vin, &vin->format); - ret = video_register_device(&vin->vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&vin->vdev, VFL_TYPE_VIDEO, -1); if (ret) { vin_err(vin, "Failed to register video device\n"); return ret; diff --git 
a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h index a36b0824f81d..c19d077ce1cb 100644 --- a/drivers/media/platform/rcar-vin/rcar-vin.h +++ b/drivers/media/platform/rcar-vin/rcar-vin.h @@ -61,6 +61,23 @@ enum rvin_dma_state { }; /** + * enum rvin_buffer_type + * + * Describes how a buffer is given to the hardware. To be able + * to capture SEQ_TB/BT it's needed to capture to the same vb2 + * buffer twice so the type of buffer needs to be kept. + * + * FULL - One capture fills the whole vb2 buffer + * HALF_TOP - One capture fills the top half of the vb2 buffer + * HALF_BOTTOM - One capture fills the bottom half of the vb2 buffer + */ +enum rvin_buffer_type { + FULL, + HALF_TOP, + HALF_BOTTOM, +}; + +/** * struct rvin_video_format - Data format stored in memory * @fourcc: Pixelformat * @bpp: Bytes per pixel @@ -164,9 +181,8 @@ struct rvin_info { * @scratch: cpu address for scratch buffer * @scratch_phys: physical address of the scratch buffer * - * @qlock: protects @queue_buf, @buf_list, @sequence - * @state - * @queue_buf: Keeps track of buffers given to HW slot + * @qlock: protects @buf_hw, @buf_list, @sequence and @state + * @buf_hw: Keeps track of buffers given to HW slot * @buf_list: list of queued buffers * @sequence: V4L2 buffers sequence number * @state: keeps track of operation state @@ -205,7 +221,11 @@ struct rvin_dev { dma_addr_t scratch_phys; spinlock_t qlock; - struct vb2_v4l2_buffer *queue_buf[HW_BUFFER_NUM]; + struct { + struct vb2_v4l2_buffer *buffer; + enum rvin_buffer_type type; + dma_addr_t phys; + } buf_hw[HW_BUFFER_NUM]; struct list_head buf_list; unsigned int sequence; enum rvin_dma_state state; diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c index 0f267a237b42..3d2451ac347d 100644 --- a/drivers/media/platform/rcar_drif.c +++ b/drivers/media/platform/rcar_drif.c @@ -275,10 +275,14 @@ static int rcar_drif_alloc_dmachannels(struct rcar_drif_sdr *sdr) for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { struct rcar_drif *ch = sdr->ch[i]; - ch->dmach = dma_request_slave_channel(&ch->pdev->dev, "rx"); - if (!ch->dmach) { - rdrif_err(sdr, "ch%u: dma channel req failed\n", i); - ret = -ENODEV; + ch->dmach = dma_request_chan(&ch->pdev->dev, "rx"); + if (IS_ERR(ch->dmach)) { + ret = PTR_ERR(ch->dmach); + if (ret != -EPROBE_DEFER) + rdrif_err(sdr, + "ch%u: dma channel req failed: %pe\n", + i, ch->dmach); + ch->dmach = NULL; goto dmach_error; } diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c index 97bed45360f0..c9448de885b6 100644 --- a/drivers/media/platform/rcar_fdp1.c +++ b/drivers/media/platform/rcar_fdp1.c @@ -2344,7 +2344,7 @@ static int fdp1_probe(struct platform_device *pdev) video_set_drvdata(vfd, fdp1); strscpy(vfd->name, fdp1_videodev.name, sizeof(vfd->name)); - ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n"); goto release_m2m; diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c index 1c3f507acfc9..5250a14324e9 100644 --- a/drivers/media/platform/rcar_jpu.c +++ b/drivers/media/platform/rcar_jpu.c @@ -1663,7 +1663,7 @@ static int jpu_probe(struct platform_device *pdev) jpu->vfd_encoder.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE; - ret = video_register_device(&jpu->vfd_encoder, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&jpu->vfd_encoder, VFL_TYPE_VIDEO, -1); if 
(ret) { v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n"); goto m2m_init_rollback; @@ -1682,7 +1682,7 @@ static int jpu_probe(struct platform_device *pdev) jpu->vfd_decoder.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE; - ret = video_register_device(&jpu->vfd_decoder, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&jpu->vfd_decoder, VFL_TYPE_VIDEO, -1); if (ret) { v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n"); goto enc_vdev_register_rollback; diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c index 197b3991330d..f7d71a6a7970 100644 --- a/drivers/media/platform/renesas-ceu.c +++ b/drivers/media/platform/renesas-ceu.c @@ -1450,7 +1450,7 @@ static int ceu_notify_complete(struct v4l2_async_notifier *notifier) V4L2_CAP_STREAMING; video_set_drvdata(vdev, ceudev); - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) { v4l2_err(vdev->v4l2_dev, "video_register_device failed: %d\n", ret); diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c index e9ff12b6b5bb..9d122429706e 100644 --- a/drivers/media/platform/rockchip/rga/rga.c +++ b/drivers/media/platform/rockchip/rga/rga.c @@ -889,7 +889,7 @@ static int rga_probe(struct platform_device *pdev) def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3; def_frame.size = def_frame.stride * def_frame.height; - ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1); if (ret) { v4l2_err(&rga->v4l2_dev, "Failed to register video device\n"); goto rel_vdev; diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c index 2fb45db8e4ba..9ca49af29542 100644 --- a/drivers/media/platform/s3c-camif/camif-capture.c +++ b/drivers/media/platform/s3c-camif/camif-capture.c @@ -1158,7 +1158,7 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx) vfd->ctrl_handler = &vp->ctrl_handler; vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE; - ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1); if (ret) goto err_ctrlh_free; diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index f5f05ea9f521..6932fd47071b 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c @@ -695,7 +695,7 @@ static int g2d_probe(struct platform_device *pdev) vfd->lock = &dev->mutex; vfd->v4l2_dev = &dev->v4l2_dev; vfd->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; - ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto rel_vdev; diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c index ac2162235cef..86bda3947110 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c @@ -2946,7 +2946,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev) jpeg->vfd_encoder->vfl_dir = VFL_DIR_M2M; jpeg->vfd_encoder->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M; - ret = video_register_device(jpeg->vfd_encoder, VFL_TYPE_GRABBER, -1); + ret = video_register_device(jpeg->vfd_encoder, VFL_TYPE_VIDEO, -1); if (ret) { v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n"); 
video_device_release(jpeg->vfd_encoder); @@ -2976,7 +2976,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev) jpeg->vfd_decoder->vfl_dir = VFL_DIR_M2M; jpeg->vfd_decoder->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M; - ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1); + ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_VIDEO, -1); if (ret) { v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n"); video_device_release(jpeg->vfd_decoder); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index b776f83e395e..5c2a23b953a4 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -1376,7 +1376,7 @@ static int s5p_mfc_probe(struct platform_device *pdev) s5p_mfc_init_regs(dev); /* Register decoder and encoder */ - ret = video_register_device(dev->vfd_dec, VFL_TYPE_GRABBER, 0); + ret = video_register_device(dev->vfd_dec, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto err_dec_reg; @@ -1384,7 +1384,7 @@ static int s5p_mfc_probe(struct platform_device *pdev) v4l2_info(&dev->v4l2_dev, "decoder registered as /dev/video%d\n", dev->vfd_dec->num); - ret = video_register_device(dev->vfd_enc, VFL_TYPE_GRABBER, 0); + ret = video_register_device(dev->vfd_enc, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto err_enc_reg; diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c index 2b4c0d9d6928..f08b8fc192d8 100644 --- a/drivers/media/platform/sh_veu.c +++ b/drivers/media/platform/sh_veu.c @@ -1160,7 +1160,7 @@ static int sh_veu_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); pm_runtime_suspend(&pdev->dev); if (ret < 0) goto evidreg; diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c index 2236702c21b4..36e5f2ff4ef1 100644 --- a/drivers/media/platform/sh_vou.c +++ b/drivers/media/platform/sh_vou.c @@ -1323,7 +1323,7 @@ static int sh_vou_probe(struct platform_device *pdev) goto ei2cnd; } - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) goto evregdev; diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c index d1025a53709f..af2d5eb782ce 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c +++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c @@ -1066,7 +1066,7 @@ static int bdisp_register_device(struct bdisp_dev *bdisp) return PTR_ERR(bdisp->m2m.m2m_dev); } - ret = video_register_device(&bdisp->vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&bdisp->vdev, VFL_TYPE_VIDEO, -1); if (ret) { dev_err(bdisp->dev, "%s(): failed to register video device\n", __func__); diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c index 91369fb3ffaa..2503224eeee5 100644 --- a/drivers/media/platform/sti/delta/delta-v4l2.c +++ b/drivers/media/platform/sti/delta/delta-v4l2.c @@ -1781,7 +1781,7 @@ static int delta_register_device(struct delta_dev *delta) snprintf(vdev->name, sizeof(vdev->name), "%s-%s", DELTA_NAME, DELTA_FW_VERSION); - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret) { dev_err(delta->dev, "%s 
failed to register video device\n", DELTA_PREFIX); diff --git a/drivers/media/platform/sti/hva/hva-v4l2.c b/drivers/media/platform/sti/hva/hva-v4l2.c index 64004d15a9c9..197b99d8fd9c 100644 --- a/drivers/media/platform/sti/hva/hva-v4l2.c +++ b/drivers/media/platform/sti/hva/hva-v4l2.c @@ -1316,7 +1316,7 @@ static int hva_register_device(struct hva_dev *hva) snprintf(vdev->name, sizeof(vdev->name), "%s%lx", HVA_NAME, hva->ip_version); - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret) { dev_err(dev, "%s failed to register video device\n", HVA_PREFIX); diff --git a/drivers/media/platform/stm32/stm32-cec.c b/drivers/media/platform/stm32/stm32-cec.c index 8a86b2cc22fa..ea4b1ebfca99 100644 --- a/drivers/media/platform/stm32/stm32-cec.c +++ b/drivers/media/platform/stm32/stm32-cec.c @@ -291,7 +291,9 @@ static int stm32_cec_probe(struct platform_device *pdev) cec->clk_cec = devm_clk_get(&pdev->dev, "cec"); if (IS_ERR(cec->clk_cec)) { - dev_err(&pdev->dev, "Cannot get cec clock\n"); + if (PTR_ERR(cec->clk_cec) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Cannot get cec clock\n"); + return PTR_ERR(cec->clk_cec); } @@ -302,10 +304,14 @@ static int stm32_cec_probe(struct platform_device *pdev) } cec->clk_hdmi_cec = devm_clk_get(&pdev->dev, "hdmi-cec"); + if (IS_ERR(cec->clk_hdmi_cec) && + PTR_ERR(cec->clk_hdmi_cec) == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (!IS_ERR(cec->clk_hdmi_cec)) { ret = clk_prepare(cec->clk_hdmi_cec); if (ret) { - dev_err(&pdev->dev, "Unable to prepare hdmi-cec clock\n"); + dev_err(&pdev->dev, "Can't prepare hdmi-cec clock\n"); return ret; } } diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c index 9392e3409fba..b8931490b83b 100644 --- a/drivers/media/platform/stm32/stm32-dcmi.c +++ b/drivers/media/platform/stm32/stm32-dcmi.c @@ -1910,10 +1910,13 @@ static int dcmi_probe(struct platform_device *pdev) return PTR_ERR(mclk); } - chan = dma_request_slave_channel(&pdev->dev, "tx"); - if (!chan) { - dev_info(&pdev->dev, "Unable to request DMA channel, defer probing\n"); - return -EPROBE_DEFER; + chan = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR(chan)) { + ret = PTR_ERR(chan); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "Failed to request DMA channel: %d\n", ret); + return ret; } spin_lock_init(&dcmi->irqlock); @@ -1971,7 +1974,7 @@ static int dcmi_probe(struct platform_device *pdev) } dcmi->vdev->entity.flags |= MEDIA_ENT_FL_DEFAULT; - ret = video_register_device(dcmi->vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(dcmi->vdev, VFL_TYPE_VIDEO, -1); if (ret) { dev_err(dcmi->dev, "Failed to register video device\n"); goto err_media_entity_cleanup; diff --git a/drivers/media/platform/sunxi/Makefile b/drivers/media/platform/sunxi/Makefile index 3878cb4efdc2..ff0993f70dc3 100644 --- a/drivers/media/platform/sunxi/Makefile +++ b/drivers/media/platform/sunxi/Makefile @@ -1,3 +1,4 @@ obj-y += sun4i-csi/ obj-y += sun6i-csi/ obj-y += sun8i-di/ +obj-y += sun8i-rotate/ diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c index 83a3a0257c7b..1721e5aee9c6 100644 --- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c +++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c @@ -214,7 +214,7 @@ static int sun4i_csi_open(struct file *file) if (ret < 0) goto err_pm_put; - ret = v4l2_pipeline_pm_use(&csi->vdev.entity, 1); + ret = v4l2_pipeline_pm_get(&csi->vdev.entity); if (ret) goto 
err_pm_put; @@ -227,7 +227,7 @@ static int sun4i_csi_open(struct file *file) return 0; err_pipeline_pm_put: - v4l2_pipeline_pm_use(&csi->vdev.entity, 0); + v4l2_pipeline_pm_put(&csi->vdev.entity); err_pm_put: pm_runtime_put(csi->dev); @@ -243,7 +243,7 @@ static int sun4i_csi_release(struct file *file) mutex_lock(&csi->lock); v4l2_fh_release(file); - v4l2_pipeline_pm_use(&csi->vdev.entity, 0); + v4l2_pipeline_pm_put(&csi->vdev.entity); pm_runtime_put(csi->dev); mutex_unlock(&csi->lock); @@ -374,7 +374,7 @@ int sun4i_csi_v4l2_register(struct sun4i_csi *csi) vdev->ioctl_ops = &sun4i_csi_ioctl_ops; video_set_drvdata(vdev, csi); - ret = video_register_device(&csi->vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&csi->vdev, VFL_TYPE_VIDEO, -1); if (ret) return ret; diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c index f0dfe68486d1..d9648b2810b9 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c @@ -474,7 +474,7 @@ static int sun6i_video_open(struct file *file) if (ret < 0) goto unlock; - ret = v4l2_pipeline_pm_use(&video->vdev.entity, 1); + ret = v4l2_pipeline_pm_get(&video->vdev.entity); if (ret < 0) goto fh_release; @@ -507,7 +507,7 @@ static int sun6i_video_close(struct file *file) _vb2_fop_release(file, NULL); - v4l2_pipeline_pm_use(&video->vdev.entity, 0); + v4l2_pipeline_pm_put(&video->vdev.entity); if (last_fh) sun6i_csi_set_power(video->csi, false); @@ -648,7 +648,7 @@ int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi, vdev->release = video_device_release_empty; vdev->fops = &sun6i_video_fops; vdev->ioctl_ops = &sun6i_video_ioctl_ops; - vdev->vfl_type = VFL_TYPE_GRABBER; + vdev->vfl_type = VFL_TYPE_VIDEO; vdev->vfl_dir = VFL_DIR_RX; vdev->v4l2_dev = &csi->v4l2_dev; vdev->queue = vidq; @@ -656,7 +656,7 @@ int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi, vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE; video_set_drvdata(vdev, video); - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) { v4l2_err(&csi->v4l2_dev, "video_register_device failed: %d\n", ret); diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c index b61f3dea7c93..d78f6593ddd1 100644 --- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c +++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c @@ -814,11 +814,8 @@ static int deinterlace_probe(struct platform_device *pdev) dev->dev = &pdev->dev; irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - dev_err(dev->dev, "Failed to get IRQ\n"); - + if (irq <= 0) return irq; - } ret = devm_request_irq(dev->dev, irq, deinterlace_irq, 0, dev_name(dev->dev), dev); @@ -882,7 +879,7 @@ static int deinterlace_probe(struct platform_device *pdev) deinterlace_video_device.name); video_set_drvdata(vfd, dev); - ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); diff --git a/drivers/media/platform/sunxi/sun8i-rotate/Makefile b/drivers/media/platform/sunxi/sun8i-rotate/Makefile new file mode 100644 index 000000000000..40f9cf398e98 --- /dev/null +++ b/drivers/media/platform/sunxi/sun8i-rotate/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +sun8i-rotate-y += sun8i_rotate.o +sun8i-rotate-y += sun8i_formats.o + 
+obj-$(CONFIG_VIDEO_SUN8I_ROTATE) += sun8i-rotate.o diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i-formats.h b/drivers/media/platform/sunxi/sun8i-rotate/sun8i-formats.h new file mode 100644 index 000000000000..697cd5fadd5f --- /dev/null +++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i-formats.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2020 Jernej Skrabec <jernej.skrabec@siol.net> */ + +#ifndef _SUN8I_FORMATS_H_ +#define _SUN8I_FORMATS_H_ + +#include <linux/videodev2.h> + +#define ROTATE_FLAG_YUV BIT(0) +#define ROTATE_FLAG_OUTPUT BIT(1) + +struct rotate_format { + u32 fourcc; + u32 hw_format; + int planes; + int bpp[3]; + int hsub; + int vsub; + unsigned int flags; +}; + +const struct rotate_format *rotate_find_format(u32 pixelformat); +int rotate_enum_fmt(struct v4l2_fmtdesc *f, bool dst); + +#endif diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i-rotate.h b/drivers/media/platform/sunxi/sun8i-rotate/sun8i-rotate.h new file mode 100644 index 000000000000..32ade97ba572 --- /dev/null +++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i-rotate.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Allwinner DE2 rotation driver + * + * Copyright (C) 2020 Jernej Skrabec <jernej.skrabec@siol.net> + */ + +#ifndef _SUN8I_ROTATE_H_ +#define _SUN8I_ROTATE_H_ + +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> +#include <media/v4l2-mem2mem.h> +#include <media/videobuf2-v4l2.h> +#include <media/videobuf2-dma-contig.h> + +#include <linux/platform_device.h> + +#define ROTATE_NAME "sun8i-rotate" + +#define ROTATE_GLB_CTL 0x00 +#define ROTATE_GLB_CTL_START BIT(31) +#define ROTATE_GLB_CTL_RESET BIT(30) +#define ROTATE_GLB_CTL_BURST_LEN(x) ((x) << 16) +#define ROTATE_GLB_CTL_HFLIP BIT(7) +#define ROTATE_GLB_CTL_VFLIP BIT(6) +#define ROTATE_GLB_CTL_ROTATION(x) ((x) << 4) +#define ROTATE_GLB_CTL_MODE(x) ((x) << 0) + +#define ROTATE_INT 0x04 +#define ROTATE_INT_FINISH_IRQ_EN BIT(16) +#define ROTATE_INT_FINISH_IRQ BIT(0) + +#define ROTATE_IN_FMT 0x20 +#define ROTATE_IN_FMT_FORMAT(x) ((x) << 0) + +#define ROTATE_IN_SIZE 0x24 +#define ROTATE_IN_PITCH0 0x30 +#define ROTATE_IN_PITCH1 0x34 +#define ROTATE_IN_PITCH2 0x38 +#define ROTATE_IN_ADDRL0 0x40 +#define ROTATE_IN_ADDRH0 0x44 +#define ROTATE_IN_ADDRL1 0x48 +#define ROTATE_IN_ADDRH1 0x4c +#define ROTATE_IN_ADDRL2 0x50 +#define ROTATE_IN_ADDRH2 0x54 +#define ROTATE_OUT_SIZE 0x84 +#define ROTATE_OUT_PITCH0 0x90 +#define ROTATE_OUT_PITCH1 0x94 +#define ROTATE_OUT_PITCH2 0x98 +#define ROTATE_OUT_ADDRL0 0xA0 +#define ROTATE_OUT_ADDRH0 0xA4 +#define ROTATE_OUT_ADDRL1 0xA8 +#define ROTATE_OUT_ADDRH1 0xAC +#define ROTATE_OUT_ADDRL2 0xB0 +#define ROTATE_OUT_ADDRH2 0xB4 + +#define ROTATE_BURST_8 0x07 +#define ROTATE_BURST_16 0x0f +#define ROTATE_BURST_32 0x1f +#define ROTATE_BURST_64 0x3f + +#define ROTATE_MODE_COPY_ROTATE 0x01 + +#define ROTATE_FORMAT_ARGB32 0x00 +#define ROTATE_FORMAT_ABGR32 0x01 +#define ROTATE_FORMAT_RGBA32 0x02 +#define ROTATE_FORMAT_BGRA32 0x03 +#define ROTATE_FORMAT_XRGB32 0x04 +#define ROTATE_FORMAT_XBGR32 0x05 +#define ROTATE_FORMAT_RGBX32 0x06 +#define ROTATE_FORMAT_BGRX32 0x07 +#define ROTATE_FORMAT_RGB24 0x08 +#define ROTATE_FORMAT_BGR24 0x09 +#define ROTATE_FORMAT_RGB565 0x0a +#define ROTATE_FORMAT_BGR565 0x0b +#define ROTATE_FORMAT_ARGB4444 0x0c +#define ROTATE_FORMAT_ABGR4444 0x0d +#define ROTATE_FORMAT_RGBA4444 0x0e +#define ROTATE_FORMAT_BGRA4444 0x0f +#define ROTATE_FORMAT_ARGB1555 0x10 +#define ROTATE_FORMAT_ABGR1555 0x11 +#define 
ROTATE_FORMAT_RGBA5551 0x12 +#define ROTATE_FORMAT_BGRA5551 0x13 + +#define ROTATE_FORMAT_YUYV 0x20 +#define ROTATE_FORMAT_UYVY 0x21 +#define ROTATE_FORMAT_YVYU 0x22 +#define ROTATE_FORMAT_VYUV 0x23 +#define ROTATE_FORMAT_NV61 0x24 +#define ROTATE_FORMAT_NV16 0x25 +#define ROTATE_FORMAT_YUV422P 0x26 +#define ROTATE_FORMAT_NV21 0x28 +#define ROTATE_FORMAT_NV12 0x29 +#define ROTATE_FORMAT_YUV420P 0x2A + +#define ROTATE_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1)) + +#define ROTATE_MIN_WIDTH 8U +#define ROTATE_MIN_HEIGHT 8U +#define ROTATE_MAX_WIDTH 4096U +#define ROTATE_MAX_HEIGHT 4096U + +struct rotate_ctx { + struct v4l2_fh fh; + struct rotate_dev *dev; + + struct v4l2_pix_format src_fmt; + struct v4l2_pix_format dst_fmt; + + struct v4l2_ctrl_handler ctrl_handler; + + u32 hflip; + u32 vflip; + u32 rotate; +}; + +struct rotate_dev { + struct v4l2_device v4l2_dev; + struct video_device vfd; + struct device *dev; + struct v4l2_m2m_dev *m2m_dev; + + /* Device file mutex */ + struct mutex dev_mutex; + + void __iomem *base; + + struct clk *bus_clk; + struct clk *mod_clk; + + struct reset_control *rstc; +}; + +#endif diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_formats.c b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_formats.c new file mode 100644 index 000000000000..cebfbc5def38 --- /dev/null +++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_formats.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2020 Jernej Skrabec <jernej.skrabec@siol.net> */ + +#include "sun8i-formats.h" +#include "sun8i-rotate.h" + +/* + * Formats not included in array: + * ROTATE_FORMAT_BGR565 + * ROTATE_FORMAT_VYUV + */ + +static const struct rotate_format rotate_formats[] = { + { + .fourcc = V4L2_PIX_FMT_ARGB32, + .hw_format = ROTATE_FORMAT_ARGB32, + .planes = 1, + .bpp = { 4, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_ABGR32, + .hw_format = ROTATE_FORMAT_ABGR32, + .planes = 1, + .bpp = { 4, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_RGBA32, + .hw_format = ROTATE_FORMAT_RGBA32, + .planes = 1, + .bpp = { 4, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_BGRA32, + .hw_format = ROTATE_FORMAT_BGRA32, + .planes = 1, + .bpp = { 4, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_XRGB32, + .hw_format = ROTATE_FORMAT_XRGB32, + .planes = 1, + .bpp = { 4, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_XBGR32, + .hw_format = ROTATE_FORMAT_XBGR32, + .planes = 1, + .bpp = { 4, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_RGB32, + .hw_format = ROTATE_FORMAT_RGBX32, + .planes = 1, + .bpp = { 4, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_BGR32, + .hw_format = ROTATE_FORMAT_BGRX32, + .planes = 1, + .bpp = { 4, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_RGB24, + .hw_format = ROTATE_FORMAT_RGB24, + .planes = 1, + .bpp = { 3, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_BGR24, + .hw_format = ROTATE_FORMAT_BGR24, + .planes = 1, + .bpp = { 3, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_RGB565, + .hw_format = ROTATE_FORMAT_RGB565, + .planes = 1, + .bpp = { 2, 0, 0 }, + .hsub = 1, + .vsub = 1, + 
.flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_ARGB444, + .hw_format = ROTATE_FORMAT_ARGB4444, + .planes = 1, + .bpp = { 2, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_ABGR444, + .hw_format = ROTATE_FORMAT_ABGR4444, + .planes = 1, + .bpp = { 2, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_RGBA444, + .hw_format = ROTATE_FORMAT_RGBA4444, + .planes = 1, + .bpp = { 2, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_BGRA444, + .hw_format = ROTATE_FORMAT_BGRA4444, + .planes = 1, + .bpp = { 2, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_ARGB555, + .hw_format = ROTATE_FORMAT_ARGB1555, + .planes = 1, + .bpp = { 2, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_ABGR555, + .hw_format = ROTATE_FORMAT_ABGR1555, + .planes = 1, + .bpp = { 2, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_RGBA555, + .hw_format = ROTATE_FORMAT_RGBA5551, + .planes = 1, + .bpp = { 2, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_BGRA555, + .hw_format = ROTATE_FORMAT_BGRA5551, + .planes = 1, + .bpp = { 2, 0, 0 }, + .hsub = 1, + .vsub = 1, + .flags = ROTATE_FLAG_OUTPUT + }, { + .fourcc = V4L2_PIX_FMT_YVYU, + .hw_format = ROTATE_FORMAT_YVYU, + .planes = 1, + .bpp = { 2, 0, 0 }, + .hsub = 2, + .vsub = 1, + .flags = ROTATE_FLAG_YUV + }, { + .fourcc = V4L2_PIX_FMT_UYVY, + .hw_format = ROTATE_FORMAT_UYVY, + .planes = 1, + .bpp = { 2, 0, 0 }, + .hsub = 2, + .vsub = 1, + .flags = ROTATE_FLAG_YUV + }, { + .fourcc = V4L2_PIX_FMT_YUYV, + .hw_format = ROTATE_FORMAT_YUYV, + .planes = 1, + .bpp = { 2, 0, 0 }, + .hsub = 2, + .vsub = 1, + .flags = ROTATE_FLAG_YUV + }, { + .fourcc = V4L2_PIX_FMT_NV61, + .hw_format = ROTATE_FORMAT_NV61, + .planes = 2, + .bpp = { 1, 2, 0 }, + .hsub = 2, + .vsub = 1, + .flags = ROTATE_FLAG_YUV + }, { + .fourcc = V4L2_PIX_FMT_NV16, + .hw_format = ROTATE_FORMAT_NV16, + .planes = 2, + .bpp = { 1, 2, 0 }, + .hsub = 2, + .vsub = 1, + .flags = ROTATE_FLAG_YUV + }, { + .fourcc = V4L2_PIX_FMT_YUV422P, + .hw_format = ROTATE_FORMAT_YUV422P, + .planes = 3, + .bpp = { 1, 1, 1 }, + .hsub = 2, + .vsub = 1, + .flags = ROTATE_FLAG_YUV + }, { + .fourcc = V4L2_PIX_FMT_NV21, + .hw_format = ROTATE_FORMAT_NV21, + .planes = 2, + .bpp = { 1, 2, 0 }, + .hsub = 2, + .vsub = 2, + .flags = ROTATE_FLAG_YUV + }, { + .fourcc = V4L2_PIX_FMT_NV12, + .hw_format = ROTATE_FORMAT_NV12, + .planes = 2, + .bpp = { 1, 2, 0 }, + .hsub = 2, + .vsub = 2, + .flags = ROTATE_FLAG_YUV + }, { + .fourcc = V4L2_PIX_FMT_YUV420, + .hw_format = ROTATE_FORMAT_YUV420P, + .planes = 3, + .bpp = { 1, 1, 1 }, + .hsub = 2, + .vsub = 2, + .flags = ROTATE_FLAG_YUV | ROTATE_FLAG_OUTPUT + }, +}; + +const struct rotate_format *rotate_find_format(u32 pixelformat) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(rotate_formats); i++) + if (rotate_formats[i].fourcc == pixelformat) + return &rotate_formats[i]; + + return NULL; +} + +int rotate_enum_fmt(struct v4l2_fmtdesc *f, bool dst) +{ + int i, index; + + index = 0; + + for (i = 0; i < ARRAY_SIZE(rotate_formats); i++) { + /* not all formats can be used for capture buffers */ + if (dst && !(rotate_formats[i].flags & ROTATE_FLAG_OUTPUT)) + continue; + + if (index == f->index) { + f->pixelformat = rotate_formats[i].fourcc; + + return 0; + } + + index++; + } + + return 
-EINVAL; +} diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c new file mode 100644 index 000000000000..94f505d3cbad --- /dev/null +++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c @@ -0,0 +1,924 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Allwinner sun8i DE2 rotation driver + * + * Copyright (C) 2020 Jernej Skrabec <jernej.skrabec@siol.net> + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> + +#include <media/v4l2-device.h> +#include <media/v4l2-event.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-mem2mem.h> + +#include "sun8i-formats.h" +#include "sun8i-rotate.h" + +static inline u32 rotate_read(struct rotate_dev *dev, u32 reg) +{ + return readl(dev->base + reg); +} + +static inline void rotate_write(struct rotate_dev *dev, u32 reg, u32 value) +{ + writel(value, dev->base + reg); +} + +static inline void rotate_set_bits(struct rotate_dev *dev, u32 reg, u32 bits) +{ + writel(readl(dev->base + reg) | bits, dev->base + reg); +} + +static void rotate_calc_addr_pitch(dma_addr_t buffer, + u32 bytesperline, u32 height, + const struct rotate_format *fmt, + dma_addr_t *addr, u32 *pitch) +{ + u32 size; + int i; + + for (i = 0; i < fmt->planes; i++) { + pitch[i] = bytesperline; + addr[i] = buffer; + if (i > 0) + pitch[i] /= fmt->hsub / fmt->bpp[i]; + size = pitch[i] * height; + if (i > 0) + size /= fmt->vsub; + buffer += size; + } +} + +static void rotate_device_run(void *priv) +{ + struct rotate_ctx *ctx = priv; + struct rotate_dev *dev = ctx->dev; + struct vb2_v4l2_buffer *src, *dst; + const struct rotate_format *fmt; + dma_addr_t addr[3]; + u32 val, pitch[3]; + + src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); + + v4l2_m2m_buf_copy_metadata(src, dst, true); + + val = ROTATE_GLB_CTL_MODE(ROTATE_MODE_COPY_ROTATE); + if (ctx->hflip) + val |= ROTATE_GLB_CTL_HFLIP; + if (ctx->vflip) + val |= ROTATE_GLB_CTL_VFLIP; + val |= ROTATE_GLB_CTL_ROTATION(ctx->rotate / 90); + if (ctx->rotate != 90 && ctx->rotate != 270) + val |= ROTATE_GLB_CTL_BURST_LEN(ROTATE_BURST_64); + else + val |= ROTATE_GLB_CTL_BURST_LEN(ROTATE_BURST_8); + rotate_write(dev, ROTATE_GLB_CTL, val); + + fmt = rotate_find_format(ctx->src_fmt.pixelformat); + if (!fmt) + return; + + rotate_write(dev, ROTATE_IN_FMT, ROTATE_IN_FMT_FORMAT(fmt->hw_format)); + + rotate_calc_addr_pitch(vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0), + ctx->src_fmt.bytesperline, ctx->src_fmt.height, + fmt, addr, pitch); + + rotate_write(dev, ROTATE_IN_SIZE, + ROTATE_SIZE(ctx->src_fmt.width, ctx->src_fmt.height)); + + rotate_write(dev, ROTATE_IN_PITCH0, pitch[0]); + rotate_write(dev, ROTATE_IN_PITCH1, pitch[1]); + rotate_write(dev, ROTATE_IN_PITCH2, pitch[2]); + + rotate_write(dev, ROTATE_IN_ADDRL0, addr[0]); + rotate_write(dev, ROTATE_IN_ADDRL1, addr[1]); + rotate_write(dev, ROTATE_IN_ADDRL2, addr[2]); + + rotate_write(dev, ROTATE_IN_ADDRH0, 0); + rotate_write(dev, ROTATE_IN_ADDRH1, 0); + rotate_write(dev, ROTATE_IN_ADDRH2, 0); + + fmt = rotate_find_format(ctx->dst_fmt.pixelformat); + if (!fmt) + return; + + rotate_calc_addr_pitch(vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0), + ctx->dst_fmt.bytesperline, ctx->dst_fmt.height, + fmt, addr, pitch); + + rotate_write(dev, ROTATE_OUT_SIZE, + ROTATE_SIZE(ctx->dst_fmt.width, 
ctx->dst_fmt.height)); + + rotate_write(dev, ROTATE_OUT_PITCH0, pitch[0]); + rotate_write(dev, ROTATE_OUT_PITCH1, pitch[1]); + rotate_write(dev, ROTATE_OUT_PITCH2, pitch[2]); + + rotate_write(dev, ROTATE_OUT_ADDRL0, addr[0]); + rotate_write(dev, ROTATE_OUT_ADDRL1, addr[1]); + rotate_write(dev, ROTATE_OUT_ADDRL2, addr[2]); + + rotate_write(dev, ROTATE_OUT_ADDRH0, 0); + rotate_write(dev, ROTATE_OUT_ADDRH1, 0); + rotate_write(dev, ROTATE_OUT_ADDRH2, 0); + + rotate_set_bits(dev, ROTATE_INT, ROTATE_INT_FINISH_IRQ_EN); + rotate_set_bits(dev, ROTATE_GLB_CTL, ROTATE_GLB_CTL_START); +} + +static irqreturn_t rotate_irq(int irq, void *data) +{ + struct vb2_v4l2_buffer *buffer; + struct rotate_dev *dev = data; + struct rotate_ctx *ctx; + unsigned int val; + + ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev); + if (!ctx) { + v4l2_err(&dev->v4l2_dev, + "Instance released before the end of transaction\n"); + return IRQ_NONE; + } + + val = rotate_read(dev, ROTATE_INT); + if (!(val & ROTATE_INT_FINISH_IRQ)) + return IRQ_NONE; + + /* clear flag and disable irq */ + rotate_write(dev, ROTATE_INT, ROTATE_INT_FINISH_IRQ); + + buffer = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); + v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_DONE); + + buffer = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); + v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_DONE); + + v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx); + + return IRQ_HANDLED; +} + +static inline struct rotate_ctx *rotate_file2ctx(struct file *file) +{ + return container_of(file->private_data, struct rotate_ctx, fh); +} + +static void rotate_prepare_format(struct v4l2_pix_format *pix_fmt) +{ + unsigned int height, width, alignment, sizeimage, size, bpl; + const struct rotate_format *fmt; + int i; + + fmt = rotate_find_format(pix_fmt->pixelformat); + if (!fmt) + return; + + width = ALIGN(pix_fmt->width, fmt->hsub); + height = ALIGN(pix_fmt->height, fmt->vsub); + + /* all pitches have to be 16 byte aligned */ + alignment = 16; + if (fmt->planes > 1) + alignment *= fmt->hsub / fmt->bpp[1]; + bpl = ALIGN(width * fmt->bpp[0], alignment); + + sizeimage = 0; + for (i = 0; i < fmt->planes; i++) { + size = bpl * height; + if (i > 0) { + size *= fmt->bpp[i]; + size /= fmt->hsub; + size /= fmt->vsub; + } + sizeimage += size; + } + + pix_fmt->width = width; + pix_fmt->height = height; + pix_fmt->bytesperline = bpl; + pix_fmt->sizeimage = sizeimage; +} + +static int rotate_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + strscpy(cap->driver, ROTATE_NAME, sizeof(cap->driver)); + strscpy(cap->card, ROTATE_NAME, sizeof(cap->card)); + snprintf(cap->bus_info, sizeof(cap->bus_info), + "platform:%s", ROTATE_NAME); + + return 0; +} + +static int rotate_enum_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + return rotate_enum_fmt(f, true); +} + +static int rotate_enum_fmt_vid_out(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + return rotate_enum_fmt(f, false); +} + +static int rotate_enum_framesizes(struct file *file, void *priv, + struct v4l2_frmsizeenum *fsize) +{ + const struct rotate_format *fmt; + + if (fsize->index != 0) + return -EINVAL; + + fmt = rotate_find_format(fsize->pixel_format); + if (!fmt) + return -EINVAL; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + fsize->stepwise.min_width = ROTATE_MIN_WIDTH; + fsize->stepwise.min_height = ROTATE_MIN_HEIGHT; + fsize->stepwise.max_width = ROTATE_MAX_WIDTH; + fsize->stepwise.max_height = ROTATE_MAX_HEIGHT; + fsize->stepwise.step_width = fmt->hsub; + fsize->stepwise.step_height = 
fmt->vsub; + + return 0; +} + +static int rotate_set_cap_format(struct rotate_ctx *ctx, + struct v4l2_pix_format *f, + u32 rotate) +{ + const struct rotate_format *fmt; + + fmt = rotate_find_format(ctx->src_fmt.pixelformat); + if (!fmt) + return -EINVAL; + + if (fmt->flags & ROTATE_FLAG_YUV) + f->pixelformat = V4L2_PIX_FMT_YUV420; + else + f->pixelformat = ctx->src_fmt.pixelformat; + + f->field = V4L2_FIELD_NONE; + + if (rotate == 90 || rotate == 270) { + f->width = ctx->src_fmt.height; + f->height = ctx->src_fmt.width; + } else { + f->width = ctx->src_fmt.width; + f->height = ctx->src_fmt.height; + } + + rotate_prepare_format(f); + + return 0; +} + +static int rotate_g_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct rotate_ctx *ctx = rotate_file2ctx(file); + + f->fmt.pix = ctx->dst_fmt; + + return 0; +} + +static int rotate_g_fmt_vid_out(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct rotate_ctx *ctx = rotate_file2ctx(file); + + f->fmt.pix = ctx->src_fmt; + + return 0; +} + +static int rotate_try_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct rotate_ctx *ctx = rotate_file2ctx(file); + + return rotate_set_cap_format(ctx, &f->fmt.pix, ctx->rotate); +} + +static int rotate_try_fmt_vid_out(struct file *file, void *priv, + struct v4l2_format *f) +{ + if (!rotate_find_format(f->fmt.pix.pixelformat)) + f->fmt.pix.pixelformat = V4L2_PIX_FMT_ARGB32; + + if (f->fmt.pix.width < ROTATE_MIN_WIDTH) + f->fmt.pix.width = ROTATE_MIN_WIDTH; + if (f->fmt.pix.height < ROTATE_MIN_HEIGHT) + f->fmt.pix.height = ROTATE_MIN_HEIGHT; + + if (f->fmt.pix.width > ROTATE_MAX_WIDTH) + f->fmt.pix.width = ROTATE_MAX_WIDTH; + if (f->fmt.pix.height > ROTATE_MAX_HEIGHT) + f->fmt.pix.height = ROTATE_MAX_HEIGHT; + + f->fmt.pix.field = V4L2_FIELD_NONE; + + rotate_prepare_format(&f->fmt.pix); + + return 0; +} + +static int rotate_s_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct rotate_ctx *ctx = rotate_file2ctx(file); + struct vb2_queue *vq; + int ret; + + ret = rotate_try_fmt_vid_cap(file, priv, f); + if (ret) + return ret; + + vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); + if (vb2_is_busy(vq)) + return -EBUSY; + + ctx->dst_fmt = f->fmt.pix; + + return 0; +} + +static int rotate_s_fmt_vid_out(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct rotate_ctx *ctx = rotate_file2ctx(file); + struct vb2_queue *vq; + int ret; + + ret = rotate_try_fmt_vid_out(file, priv, f); + if (ret) + return ret; + + vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); + if (vb2_is_busy(vq)) + return -EBUSY; + + /* + * The capture queue also has to be checked, because its format and + * size depend on the output format and size. + */ + vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + if (vb2_is_busy(vq)) + return -EBUSY; + + ctx->src_fmt = f->fmt.pix; + + /* Propagate colorspace information to capture. 
*/ + ctx->dst_fmt.colorspace = f->fmt.pix.colorspace; + ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func; + ctx->dst_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc; + ctx->dst_fmt.quantization = f->fmt.pix.quantization; + + return rotate_set_cap_format(ctx, &ctx->dst_fmt, ctx->rotate); +} + +static const struct v4l2_ioctl_ops rotate_ioctl_ops = { + .vidioc_querycap = rotate_querycap, + + .vidioc_enum_framesizes = rotate_enum_framesizes, + + .vidioc_enum_fmt_vid_cap = rotate_enum_fmt_vid_cap, + .vidioc_g_fmt_vid_cap = rotate_g_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = rotate_try_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = rotate_s_fmt_vid_cap, + + .vidioc_enum_fmt_vid_out = rotate_enum_fmt_vid_out, + .vidioc_g_fmt_vid_out = rotate_g_fmt_vid_out, + .vidioc_try_fmt_vid_out = rotate_try_fmt_vid_out, + .vidioc_s_fmt_vid_out = rotate_s_fmt_vid_out, + + .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, + .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, + .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, + .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, + .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, + .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, + .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, + + .vidioc_streamon = v4l2_m2m_ioctl_streamon, + .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, + + .vidioc_log_status = v4l2_ctrl_log_status, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +static int rotate_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, + unsigned int *nplanes, unsigned int sizes[], + struct device *alloc_devs[]) +{ + struct rotate_ctx *ctx = vb2_get_drv_priv(vq); + struct v4l2_pix_format *pix_fmt; + + if (V4L2_TYPE_IS_OUTPUT(vq->type)) + pix_fmt = &ctx->src_fmt; + else + pix_fmt = &ctx->dst_fmt; + + if (*nplanes) { + if (sizes[0] < pix_fmt->sizeimage) + return -EINVAL; + } else { + sizes[0] = pix_fmt->sizeimage; + *nplanes = 1; + } + + return 0; +} + +static int rotate_buf_prepare(struct vb2_buffer *vb) +{ + struct vb2_queue *vq = vb->vb2_queue; + struct rotate_ctx *ctx = vb2_get_drv_priv(vq); + struct v4l2_pix_format *pix_fmt; + + if (V4L2_TYPE_IS_OUTPUT(vq->type)) + pix_fmt = &ctx->src_fmt; + else + pix_fmt = &ctx->dst_fmt; + + if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage) + return -EINVAL; + + vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage); + + return 0; +} + +static void rotate_buf_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct rotate_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + + v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); +} + +static void rotate_queue_cleanup(struct vb2_queue *vq, u32 state) +{ + struct rotate_ctx *ctx = vb2_get_drv_priv(vq); + struct vb2_v4l2_buffer *vbuf; + + do { + if (V4L2_TYPE_IS_OUTPUT(vq->type)) + vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); + else + vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); + + if (vbuf) + v4l2_m2m_buf_done(vbuf, state); + } while (vbuf); +} + +static int rotate_start_streaming(struct vb2_queue *vq, unsigned int count) +{ + if (V4L2_TYPE_IS_OUTPUT(vq->type)) { + struct rotate_ctx *ctx = vb2_get_drv_priv(vq); + struct device *dev = ctx->dev->dev; + int ret; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "Failed to enable module\n"); + + return ret; + } + } + + return 0; +} + +static void rotate_stop_streaming(struct vb2_queue *vq) +{ + if (V4L2_TYPE_IS_OUTPUT(vq->type)) { + struct rotate_ctx *ctx = vb2_get_drv_priv(vq); + + pm_runtime_put(ctx->dev->dev); + } + + rotate_queue_cleanup(vq, VB2_BUF_STATE_ERROR); +} + +static const 
struct vb2_ops rotate_qops = { + .queue_setup = rotate_queue_setup, + .buf_prepare = rotate_buf_prepare, + .buf_queue = rotate_buf_queue, + .start_streaming = rotate_start_streaming, + .stop_streaming = rotate_stop_streaming, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, +}; + +static int rotate_queue_init(void *priv, struct vb2_queue *src_vq, + struct vb2_queue *dst_vq) +{ + struct rotate_ctx *ctx = priv; + int ret; + + src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + src_vq->io_modes = VB2_MMAP | VB2_DMABUF; + src_vq->drv_priv = ctx; + src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); + src_vq->min_buffers_needed = 1; + src_vq->ops = &rotate_qops; + src_vq->mem_ops = &vb2_dma_contig_memops; + src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + src_vq->lock = &ctx->dev->dev_mutex; + src_vq->dev = ctx->dev->dev; + + ret = vb2_queue_init(src_vq); + if (ret) + return ret; + + dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + dst_vq->io_modes = VB2_MMAP | VB2_DMABUF; + dst_vq->drv_priv = ctx; + dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); + dst_vq->min_buffers_needed = 2; + dst_vq->ops = &rotate_qops; + dst_vq->mem_ops = &vb2_dma_contig_memops; + dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + dst_vq->lock = &ctx->dev->dev_mutex; + dst_vq->dev = ctx->dev->dev; + + ret = vb2_queue_init(dst_vq); + if (ret) + return ret; + + return 0; +} + +static int rotate_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct rotate_ctx *ctx = container_of(ctrl->handler, + struct rotate_ctx, + ctrl_handler); + struct v4l2_pix_format fmt; + + switch (ctrl->id) { + case V4L2_CID_HFLIP: + ctx->hflip = ctrl->val; + break; + case V4L2_CID_VFLIP: + ctx->vflip = ctrl->val; + break; + case V4L2_CID_ROTATE: + rotate_set_cap_format(ctx, &fmt, ctrl->val); + + /* Check if capture format needs to be changed */ + if (fmt.width != ctx->dst_fmt.width || + fmt.height != ctx->dst_fmt.height || + fmt.bytesperline != ctx->dst_fmt.bytesperline || + fmt.sizeimage != ctx->dst_fmt.sizeimage) { + struct vb2_queue *vq; + + vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, + V4L2_BUF_TYPE_VIDEO_CAPTURE); + if (vb2_is_busy(vq)) + return -EBUSY; + + rotate_set_cap_format(ctx, &ctx->dst_fmt, ctrl->val); + } + + ctx->rotate = ctrl->val; + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct v4l2_ctrl_ops rotate_ctrl_ops = { + .s_ctrl = rotate_s_ctrl, +}; + +static int rotate_setup_ctrls(struct rotate_ctx *ctx) +{ + v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3); + + v4l2_ctrl_new_std(&ctx->ctrl_handler, &rotate_ctrl_ops, + V4L2_CID_HFLIP, 0, 1, 1, 0); + + v4l2_ctrl_new_std(&ctx->ctrl_handler, &rotate_ctrl_ops, + V4L2_CID_VFLIP, 0, 1, 1, 0); + + v4l2_ctrl_new_std(&ctx->ctrl_handler, &rotate_ctrl_ops, + V4L2_CID_ROTATE, 0, 270, 90, 0); + + if (ctx->ctrl_handler.error) { + int err = ctx->ctrl_handler.error; + + v4l2_err(&ctx->dev->v4l2_dev, "control setup failed!\n"); + v4l2_ctrl_handler_free(&ctx->ctrl_handler); + + return err; + } + + return v4l2_ctrl_handler_setup(&ctx->ctrl_handler); +} + +static int rotate_open(struct file *file) +{ + struct rotate_dev *dev = video_drvdata(file); + struct rotate_ctx *ctx = NULL; + int ret; + + if (mutex_lock_interruptible(&dev->dev_mutex)) + return -ERESTARTSYS; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + mutex_unlock(&dev->dev_mutex); + return -ENOMEM; + } + + /* default output format */ + ctx->src_fmt.pixelformat = V4L2_PIX_FMT_ARGB32; + ctx->src_fmt.field = V4L2_FIELD_NONE; + ctx->src_fmt.width = 640; + ctx->src_fmt.height 
= 480; + rotate_prepare_format(&ctx->src_fmt); + + /* default capture format */ + rotate_set_cap_format(ctx, &ctx->dst_fmt, ctx->rotate); + + v4l2_fh_init(&ctx->fh, video_devdata(file)); + file->private_data = &ctx->fh; + ctx->dev = dev; + + ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, + &rotate_queue_init); + if (IS_ERR(ctx->fh.m2m_ctx)) { + ret = PTR_ERR(ctx->fh.m2m_ctx); + goto err_free; + } + + v4l2_fh_add(&ctx->fh); + + ret = rotate_setup_ctrls(ctx); + if (ret) + goto err_free; + + ctx->fh.ctrl_handler = &ctx->ctrl_handler; + + mutex_unlock(&dev->dev_mutex); + + return 0; + +err_free: + kfree(ctx); + mutex_unlock(&dev->dev_mutex); + + return ret; +} + +static int rotate_release(struct file *file) +{ + struct rotate_dev *dev = video_drvdata(file); + struct rotate_ctx *ctx = container_of(file->private_data, + struct rotate_ctx, fh); + + mutex_lock(&dev->dev_mutex); + + v4l2_ctrl_handler_free(&ctx->ctrl_handler); + v4l2_fh_del(&ctx->fh); + v4l2_fh_exit(&ctx->fh); + v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); + + kfree(ctx); + + mutex_unlock(&dev->dev_mutex); + + return 0; +} + +static const struct v4l2_file_operations rotate_fops = { + .owner = THIS_MODULE, + .open = rotate_open, + .release = rotate_release, + .poll = v4l2_m2m_fop_poll, + .unlocked_ioctl = video_ioctl2, + .mmap = v4l2_m2m_fop_mmap, +}; + +static const struct video_device rotate_video_device = { + .name = ROTATE_NAME, + .vfl_dir = VFL_DIR_M2M, + .fops = &rotate_fops, + .ioctl_ops = &rotate_ioctl_ops, + .minor = -1, + .release = video_device_release_empty, + .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING, +}; + +static const struct v4l2_m2m_ops rotate_m2m_ops = { + .device_run = rotate_device_run, +}; + +static int rotate_probe(struct platform_device *pdev) +{ + struct rotate_dev *dev; + struct video_device *vfd; + int irq, ret; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->vfd = rotate_video_device; + dev->dev = &pdev->dev; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(dev->dev, "Failed to get IRQ\n"); + + return irq; + } + + ret = devm_request_irq(dev->dev, irq, rotate_irq, + 0, dev_name(dev->dev), dev); + if (ret) { + dev_err(dev->dev, "Failed to request IRQ\n"); + + return ret; + } + + dev->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(dev->base)) + return PTR_ERR(dev->base); + + dev->bus_clk = devm_clk_get(dev->dev, "bus"); + if (IS_ERR(dev->bus_clk)) { + dev_err(dev->dev, "Failed to get bus clock\n"); + + return PTR_ERR(dev->bus_clk); + } + + dev->mod_clk = devm_clk_get(dev->dev, "mod"); + if (IS_ERR(dev->mod_clk)) { + dev_err(dev->dev, "Failed to get mod clock\n"); + + return PTR_ERR(dev->mod_clk); + } + + dev->rstc = devm_reset_control_get(dev->dev, NULL); + if (IS_ERR(dev->rstc)) { + dev_err(dev->dev, "Failed to get reset control\n"); + + return PTR_ERR(dev->rstc); + } + + mutex_init(&dev->dev_mutex); + + ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); + if (ret) { + dev_err(dev->dev, "Failed to register V4L2 device\n"); + + return ret; + } + + vfd = &dev->vfd; + vfd->lock = &dev->dev_mutex; + vfd->v4l2_dev = &dev->v4l2_dev; + + snprintf(vfd->name, sizeof(vfd->name), "%s", + rotate_video_device.name); + video_set_drvdata(vfd, dev); + + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); + if (ret) { + v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); + + goto err_v4l2; + } + + v4l2_info(&dev->v4l2_dev, + "Device registered as /dev/video%d\n", vfd->num); + + dev->m2m_dev = 
v4l2_m2m_init(&rotate_m2m_ops); + if (IS_ERR(dev->m2m_dev)) { + v4l2_err(&dev->v4l2_dev, + "Failed to initialize V4L2 M2M device\n"); + ret = PTR_ERR(dev->m2m_dev); + + goto err_video; + } + + platform_set_drvdata(pdev, dev); + + pm_runtime_enable(dev->dev); + + return 0; + +err_video: + video_unregister_device(&dev->vfd); +err_v4l2: + v4l2_device_unregister(&dev->v4l2_dev); + + return ret; +} + +static int rotate_remove(struct platform_device *pdev) +{ + struct rotate_dev *dev = platform_get_drvdata(pdev); + + v4l2_m2m_release(dev->m2m_dev); + video_unregister_device(&dev->vfd); + v4l2_device_unregister(&dev->v4l2_dev); + + pm_runtime_force_suspend(&pdev->dev); + + return 0; +} + +static int rotate_runtime_resume(struct device *device) +{ + struct rotate_dev *dev = dev_get_drvdata(device); + int ret; + + ret = clk_prepare_enable(dev->bus_clk); + if (ret) { + dev_err(dev->dev, "Failed to enable bus clock\n"); + + return ret; + } + + ret = clk_prepare_enable(dev->mod_clk); + if (ret) { + dev_err(dev->dev, "Failed to enable mod clock\n"); + + goto err_bus_clk; + } + + ret = reset_control_deassert(dev->rstc); + if (ret) { + dev_err(dev->dev, "Failed to apply reset\n"); + + goto err_mod_clk; + } + + return 0; + +err_mod_clk: + clk_disable_unprepare(dev->mod_clk); +err_bus_clk: + clk_disable_unprepare(dev->bus_clk); + + return ret; +} + +static int rotate_runtime_suspend(struct device *device) +{ + struct rotate_dev *dev = dev_get_drvdata(device); + + reset_control_assert(dev->rstc); + + clk_disable_unprepare(dev->mod_clk); + clk_disable_unprepare(dev->bus_clk); + + return 0; +} + +static const struct of_device_id rotate_dt_match[] = { + { .compatible = "allwinner,sun8i-a83t-de2-rotate" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rotate_dt_match); + +static const struct dev_pm_ops rotate_pm_ops = { + .runtime_resume = rotate_runtime_resume, + .runtime_suspend = rotate_runtime_suspend, +}; + +static struct platform_driver rotate_driver = { + .probe = rotate_probe, + .remove = rotate_remove, + .driver = { + .name = ROTATE_NAME, + .of_match_table = rotate_dt_match, + .pm = &rotate_pm_ops, + }, +}; +module_platform_driver(rotate_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>"); +MODULE_DESCRIPTION("Allwinner DE2 rotate driver"); diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c index be54806180a5..6c8f3702eac0 100644 --- a/drivers/media/platform/ti-vpe/cal.c +++ b/drivers/media/platform/ti-vpe/cal.c @@ -372,8 +372,6 @@ struct cal_ctx { struct v4l2_subdev *sensor; struct v4l2_fwnode_endpoint endpoint; - struct v4l2_async_subdev asd; - struct v4l2_fh fh; struct cal_dev *dev; struct cc_data *cc; @@ -722,16 +720,16 @@ static void enable_irqs(struct cal_ctx *ctx) static void disable_irqs(struct cal_ctx *ctx) { + u32 val; + /* Disable IRQ_WDMA_END 0/1 */ - reg_write_field(ctx->dev, - CAL_HL_IRQENABLE_CLR(2), - CAL_HL_IRQ_CLEAR, - CAL_HL_IRQ_MASK(ctx->csi2_port)); + val = 0; + set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port)); + reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(2), val); /* Disable IRQ_WDMA_START 0/1 */ - reg_write_field(ctx->dev, - CAL_HL_IRQENABLE_CLR(3), - CAL_HL_IRQ_CLEAR, - CAL_HL_IRQ_MASK(ctx->csi2_port)); + val = 0; + set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port)); + reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(3), val); /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */ reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0); } @@ -1948,7 +1946,7 @@ static int 
cal_complete_ctx(struct cal_ctx *ctx) vfd->lock = &ctx->mutex; video_set_drvdata(vfd, ctx); - ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, video_nr); if (ret < 0) return ret; @@ -2032,7 +2030,6 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst) parent = pdev->dev.of_node; - asd = &ctx->asd; endpoint = &ctx->endpoint; ep_node = NULL; @@ -2079,8 +2076,6 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst) ctx_dbg(3, ctx, "can't get remote parent\n"); goto cleanup_exit; } - asd->match_type = V4L2_ASYNC_MATCH_FWNODE; - asd->match.fwnode = of_fwnode_handle(sensor_node); v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), endpoint); @@ -2110,9 +2105,17 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst) v4l2_async_notifier_init(&ctx->notifier); + asd = kzalloc(sizeof(*asd), GFP_KERNEL); + if (!asd) + goto cleanup_exit; + + asd->match_type = V4L2_ASYNC_MATCH_FWNODE; + asd->match.fwnode = of_fwnode_handle(sensor_node); + ret = v4l2_async_notifier_add_subdev(&ctx->notifier, asd); if (ret) { ctx_err(ctx, "Error adding asd\n"); + kfree(asd); goto cleanup_exit; } diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c index 65c2c048b018..cff2fcd6d812 100644 --- a/drivers/media/platform/ti-vpe/vpe.c +++ b/drivers/media/platform/ti-vpe/vpe.c @@ -2500,7 +2500,7 @@ static void vpe_fw_cb(struct platform_device *pdev) vfd->lock = &dev->dev_mutex; vfd->v4l2_dev = &dev->v4l2_dev; - ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { vpe_err(dev, "Failed to register video device\n"); diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c index 78841b9015ce..ed0ad68c5c48 100644 --- a/drivers/media/platform/via-camera.c +++ b/drivers/media/platform/via-camera.c @@ -646,7 +646,7 @@ static int viacam_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) * requirement which will keep the CPU out of the deeper sleep * states. 
*/ - pm_qos_add_request(&cam->qos_request, PM_QOS_CPU_DMA_LATENCY, 50); + cpu_latency_qos_add_request(&cam->qos_request, 50); viacam_start_engine(cam); return 0; out: @@ -662,7 +662,7 @@ static void viacam_vb2_stop_streaming(struct vb2_queue *vq) struct via_camera *cam = vb2_get_drv_priv(vq); struct via_buffer *buf, *tmp; - pm_qos_remove_request(&cam->qos_request); + cpu_latency_qos_remove_request(&cam->qos_request); viacam_stop_engine(cam); list_for_each_entry_safe(buf, tmp, &cam->buffer_queue, queue) { @@ -1262,7 +1262,7 @@ static int viacam_probe(struct platform_device *pdev) cam->vdev.lock = &cam->lock; cam->vdev.queue = vq; video_set_drvdata(&cam->vdev, cam); - ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1); if (ret) goto out_irq; diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c index 3c93d9232c3c..b6e39fbd8ad5 100644 --- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c +++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c @@ -27,17 +27,17 @@ static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = { { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV}, - { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, - { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, - { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, - { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, - { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, { V4L2_PIX_FMT_BGRA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, - { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, { V4L2_PIX_FMT_RGBA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, - { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV}, + { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_HSV}, { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB}, }; @@ -175,22 +175,14 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf, case V4L2_PIX_FMT_RGB32: case V4L2_PIX_FMT_XRGB32: case V4L2_PIX_FMT_HSV32: - rf->cr = rf->luma + 1; - rf->cb = rf->cr + 2; - rf->luma += 2; - break; - case V4L2_PIX_FMT_BGR32: - case V4L2_PIX_FMT_XBGR32: - rf->cb = rf->luma; - rf->cr = rf->cb + 2; - rf->luma++; - break; case V4L2_PIX_FMT_ARGB32: rf->alpha = rf->luma; rf->cr = rf->luma + 1; rf->cb = rf->cr + 2; rf->luma += 2; break; + case V4L2_PIX_FMT_BGR32: + case V4L2_PIX_FMT_XBGR32: case V4L2_PIX_FMT_ABGR32: rf->cb = rf->luma; rf->cr = rf->cb + 2; @@ -198,10 +190,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf, rf->alpha = rf->cr + 1; break; case V4L2_PIX_FMT_BGRX32: - rf->cb = rf->luma + 1; - rf->cr = rf->cb + 2; - rf->luma += 2; - break; case 
V4L2_PIX_FMT_BGRA32: rf->alpha = rf->luma; rf->cb = rf->luma + 1; @@ -209,10 +197,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf, rf->luma += 2; break; case V4L2_PIX_FMT_RGBX32: - rf->cr = rf->luma; - rf->cb = rf->cr + 2; - rf->luma++; - break; case V4L2_PIX_FMT_RGBA32: rf->alpha = rf->luma + 3; rf->cr = rf->luma; diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c index 82350097503e..30ced1c21387 100644 --- a/drivers/media/platform/vicodec/vicodec-core.c +++ b/drivers/media/platform/vicodec/vicodec-core.c @@ -117,15 +117,10 @@ struct vicodec_ctx { struct vicodec_dev *dev; bool is_enc; bool is_stateless; - bool is_draining; - bool next_is_last; - bool has_stopped; spinlock_t *lock; struct v4l2_ctrl_handler hdl; - struct vb2_v4l2_buffer *last_src_buf; - /* Source and destination queue data */ struct vicodec_q_data q_data[2]; struct v4l2_fwht_state state; @@ -431,11 +426,11 @@ static void device_run(void *priv) v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false); spin_lock(ctx->lock); - if (!ctx->comp_has_next_frame && src_buf == ctx->last_src_buf) { + if (!ctx->comp_has_next_frame && + v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src_buf)) { dst_buf->flags |= V4L2_BUF_FLAG_LAST; v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event); - ctx->is_draining = false; - ctx->has_stopped = true; + v4l2_m2m_mark_stopped(ctx->fh.m2m_ctx); } if (ctx->is_enc || ctx->is_stateless) { src_buf->sequence = q_src->sequence++; @@ -586,8 +581,6 @@ static int job_ready(void *priv) unsigned int max_to_copy; unsigned int comp_frame_size; - if (ctx->has_stopped) - return 0; if (ctx->source_changed) return 0; if (ctx->is_stateless || ctx->is_enc || ctx->comp_has_frame) @@ -607,7 +600,8 @@ restart: if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { state = get_next_header(ctx, &p, p_src + sz - p); if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { - if (ctx->is_draining && src_buf == ctx->last_src_buf) + if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, + src_buf)) return 1; job_remove_src_buf(ctx, state); goto restart; @@ -636,7 +630,8 @@ restart: p += copy; ctx->comp_size += copy; if (ctx->comp_size < max_to_copy) { - if (ctx->is_draining && src_buf == ctx->last_src_buf) + if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, + src_buf)) return 1; job_remove_src_buf(ctx, state); goto restart; @@ -1219,41 +1214,6 @@ static int vidioc_s_selection(struct file *file, void *priv, return 0; } -static int vicodec_mark_last_buf(struct vicodec_ctx *ctx) -{ - struct vb2_v4l2_buffer *next_dst_buf; - int ret = 0; - - spin_lock(ctx->lock); - if (ctx->is_draining) { - ret = -EBUSY; - goto unlock; - } - if (ctx->has_stopped) - goto unlock; - - ctx->last_src_buf = v4l2_m2m_last_src_buf(ctx->fh.m2m_ctx); - ctx->is_draining = true; - if (ctx->last_src_buf) - goto unlock; - - next_dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); - if (!next_dst_buf) { - ctx->next_is_last = true; - goto unlock; - } - - next_dst_buf->flags |= V4L2_BUF_FLAG_LAST; - vb2_buffer_done(&next_dst_buf->vb2_buf, VB2_BUF_STATE_DONE); - ctx->is_draining = false; - ctx->has_stopped = true; - v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event); - -unlock: - spin_unlock(ctx->lock); - return ret; -} - static int vicodec_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *ec) { @@ -1268,18 +1228,19 @@ static int vicodec_encoder_cmd(struct file *file, void *fh, !vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q)) return 0; - if (ec->cmd == V4L2_ENC_CMD_STOP) - return 
vicodec_mark_last_buf(ctx); - ret = 0; - spin_lock(ctx->lock); - if (ctx->is_draining) { - ret = -EBUSY; - } else if (ctx->has_stopped) { - ctx->has_stopped = false; + ret = v4l2_m2m_ioctl_encoder_cmd(file, fh, ec); + if (ret < 0) + return ret; + + if (ec->cmd == V4L2_ENC_CMD_STOP && + v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) + v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event); + + if (ec->cmd == V4L2_ENC_CMD_START && + v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q); - } - spin_unlock(ctx->lock); - return ret; + + return 0; } static int vicodec_decoder_cmd(struct file *file, void *fh, @@ -1296,18 +1257,19 @@ static int vicodec_decoder_cmd(struct file *file, void *fh, !vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q)) return 0; - if (dc->cmd == V4L2_DEC_CMD_STOP) - return vicodec_mark_last_buf(ctx); - ret = 0; - spin_lock(ctx->lock); - if (ctx->is_draining) { - ret = -EBUSY; - } else if (ctx->has_stopped) { - ctx->has_stopped = false; + ret = v4l2_m2m_ioctl_decoder_cmd(file, fh, dc); + if (ret < 0) + return ret; + + if (dc->cmd == V4L2_DEC_CMD_STOP && + v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) + v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event); + + if (dc->cmd == V4L2_DEC_CMD_START && + v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q); - } - spin_unlock(ctx->lock); - return ret; + + return 0; } static int vicodec_enum_framesizes(struct file *file, void *fh, @@ -1480,23 +1442,21 @@ static void vicodec_buf_queue(struct vb2_buffer *vb) .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, }; - if (vb2_is_streaming(vq_cap)) { - if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type) && - ctx->next_is_last) { - unsigned int i; + if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type) && + vb2_is_streaming(vb->vb2_queue) && + v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) { + unsigned int i; - for (i = 0; i < vb->num_planes; i++) - vb->planes[i].bytesused = 0; - vbuf->flags = V4L2_BUF_FLAG_LAST; - vbuf->field = V4L2_FIELD_NONE; - vbuf->sequence = get_q_data(ctx, vb->vb2_queue->type)->sequence++; - vb2_buffer_done(vb, VB2_BUF_STATE_DONE); - ctx->is_draining = false; - ctx->has_stopped = true; - ctx->next_is_last = false; - v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event); - return; - } + for (i = 0; i < vb->num_planes; i++) + vb->planes[i].bytesused = 0; + + vbuf->field = V4L2_FIELD_NONE; + vbuf->sequence = + get_q_data(ctx, vb->vb2_queue->type)->sequence++; + + v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf); + v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event); + return; } /* buf_queue handles only the first source change event */ @@ -1609,8 +1569,7 @@ static int vicodec_start_streaming(struct vb2_queue *q, chroma_div = info->width_div * info->height_div; q_data->sequence = 0; - if (V4L2_TYPE_IS_OUTPUT(q->type)) - ctx->last_src_buf = NULL; + v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q); state->gop_cnt = 0; @@ -1689,29 +1648,12 @@ static void vicodec_stop_streaming(struct vb2_queue *q) vicodec_return_bufs(q, VB2_BUF_STATE_ERROR); - if (V4L2_TYPE_IS_OUTPUT(q->type)) { - if (ctx->is_draining) { - struct vb2_v4l2_buffer *next_dst_buf; - - spin_lock(ctx->lock); - ctx->last_src_buf = NULL; - next_dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); - if (!next_dst_buf) { - ctx->next_is_last = true; - } else { - next_dst_buf->flags |= V4L2_BUF_FLAG_LAST; - vb2_buffer_done(&next_dst_buf->vb2_buf, VB2_BUF_STATE_DONE); - ctx->is_draining = false; - ctx->has_stopped = true; - v4l2_event_queue_fh(&ctx->fh, 
&vicodec_eos_event); - } - spin_unlock(ctx->lock); - } - } else { - ctx->is_draining = false; - ctx->has_stopped = false; - ctx->next_is_last = false; - } + v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q); + + if (V4L2_TYPE_IS_OUTPUT(q->type) && + v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) + v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event); + if (!ctx->is_enc && V4L2_TYPE_IS_OUTPUT(q->type)) ctx->first_source_change_sent = false; @@ -2120,7 +2062,7 @@ static int register_instance(struct vicodec_dev *dev, } video_set_drvdata(vfd, dev); - ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device '%s'\n", name); v4l2_m2m_release(dev_instance->m2m_dev); diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index 8d6b09623d88..ac6717fbb764 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c @@ -1333,7 +1333,7 @@ static int vim2m_probe(struct platform_device *pdev) vfd->lock = &dev->dev_mutex; vfd->v4l2_dev = &dev->v4l2_dev; - ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto error_v4l2; diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c index 76c015898cfd..23e740c1c5c0 100644 --- a/drivers/media/platform/vimc/vimc-capture.c +++ b/drivers/media/platform/vimc/vimc-capture.c @@ -325,20 +325,20 @@ static const struct media_entity_operations vimc_cap_mops = { .link_validate = vimc_vdev_link_validate, }; -static void vimc_cap_release(struct video_device *vdev) +void vimc_cap_release(struct vimc_ent_device *ved) { struct vimc_cap_device *vcap = - container_of(vdev, struct vimc_cap_device, vdev); + container_of(ved, struct vimc_cap_device, ved); media_entity_cleanup(vcap->ved.ent); kfree(vcap); } -void vimc_cap_rm(struct vimc_device *vimc, struct vimc_ent_device *ved) +void vimc_cap_unregister(struct vimc_ent_device *ved) { - struct vimc_cap_device *vcap; + struct vimc_cap_device *vcap = + container_of(ved, struct vimc_cap_device, ved); - vcap = container_of(ved, struct vimc_cap_device, ved); vb2_queue_release(&vcap->queue); video_unregister_device(&vcap->vdev); } @@ -423,7 +423,7 @@ struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc, ret = vb2_queue_init(q); if (ret) { - dev_err(&vimc->pdev.dev, "%s: vb2 queue init failed (err=%d)\n", + dev_err(vimc->mdev.dev, "%s: vb2 queue init failed (err=%d)\n", vcfg_name, ret); goto err_clean_m_ent; } @@ -443,13 +443,13 @@ struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc, vcap->ved.ent = &vcap->vdev.entity; vcap->ved.process_frame = vimc_cap_process_frame; vcap->ved.vdev_get_format = vimc_cap_get_format; - vcap->ved.dev = &vimc->pdev.dev; + vcap->ved.dev = vimc->mdev.dev; /* Initialize the video_device struct */ vdev = &vcap->vdev; vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; vdev->entity.ops = &vimc_cap_mops; - vdev->release = vimc_cap_release; + vdev->release = video_device_release_empty; vdev->fops = &vimc_cap_fops; vdev->ioctl_ops = &vimc_cap_ioctl_ops; vdev->lock = &vcap->lock; @@ -460,9 +460,9 @@ struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc, video_set_drvdata(vdev, &vcap->ved); /* Register the video_device with the v4l2 and the media framework */ - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = 
video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret) { - dev_err(&vimc->pdev.dev, "%s: video register failed (err=%d)\n", + dev_err(vimc->mdev.dev, "%s: video register failed (err=%d)\n", vcap->vdev.name, ret); goto err_release_queue; } diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c index 16ce9f3b7c75..c95c17c048f2 100644 --- a/drivers/media/platform/vimc/vimc-common.c +++ b/drivers/media/platform/vimc/vimc-common.c @@ -327,7 +327,6 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved, u32 function, u16 num_pads, struct media_pad *pads, - const struct v4l2_subdev_internal_ops *sd_int_ops, const struct v4l2_subdev_ops *sd_ops) { int ret; @@ -337,7 +336,6 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved, /* Initialize the subdev */ v4l2_subdev_init(sd, sd_ops); - sd->internal_ops = sd_int_ops; sd->entity.function = function; sd->entity.ops = &vimc_ent_sd_mops; sd->owner = THIS_MODULE; diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h index 87eb8259c2a8..616d5a6b0754 100644 --- a/drivers/media/platform/vimc/vimc-common.h +++ b/drivers/media/platform/vimc/vimc-common.h @@ -106,14 +106,12 @@ struct vimc_ent_device { /** * struct vimc_device - main device for vimc driver * - * @pdev pointer to the platform device * @pipe_cfg pointer to the vimc pipeline configuration structure * @ent_devs array of vimc_ent_device pointers * @mdev the associated media_device parent * @v4l2_dev Internal v4l2 parent device */ struct vimc_device { - struct platform_device pdev; const struct vimc_pipeline_config *pipe_cfg; struct vimc_ent_device **ent_devs; struct media_device mdev; @@ -127,16 +125,18 @@ struct vimc_device { * @name entity name * @ved pointer to vimc_ent_device (a node in the * topology) - * @add subdev add hook - initializes and registers - * subdev called from vimc-core - * @rm subdev rm hook - unregisters and frees - * subdev called from vimc-core + * @add initializes and registers + * vimc entity - called from vimc-core + * @unregister unregisters vimc entity - called from vimc-core + * @release releases vimc entity - called from the v4l2_dev + * release callback */ struct vimc_ent_config { const char *name; struct vimc_ent_device *(*add)(struct vimc_device *vimc, const char *vcfg_name); - void (*rm)(struct vimc_device *vimc, struct vimc_ent_device *ved); + void (*unregister)(struct vimc_ent_device *ved); + void (*release)(struct vimc_ent_device *ved); }; /** @@ -147,22 +147,23 @@ struct vimc_ent_config { */ bool vimc_is_source(struct media_entity *ent); -/* prototypes for vimc_ent_config add and rm hooks */ +/* prototypes for vimc_ent_config hooks */ struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc, const char *vcfg_name); -void vimc_cap_rm(struct vimc_device *vimc, struct vimc_ent_device *ved); +void vimc_cap_unregister(struct vimc_ent_device *ved); +void vimc_cap_release(struct vimc_ent_device *ved); struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc, const char *vcfg_name); -void vimc_deb_rm(struct vimc_device *vimc, struct vimc_ent_device *ved); +void vimc_deb_release(struct vimc_ent_device *ved); struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc, const char *vcfg_name); -void vimc_sca_rm(struct vimc_device *vimc, struct vimc_ent_device *ved); +void vimc_sca_release(struct vimc_ent_device *ved); struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc, const char *vcfg_name); -void vimc_sen_rm(struct vimc_device *vimc, struct 
vimc_ent_device *ved); +void vimc_sen_release(struct vimc_ent_device *ved); /** * vimc_pix_map_by_index - get vimc_pix_map struct by its index @@ -197,7 +198,6 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat); * @num_pads: number of pads to initialize * @pads: the array of pads of the entity, the caller should set the flags of the pads - * @sd_int_ops: pointer to &struct v4l2_subdev_internal_ops * @sd_ops: pointer to &struct v4l2_subdev_ops. * * Helper function initialize and register the struct vimc_ent_device and struct @@ -210,7 +210,6 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved, u32 function, u16 num_pads, struct media_pad *pads, - const struct v4l2_subdev_internal_ops *sd_int_ops, const struct v4l2_subdev_ops *sd_ops); /** diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c index 97a272f3350a..339126e565dc 100644 --- a/drivers/media/platform/vimc/vimc-core.c +++ b/drivers/media/platform/vimc/vimc-core.c @@ -48,48 +48,51 @@ static struct vimc_ent_config ent_config[] = { { .name = "Sensor A", .add = vimc_sen_add, - .rm = vimc_sen_rm, + .release = vimc_sen_release, }, { .name = "Sensor B", .add = vimc_sen_add, - .rm = vimc_sen_rm, + .release = vimc_sen_release, }, { .name = "Debayer A", .add = vimc_deb_add, - .rm = vimc_deb_rm, + .release = vimc_deb_release, }, { .name = "Debayer B", .add = vimc_deb_add, - .rm = vimc_deb_rm, + .release = vimc_deb_release, }, { .name = "Raw Capture 0", .add = vimc_cap_add, - .rm = vimc_cap_rm, + .unregister = vimc_cap_unregister, + .release = vimc_cap_release, }, { .name = "Raw Capture 1", .add = vimc_cap_add, - .rm = vimc_cap_rm, + .unregister = vimc_cap_unregister, + .release = vimc_cap_release, }, { /* TODO: change this to vimc-input when it is implemented */ .name = "RGB/YUV Input", .add = vimc_sen_add, - .rm = vimc_sen_rm, + .release = vimc_sen_release, }, { .name = "Scaler", .add = vimc_sca_add, - .rm = vimc_sca_rm, + .release = vimc_sca_release, }, { .name = "RGB/YUV Capture", .add = vimc_cap_add, - .rm = vimc_cap_rm, + .unregister = vimc_cap_unregister, + .release = vimc_cap_release, }, }; @@ -162,12 +165,12 @@ static int vimc_add_subdevs(struct vimc_device *vimc) unsigned int i; for (i = 0; i < vimc->pipe_cfg->num_ents; i++) { - dev_dbg(&vimc->pdev.dev, "new entity for %s\n", + dev_dbg(vimc->mdev.dev, "new entity for %s\n", vimc->pipe_cfg->ents[i].name); vimc->ent_devs[i] = vimc->pipe_cfg->ents[i].add(vimc, vimc->pipe_cfg->ents[i].name); if (!vimc->ent_devs[i]) { - dev_err(&vimc->pdev.dev, "add new entity for %s\n", + dev_err(vimc->mdev.dev, "add new entity for %s\n", vimc->pipe_cfg->ents[i].name); return -EINVAL; } @@ -175,13 +178,33 @@ static int vimc_add_subdevs(struct vimc_device *vimc) return 0; } -static void vimc_rm_subdevs(struct vimc_device *vimc) +static void vimc_release_subdevs(struct vimc_device *vimc) { unsigned int i; for (i = 0; i < vimc->pipe_cfg->num_ents; i++) if (vimc->ent_devs[i]) - vimc->pipe_cfg->ents[i].rm(vimc, vimc->ent_devs[i]); + vimc->pipe_cfg->ents[i].release(vimc->ent_devs[i]); +} + +static void vimc_unregister_subdevs(struct vimc_device *vimc) +{ + unsigned int i; + + for (i = 0; i < vimc->pipe_cfg->num_ents; i++) + if (vimc->ent_devs[i] && vimc->pipe_cfg->ents[i].unregister) + vimc->pipe_cfg->ents[i].unregister(vimc->ent_devs[i]); +} + +static void vimc_v4l2_dev_release(struct v4l2_device *v4l2_dev) +{ + struct vimc_device *vimc = + container_of(v4l2_dev, struct vimc_device, v4l2_dev); + + vimc_release_subdevs(vimc); + 
media_device_cleanup(&vimc->mdev); + kfree(vimc->ent_devs); + kfree(vimc); } static int vimc_register_devices(struct vimc_device *vimc) @@ -195,7 +218,6 @@ static int vimc_register_devices(struct vimc_device *vimc) "v4l2 device register failed (err=%d)\n", ret); return ret; } - /* allocate ent_devs */ vimc->ent_devs = kcalloc(vimc->pipe_cfg->num_ents, sizeof(*vimc->ent_devs), GFP_KERNEL); @@ -236,9 +258,9 @@ static int vimc_register_devices(struct vimc_device *vimc) err_mdev_unregister: media_device_unregister(&vimc->mdev); - media_device_cleanup(&vimc->mdev); err_rm_subdevs: - vimc_rm_subdevs(vimc); + vimc_unregister_subdevs(vimc); + vimc_release_subdevs(vimc); kfree(vimc->ent_devs); err_v4l2_unregister: v4l2_device_unregister(&vimc->v4l2_dev); @@ -248,20 +270,23 @@ err_v4l2_unregister: static void vimc_unregister(struct vimc_device *vimc) { + vimc_unregister_subdevs(vimc); media_device_unregister(&vimc->mdev); - media_device_cleanup(&vimc->mdev); v4l2_device_unregister(&vimc->v4l2_dev); - kfree(vimc->ent_devs); } static int vimc_probe(struct platform_device *pdev) { - struct vimc_device *vimc = container_of(pdev, struct vimc_device, pdev); + struct vimc_device *vimc; int ret; dev_dbg(&pdev->dev, "probe"); - memset(&vimc->mdev, 0, sizeof(vimc->mdev)); + vimc = kzalloc(sizeof(*vimc), GFP_KERNEL); + if (!vimc) + return -ENOMEM; + + vimc->pipe_cfg = &pipe_cfg; /* Link the media device within the v4l2_device */ vimc->v4l2_dev.mdev = &vimc->mdev; @@ -277,20 +302,27 @@ static int vimc_probe(struct platform_device *pdev) ret = vimc_register_devices(vimc); if (ret) { media_device_cleanup(&vimc->mdev); + kfree(vimc); return ret; } + /* + * the release cb is set only after successful registration. + * if the registration fails, we release directly from probe + */ + vimc->v4l2_dev.release = vimc_v4l2_dev_release; + platform_set_drvdata(pdev, vimc); return 0; } static int vimc_remove(struct platform_device *pdev) { - struct vimc_device *vimc = container_of(pdev, struct vimc_device, pdev); + struct vimc_device *vimc = platform_get_drvdata(pdev); dev_dbg(&pdev->dev, "remove"); - vimc_rm_subdevs(vimc); vimc_unregister(vimc); + v4l2_device_put(&vimc->v4l2_dev); return 0; } @@ -299,12 +331,9 @@ static void vimc_dev_release(struct device *dev) { } -static struct vimc_device vimc_dev = { - .pipe_cfg = &pipe_cfg, - .pdev = { - .name = VIMC_PDEV_NAME, - .dev.release = vimc_dev_release, - } +static struct platform_device vimc_pdev = { + .name = VIMC_PDEV_NAME, + .dev.release = vimc_dev_release, }; static struct platform_driver vimc_pdrv = { @@ -319,16 +348,16 @@ static int __init vimc_init(void) { int ret; - ret = platform_device_register(&vimc_dev.pdev); + ret = platform_device_register(&vimc_pdev); if (ret) { - dev_err(&vimc_dev.pdev.dev, + dev_err(&vimc_pdev.dev, "platform device registration failed (err=%d)\n", ret); return ret; } ret = platform_driver_register(&vimc_pdrv); if (ret) { - dev_err(&vimc_dev.pdev.dev, + dev_err(&vimc_pdev.dev, "platform driver registration failed (err=%d)\n", ret); platform_driver_unregister(&vimc_pdrv); return ret; @@ -341,7 +370,7 @@ static void __exit vimc_exit(void) { platform_driver_unregister(&vimc_pdrv); - platform_device_unregister(&vimc_dev.pdev); + platform_device_unregister(&vimc_pdev); } module_init(vimc_init); diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c index 5d1b67d684bb..baf6bf9f65b5 100644 --- a/drivers/media/platform/vimc/vimc-debayer.c +++ b/drivers/media/platform/vimc/vimc-debayer.c @@ -494,28 +494,16 
@@ static const struct v4l2_ctrl_ops vimc_deb_ctrl_ops = { .s_ctrl = vimc_deb_s_ctrl, }; -static void vimc_deb_release(struct v4l2_subdev *sd) +void vimc_deb_release(struct vimc_ent_device *ved) { struct vimc_deb_device *vdeb = - container_of(sd, struct vimc_deb_device, sd); + container_of(ved, struct vimc_deb_device, ved); v4l2_ctrl_handler_free(&vdeb->hdl); media_entity_cleanup(vdeb->ved.ent); kfree(vdeb); } -static const struct v4l2_subdev_internal_ops vimc_deb_int_ops = { - .release = vimc_deb_release, -}; - -void vimc_deb_rm(struct vimc_device *vimc, struct vimc_ent_device *ved) -{ - struct vimc_deb_device *vdeb; - - vdeb = container_of(ved, struct vimc_deb_device, ved); - v4l2_device_unregister_subdev(&vdeb->sd); -} - static const struct v4l2_ctrl_config vimc_deb_ctrl_class = { .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY, .id = VIMC_CID_VIMC_CLASS, @@ -563,13 +551,12 @@ struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc, ret = vimc_ent_sd_register(&vdeb->ved, &vdeb->sd, v4l2_dev, vcfg_name, MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV, 2, - vdeb->pads, - &vimc_deb_int_ops, &vimc_deb_ops); + vdeb->pads, &vimc_deb_ops); if (ret) goto err_free_hdl; vdeb->ved.process_frame = vimc_deb_process_frame; - vdeb->ved.dev = &vimc->pdev.dev; + vdeb->ved.dev = vimc->mdev.dev; vdeb->mean_win_size = vimc_deb_ctrl_mean_win_size.def; /* Initialize the frame format */ diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c index e2e551bc3ded..7521439747c5 100644 --- a/drivers/media/platform/vimc/vimc-scaler.c +++ b/drivers/media/platform/vimc/vimc-scaler.c @@ -464,27 +464,15 @@ static void *vimc_sca_process_frame(struct vimc_ent_device *ved, return vsca->src_frame; }; -static void vimc_sca_release(struct v4l2_subdev *sd) +void vimc_sca_release(struct vimc_ent_device *ved) { struct vimc_sca_device *vsca = - container_of(sd, struct vimc_sca_device, sd); + container_of(ved, struct vimc_sca_device, ved); media_entity_cleanup(vsca->ved.ent); kfree(vsca); } -static const struct v4l2_subdev_internal_ops vimc_sca_int_ops = { - .release = vimc_sca_release, -}; - -void vimc_sca_rm(struct vimc_device *vimc, struct vimc_ent_device *ved) -{ - struct vimc_sca_device *vsca; - - vsca = container_of(ved, struct vimc_sca_device, ved); - v4l2_device_unregister_subdev(&vsca->sd); -} - struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc, const char *vcfg_name) { @@ -504,15 +492,14 @@ struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc, ret = vimc_ent_sd_register(&vsca->ved, &vsca->sd, v4l2_dev, vcfg_name, MEDIA_ENT_F_PROC_VIDEO_SCALER, 2, - vsca->pads, - &vimc_sca_int_ops, &vimc_sca_ops); + vsca->pads, &vimc_sca_ops); if (ret) { kfree(vsca); return NULL; } vsca->ved.process_frame = vimc_sca_process_frame; - vsca->ved.dev = &vimc->pdev.dev; + vsca->ved.dev = vimc->mdev.dev; /* Initialize the frame format */ vsca->sink_fmt = sink_fmt_default; diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c index 32380f504591..92daee58209e 100644 --- a/drivers/media/platform/vimc/vimc-sensor.c +++ b/drivers/media/platform/vimc/vimc-sensor.c @@ -279,10 +279,10 @@ static const struct v4l2_ctrl_ops vimc_sen_ctrl_ops = { .s_ctrl = vimc_sen_s_ctrl, }; -static void vimc_sen_release(struct v4l2_subdev *sd) +void vimc_sen_release(struct vimc_ent_device *ved) { struct vimc_sen_device *vsen = - container_of(sd, struct vimc_sen_device, sd); + container_of(ved, struct vimc_sen_device, ved); 
v4l2_ctrl_handler_free(&vsen->hdl); tpg_free(&vsen->tpg); @@ -290,18 +290,6 @@ static void vimc_sen_release(struct v4l2_subdev *sd) kfree(vsen); } -static const struct v4l2_subdev_internal_ops vimc_sen_int_ops = { - .release = vimc_sen_release, -}; - -void vimc_sen_rm(struct vimc_device *vimc, struct vimc_ent_device *ved) -{ - struct vimc_sen_device *vsen; - - vsen = container_of(ved, struct vimc_sen_device, ved); - v4l2_device_unregister_subdev(&vsen->sd); -} - /* Image Processing Controls */ static const struct v4l2_ctrl_config vimc_sen_ctrl_class = { .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY, @@ -365,12 +353,12 @@ struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc, ret = vimc_ent_sd_register(&vsen->ved, &vsen->sd, v4l2_dev, vcfg_name, MEDIA_ENT_F_CAM_SENSOR, 1, &vsen->pad, - &vimc_sen_int_ops, &vimc_sen_ops); + &vimc_sen_ops); if (ret) goto err_free_tpg; vsen->ved.process_frame = vimc_sen_process_frame; - vsen->ved.dev = &vimc->pdev.dev; + vsen->ved.dev = vimc->mdev.dev; /* Initialize the frame format */ vsen->mbus_format = fmt_default; diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c index cd6b55433c9e..65feb3c596db 100644 --- a/drivers/media/platform/vimc/vimc-streamer.c +++ b/drivers/media/platform/vimc/vimc-streamer.c @@ -207,16 +207,27 @@ int vimc_streamer_s_stream(struct vimc_stream *stream, stream->kthread = kthread_run(vimc_streamer_thread, stream, "vimc-streamer thread"); - if (IS_ERR(stream->kthread)) - return PTR_ERR(stream->kthread); + if (IS_ERR(stream->kthread)) { + ret = PTR_ERR(stream->kthread); + dev_err(ved->dev, "kthread_run failed with %d\n", ret); + vimc_streamer_pipeline_terminate(stream); + stream->kthread = NULL; + return ret; + } } else { if (!stream->kthread) return 0; ret = kthread_stop(stream->kthread); + /* + * kthread_stop returns -EINTR in cases when streamon was + * immediately followed by streamoff, and the thread didn't have + * a chance to run. Ignore errors to stop the stream in the + * pipeline. 
+ */ if (ret) - return ret; + dev_dbg(ved->dev, "kthread_stop returned '%d'\n", ret); stream->kthread = NULL; diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c index 15091cbf6de7..6c740e3e6999 100644 --- a/drivers/media/platform/vivid/vivid-core.c +++ b/drivers/media/platform/vivid/vivid-core.c @@ -407,7 +407,7 @@ static int vidioc_log_status(struct file *file, void *fh) struct video_device *vdev = video_devdata(file); v4l2_ctrl_log_status(file, fh); - if (vdev->vfl_dir == VFL_DIR_RX && vdev->vfl_type == VFL_TYPE_GRABBER) + if (vdev->vfl_dir == VFL_DIR_RX && vdev->vfl_type == VFL_TYPE_VIDEO) tpg_log_status(&dev->tpg); return 0; } @@ -1525,7 +1525,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) } #endif - ret = video_register_device(vfd, VFL_TYPE_GRABBER, vid_cap_nr[inst]); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, vid_cap_nr[inst]); if (ret < 0) goto unreg_dev; v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s\n", @@ -1571,14 +1571,14 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) } v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI output %d\n", dev_name(&dev->cec_tx_adap[i]->devnode.dev), i); - if (i <= out_type_counter[HDMI]) - cec_s_phys_addr(dev->cec_tx_adap[i], i << 12, false); + if (i < out_type_counter[HDMI]) + cec_s_phys_addr(dev->cec_tx_adap[i], (i + 1) << 12, false); else cec_s_phys_addr(dev->cec_tx_adap[i], 0x1000, false); } #endif - ret = video_register_device(vfd, VFL_TYPE_GRABBER, vid_out_nr[inst]); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, vid_out_nr[inst]); if (ret < 0) goto unreg_dev; v4l2_info(&dev->v4l2_dev, "V4L2 output device registered as %s\n", @@ -1734,7 +1734,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) if (ret) goto unreg_dev; #endif - ret = video_register_device(vfd, VFL_TYPE_GRABBER, + ret = video_register_device(vfd, VFL_TYPE_VIDEO, meta_cap_nr[inst]); if (ret < 0) goto unreg_dev; @@ -1764,7 +1764,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) if (ret) goto unreg_dev; #endif - ret = video_register_device(vfd, VFL_TYPE_GRABBER, + ret = video_register_device(vfd, VFL_TYPE_VIDEO, meta_out_nr[inst]); if (ret < 0) goto unreg_dev; diff --git a/drivers/media/platform/vsp1/vsp1_histo.c b/drivers/media/platform/vsp1/vsp1_histo.c index 30d751f2cccf..a91e142bcb94 100644 --- a/drivers/media/platform/vsp1/vsp1_histo.c +++ b/drivers/media/platform/vsp1/vsp1_histo.c @@ -551,7 +551,7 @@ int vsp1_histogram_init(struct vsp1_device *vsp1, struct vsp1_histogram *histo, histo->video.fops = &histo_v4l2_fops; snprintf(histo->video.name, sizeof(histo->video.name), "%s histo", histo->entity.subdev.name); - histo->video.vfl_type = VFL_TYPE_GRABBER; + histo->video.vfl_type = VFL_TYPE_VIDEO; histo->video.release = video_device_release_empty; histo->video.ioctl_ops = &histo_v4l2_ioctl_ops; histo->video.device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING; @@ -576,7 +576,7 @@ int vsp1_histogram_init(struct vsp1_device *vsp1, struct vsp1_histogram *histo, /* ... and register the video device. 
*/ histo->video.queue = &histo->queue; - ret = video_register_device(&histo->video, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&histo->video, VFL_TYPE_VIDEO, -1); if (ret < 0) { dev_err(vsp1->dev, "failed to register video device\n"); goto error; diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h index 5c67ff92d97a..fe3130db1fa2 100644 --- a/drivers/media/platform/vsp1/vsp1_regs.h +++ b/drivers/media/platform/vsp1/vsp1_regs.h @@ -706,7 +706,7 @@ #define VI6_HGT_HUE_AREA_LOWER_SHIFT 16 #define VI6_HGT_HUE_AREA_UPPER_SHIFT 0 #define VI6_HGT_LB_TH 0x3424 -#define VI6_HGT_LBn_H(n) (0x3438 + (n) * 8) +#define VI6_HGT_LBn_H(n) (0x3428 + (n) * 8) #define VI6_HGT_LBn_V(n) (0x342c + (n) * 8) #define VI6_HGT_HISTO(m, n) (0x3450 + (m) * 128 + (n) * 4) #define VI6_HGT_MAXMIN 0x3750 diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c index 5e59ed2c3614..044eb5778820 100644 --- a/drivers/media/platform/vsp1/vsp1_video.c +++ b/drivers/media/platform/vsp1/vsp1_video.c @@ -1293,7 +1293,7 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1, video->video.fops = &vsp1_video_fops; snprintf(video->video.name, sizeof(video->video.name), "%s %s", rwpf->entity.subdev.name, direction); - video->video.vfl_type = VFL_TYPE_GRABBER; + video->video.vfl_type = VFL_TYPE_VIDEO; video->video.release = video_device_release_empty; video->video.ioctl_ops = &vsp1_video_ioctl_ops; @@ -1316,7 +1316,7 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1, /* ... and register the video device. */ video->video.queue = &video->queue; - ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1); if (ret < 0) { dev_err(video->vsp1->dev, "failed to register video device\n"); goto error; diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c index b211380a11f2..2a56201cb853 100644 --- a/drivers/media/platform/xilinx/xilinx-dma.c +++ b/drivers/media/platform/xilinx/xilinx-dma.c @@ -685,7 +685,7 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma, xdev->dev->of_node, type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input", port); - dma->video.vfl_type = VFL_TYPE_GRABBER; + dma->video.vfl_type = VFL_TYPE_VIDEO; dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? VFL_DIR_RX : VFL_DIR_TX; dma->video.release = video_device_release_empty; @@ -725,16 +725,17 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma, /* ... and the DMA channel. 
*/ snprintf(name, sizeof(name), "port%u", port); - dma->dma = dma_request_slave_channel(dma->xdev->dev, name); - if (dma->dma == NULL) { - dev_err(dma->xdev->dev, "no VDMA channel found\n"); - ret = -ENODEV; + dma->dma = dma_request_chan(dma->xdev->dev, name); + if (IS_ERR(dma->dma)) { + ret = PTR_ERR(dma->dma); + if (ret != -EPROBE_DEFER) + dev_err(dma->xdev->dev, "no VDMA channel found\n"); goto error; } dma->align = 1 << dma->dma->device->copy_align; - ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&dma->video, VFL_TYPE_VIDEO, -1); if (ret < 0) { dev_err(dma->xdev->dev, "failed to register video device\n"); goto error; @@ -752,7 +753,7 @@ void xvip_dma_cleanup(struct xvip_dma *dma) if (video_is_registered(&dma->video)) video_unregister_device(&dma->video); - if (dma->dma) + if (!IS_ERR_OR_NULL(dma->dma)) dma_release_channel(dma->dma); media_entity_cleanup(&dma->video.entity); diff --git a/drivers/media/radio/si470x/Kconfig b/drivers/media/radio/si470x/Kconfig index 537f8e1601f3..a1ba8bc54b62 100644 --- a/drivers/media/radio/si470x/Kconfig +++ b/drivers/media/radio/si470x/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config RADIO_SI470X - tristate "Silicon Labs Si470x FM Radio Receiver support" - depends on VIDEO_V4L2 + tristate "Silicon Labs Si470x FM Radio Receiver support" + depends on VIDEO_V4L2 help This is a driver for devices with the Silicon Labs SI470x chip (either via USB or I2C buses). diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index 0a0ce620e4a2..0f3417d161b8 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -35,11 +35,6 @@ static const struct bpf_func_proto rc_repeat_proto = { .arg1_type = ARG_PTR_TO_CTX, }; -/* - * Currently rc-core does not support 64-bit scancodes, but there are many - * known protocols with more than 32 bits. So, define the interface as u64 - * as a future-proof. 
- */ BPF_CALL_4(bpf_rc_keydown, u32*, sample, u32, protocol, u64, scancode, u32, toggle) { diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c index a7deca1fefb7..3c8bd13d029a 100644 --- a/drivers/media/rc/iguanair.c +++ b/drivers/media/rc/iguanair.c @@ -76,7 +76,7 @@ struct send_packet { uint8_t channels; uint8_t busy7; uint8_t busy4; - uint8_t payload[0]; + uint8_t payload[]; }; static void process_ir_data(struct iguanair *ir, unsigned len) diff --git a/drivers/media/rc/ir-xmp-decoder.c b/drivers/media/rc/ir-xmp-decoder.c index 74a1d30fae6e..4c3d03876200 100644 --- a/drivers/media/rc/ir-xmp-decoder.c +++ b/drivers/media/rc/ir-xmp-decoder.c @@ -166,7 +166,7 @@ static int ir_xmp_decode(struct rc_dev *dev, struct ir_raw_event ev) } else if (geq_margin(ev.duration, XMP_NIBBLE_PREFIX, XMP_UNIT)) { /* store nibble raw data, decode after trailer */ if (data->count == 16) { - dev_dbg(&dev->dev, "to many pulses (%d) ignoring: %u\n", + dev_dbg(&dev->dev, "too many pulses (%d) ignoring: %u\n", data->count, ev.duration); data->state = STATE_INACTIVE; return -EINVAL; diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile index 63261ef6380a..aaa1bf81d00d 100644 --- a/drivers/media/rc/keymaps/Makefile +++ b/drivers/media/rc/keymaps/Makefile @@ -119,6 +119,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ rc-videomate-m1f.o \ rc-videomate-s350.o \ rc-videomate-tv-pvr.o \ + rc-videostrong-kii-pro.o \ rc-wetek-hub.o \ rc-wetek-play2.o \ rc-winfast.o \ diff --git a/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c b/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c new file mode 100644 index 000000000000..414d4d231e7e --- /dev/null +++ b/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Copyright (C) 2019 Mohammad Rasim <mohammad.rasim96@gmail.com> + +#include <media/rc-map.h> +#include <linux/module.h> + +// +// Keytable for the Videostrong KII Pro STB remote control +// + +static struct rc_map_table kii_pro[] = { + { 0x59, KEY_POWER }, + { 0x19, KEY_MUTE }, + { 0x42, KEY_RED }, + { 0x40, KEY_GREEN }, + { 0x00, KEY_YELLOW }, + { 0x03, KEY_BLUE }, + { 0x4a, KEY_BACK }, + { 0x48, KEY_FORWARD }, + { 0x08, KEY_PREVIOUSSONG}, + { 0x0b, KEY_NEXTSONG}, + { 0x46, KEY_PLAYPAUSE }, + { 0x44, KEY_STOP }, + { 0x1f, KEY_FAVORITES}, //KEY_F5? + { 0x04, KEY_PVR }, + { 0x4d, KEY_EPG }, + { 0x02, KEY_INFO }, + { 0x09, KEY_SUBTITLE }, + { 0x01, KEY_AUDIO }, + { 0x0d, KEY_HOMEPAGE }, + { 0x11, KEY_TV }, // DTV ? + { 0x06, KEY_UP }, + { 0x5a, KEY_LEFT }, + { 0x1a, KEY_ENTER }, // KEY_OK ? + { 0x1b, KEY_RIGHT }, + { 0x16, KEY_DOWN }, + { 0x45, KEY_MENU }, + { 0x05, KEY_ESC }, + { 0x13, KEY_VOLUMEUP }, + { 0x17, KEY_VOLUMEDOWN }, + { 0x58, KEY_APPSELECT }, + { 0x12, KEY_VENDOR }, // mouse + { 0x55, KEY_PAGEUP }, // KEY_CHANNELUP ? + { 0x15, KEY_PAGEDOWN }, // KEY_CHANNELDOWN ? 
+ { 0x52, KEY_1 }, + { 0x50, KEY_2 }, + { 0x10, KEY_3 }, + { 0x56, KEY_4 }, + { 0x54, KEY_5 }, + { 0x14, KEY_6 }, + { 0x4e, KEY_7 }, + { 0x4c, KEY_8 }, + { 0x0c, KEY_9 }, + { 0x18, KEY_WWW }, // KEY_F7 + { 0x0f, KEY_0 }, + { 0x51, KEY_BACKSPACE }, +}; + +static struct rc_map_list kii_pro_map = { + .map = { + .scan = kii_pro, + .size = ARRAY_SIZE(kii_pro), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_KII_PRO, + } +}; + +static int __init init_rc_map_kii_pro(void) +{ + return rc_map_register(&kii_pro_map); +} + +static void __exit exit_rc_map_kii_pro(void) +{ + rc_map_unregister(&kii_pro_map); +} + +module_init(init_rc_map_kii_pro) +module_exit(exit_rc_map_kii_pro) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mohammad Rasim <mohammad.rasim96@gmail.com>"); diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index 9a8c1cf54ac4..583e4f32a0da 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -269,12 +269,7 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char __user *buf, goto out_unlock; } - /* - * The scancode field in lirc_scancode is 64-bit simply - * to future-proof it, since there are IR protocols encode - * use more than 32 bits. For now only 32-bit protocols - * are supported. - */ + /* We only have encoders for 32-bit protocols. */ if (scan.scancode > U32_MAX || !rc_validate_scancode(scan.rc_proto, scan.scancode)) { ret = -EINVAL; diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index 5c2cd8d2d155..48a69bf23236 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c @@ -230,10 +230,10 @@ static ssize_t wakeup_data_show(struct device *dev, for (i = 0; i < fifo_len; i++) { duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY); duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD; - buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, + buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len, "%d ", duration); } - buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n"); + buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n"); spin_unlock_irqrestore(&nvt->lock, flags); diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 6f80c251f641..d7064d664d52 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -164,6 +164,41 @@ static struct rc_map_list empty_map = { }; /** + * scancode_to_u64() - converts scancode in &struct input_keymap_entry + * @ke: keymap entry containing scancode to be converted. + * @scancode: pointer to the location where converted scancode should + * be stored. + * + * This function is a version of input_scancode_to_scalar specialized for + * rc-core. + */ +static int scancode_to_u64(const struct input_keymap_entry *ke, u64 *scancode) +{ + switch (ke->len) { + case 1: + *scancode = *((u8 *)ke->scancode); + break; + + case 2: + *scancode = *((u16 *)ke->scancode); + break; + + case 4: + *scancode = *((u32 *)ke->scancode); + break; + + case 8: + *scancode = *((u64 *)ke->scancode); + break; + + default: + return -EINVAL; + } + + return 0; +} + +/** * ir_create_table() - initializes a scancode table * @dev: the rc_dev device * @rc_map: the rc_map to initialize @@ -285,13 +320,13 @@ static unsigned int ir_update_mapping(struct rc_dev *dev, /* Did the user wish to remove the mapping? 
*/ if (new_keycode == KEY_RESERVED || new_keycode == KEY_UNKNOWN) { - dev_dbg(&dev->dev, "#%d: Deleting scan 0x%04x\n", + dev_dbg(&dev->dev, "#%d: Deleting scan 0x%04llx\n", index, rc_map->scan[index].scancode); rc_map->len--; memmove(&rc_map->scan[index], &rc_map->scan[index+ 1], (rc_map->len - index) * sizeof(struct rc_map_table)); } else { - dev_dbg(&dev->dev, "#%d: %s scan 0x%04x with key 0x%04x\n", + dev_dbg(&dev->dev, "#%d: %s scan 0x%04llx with key 0x%04x\n", index, old_keycode == KEY_RESERVED ? "New" : "Replacing", rc_map->scan[index].scancode, new_keycode); @@ -334,8 +369,7 @@ static unsigned int ir_update_mapping(struct rc_dev *dev, */ static unsigned int ir_establish_scancode(struct rc_dev *dev, struct rc_map *rc_map, - unsigned int scancode, - bool resize) + u64 scancode, bool resize) { unsigned int i; @@ -394,7 +428,7 @@ static int ir_setkeycode(struct input_dev *idev, struct rc_dev *rdev = input_get_drvdata(idev); struct rc_map *rc_map = &rdev->rc_map; unsigned int index; - unsigned int scancode; + u64 scancode; int retval = 0; unsigned long flags; @@ -407,7 +441,7 @@ static int ir_setkeycode(struct input_dev *idev, goto out; } } else { - retval = input_scancode_to_scalar(ke, &scancode); + retval = scancode_to_u64(ke, &scancode); if (retval) goto out; @@ -434,8 +468,7 @@ out: * * return: -ENOMEM if all keycodes could not be inserted, otherwise zero. */ -static int ir_setkeytable(struct rc_dev *dev, - const struct rc_map *from) +static int ir_setkeytable(struct rc_dev *dev, const struct rc_map *from) { struct rc_map *rc_map = &dev->rc_map; unsigned int i, index; @@ -466,7 +499,7 @@ static int ir_setkeytable(struct rc_dev *dev, static int rc_map_cmp(const void *key, const void *elt) { - const unsigned int *scancode = key; + const u64 *scancode = key; const struct rc_map_table *e = elt; if (*scancode < e->scancode) @@ -487,7 +520,7 @@ static int rc_map_cmp(const void *key, const void *elt) * return: index in the table, -1U if not found */ static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map, - unsigned int scancode) + u64 scancode) { struct rc_map_table *res; @@ -516,7 +549,7 @@ static int ir_getkeycode(struct input_dev *idev, struct rc_map_table *entry; unsigned long flags; unsigned int index; - unsigned int scancode; + u64 scancode; int retval; spin_lock_irqsave(&rc_map->lock, flags); @@ -524,7 +557,7 @@ static int ir_getkeycode(struct input_dev *idev, if (ke->flags & INPUT_KEYMAP_BY_INDEX) { index = ke->index; } else { - retval = input_scancode_to_scalar(ke, &scancode); + retval = scancode_to_u64(ke, &scancode); if (retval) goto out; @@ -538,7 +571,6 @@ static int ir_getkeycode(struct input_dev *idev, ke->keycode = entry->keycode; ke->len = sizeof(entry->scancode); memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode)); - } else if (!(ke->flags & INPUT_KEYMAP_BY_INDEX)) { /* * We do not really know the valid range of scancodes @@ -570,7 +602,7 @@ out: * * return: the corresponding keycode, or KEY_RESERVED */ -u32 rc_g_keycode_from_table(struct rc_dev *dev, u32 scancode) +u32 rc_g_keycode_from_table(struct rc_dev *dev, u64 scancode) { struct rc_map *rc_map = &dev->rc_map; unsigned int keycode; @@ -586,7 +618,7 @@ u32 rc_g_keycode_from_table(struct rc_dev *dev, u32 scancode) spin_unlock_irqrestore(&rc_map->lock, flags); if (keycode != KEY_RESERVED) - dev_dbg(&dev->dev, "%s: scancode 0x%04x keycode 0x%02x\n", + dev_dbg(&dev->dev, "%s: scancode 0x%04llx keycode 0x%02x\n", dev->device_name, scancode, keycode); return keycode; @@ -719,8 +751,11 @@ void 
rc_repeat(struct rc_dev *dev) spin_lock_irqsave(&dev->keylock, flags); - input_event(dev->input_dev, EV_MSC, MSC_SCAN, dev->last_scancode); - input_sync(dev->input_dev); + if (dev->last_scancode <= U32_MAX) { + input_event(dev->input_dev, EV_MSC, MSC_SCAN, + dev->last_scancode); + input_sync(dev->input_dev); + } if (dev->keypressed) { dev->keyup_jiffies = jiffies + timeout; @@ -743,7 +778,7 @@ EXPORT_SYMBOL_GPL(rc_repeat); * called with keylock held. */ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol, - u32 scancode, u32 keycode, u8 toggle) + u64 scancode, u32 keycode, u8 toggle) { bool new_event = (!dev->keypressed || dev->last_protocol != protocol || @@ -761,7 +796,8 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol, if (new_event && dev->keypressed) ir_do_keyup(dev, false); - input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode); + if (scancode <= U32_MAX) + input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode); dev->last_protocol = protocol; dev->last_scancode = scancode; @@ -772,7 +808,7 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol, /* Register a keypress */ dev->keypressed = true; - dev_dbg(&dev->dev, "%s: key down event, key 0x%04x, protocol 0x%04x, scancode 0x%08x\n", + dev_dbg(&dev->dev, "%s: key down event, key 0x%04x, protocol 0x%04x, scancode 0x%08llx\n", dev->device_name, keycode, protocol, scancode); input_report_key(dev->input_dev, keycode, 1); @@ -809,7 +845,7 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol, * This routine is used to signal that a key has been pressed on the * remote control. */ -void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode, +void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode, u8 toggle) { unsigned long flags; @@ -840,7 +876,7 @@ EXPORT_SYMBOL_GPL(rc_keydown); * remote control. The driver must manually call rc_keyup() at a later stage. 
*/ void rc_keydown_notimeout(struct rc_dev *dev, enum rc_proto protocol, - u32 scancode, u8 toggle) + u64 scancode, u8 toggle) { unsigned long flags; u32 keycode = rc_g_keycode_from_table(dev, scancode); diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c index d789d82df7c4..f86ef1ca1288 100644 --- a/drivers/media/spi/gs1662.c +++ b/drivers/media/spi/gs1662.c @@ -147,11 +147,17 @@ static int gs_read_register(struct spi_device *spi, u16 addr, u16 *value) { .tx_buf = &buf_addr, .len = 2, - .delay_usecs = 1, + .delay = { + .value = 1, + .unit = SPI_DELAY_UNIT_USECS + }, }, { .rx_buf = &buf_value, .len = 2, - .delay_usecs = 1, + .delay = { + .value = 1, + .unit = SPI_DELAY_UNIT_USECS + }, }, }; @@ -175,11 +181,17 @@ static int gs_write_register(struct spi_device *spi, u16 addr, u16 value) { .tx_buf = &buf_addr, .len = 2, - .delay_usecs = 1, + .delay = { + .value = 1, + .unit = SPI_DELAY_UNIT_USECS + }, }, { .tx_buf = &buf_value, .len = 2, - .delay_usecs = 1, + .delay = { + .value = 1, + .unit = SPI_DELAY_UNIT_USECS + }, }, }; diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig index 03c2944f6273..e678d3d11467 100644 --- a/drivers/media/usb/Kconfig +++ b/drivers/media/usb/Kconfig @@ -25,7 +25,6 @@ if MEDIA_ANALOG_TV_SUPPORT comment "Analog TV USB devices" source "drivers/media/usb/pvrusb2/Kconfig" source "drivers/media/usb/hdpvr/Kconfig" -source "drivers/media/usb/usbvision/Kconfig" source "drivers/media/usb/stk1160/Kconfig" source "drivers/media/usb/go7007/Kconfig" endif diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile index 21e46b10caa5..169aa07c97bd 100644 --- a/drivers/media/usb/Makefile +++ b/drivers/media/usb/Makefile @@ -17,7 +17,6 @@ obj-$(CONFIG_VIDEO_CPIA2) += cpia2/ obj-$(CONFIG_VIDEO_AU0828) += au0828/ obj-$(CONFIG_VIDEO_HDPVR) += hdpvr/ obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/ -obj-$(CONFIG_VIDEO_USBVISION) += usbvision/ obj-$(CONFIG_VIDEO_STK1160) += stk1160/ obj-$(CONFIG_VIDEO_CX231XX) += cx231xx/ obj-$(CONFIG_VIDEO_TM6000) += tm6000/ diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c index d1895334cbbf..51b8d14fb4dc 100644 --- a/drivers/media/usb/au0828/au0828-video.c +++ b/drivers/media/usb/au0828/au0828-video.c @@ -1042,7 +1042,7 @@ static int au0828_v4l2_close(struct file *filp) dev->streaming_users, dev->users); mutex_lock(&dev->lock); - if (vdev->vfl_type == VFL_TYPE_GRABBER && dev->vid_timeout_running) { + if (vdev->vfl_type == VFL_TYPE_VIDEO && dev->vid_timeout_running) { /* Cancel timeout thread in case they didn't call streamoff */ dev->vid_timeout_running = 0; del_timer_sync(&dev->vid_timeout); @@ -2007,7 +2007,7 @@ int au0828_analog_register(struct au0828_dev *dev, /* Register the v4l2 device */ video_set_drvdata(&dev->vdev, dev); - retval = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1); + retval = video_register_device(&dev->vdev, VFL_TYPE_VIDEO, -1); if (retval != 0) { dprintk(1, "unable to register video device (error = %d).\n", retval); diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index 039963a7765b..198ddfb8d2b1 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -511,6 +511,9 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb) return ret; } + if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + switch (fc_usb->udev->speed) { case USB_SPEED_LOW: err("cannot handle USB speed because it is too slow."); @@ -544,9 +547,6 @@ static int 
flexcop_usb_probe(struct usb_interface *intf, struct flexcop_device *fc = NULL; int ret; - if (intf->cur_altsetting->desc.bNumEndpoints < 1) - return -ENODEV; - if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) { err("out of memory\n"); return -ENOMEM; diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c index 9d3d05125d7b..e488e7870f42 100644 --- a/drivers/media/usb/cpia2/cpia2_v4l.c +++ b/drivers/media/usb/cpia2/cpia2_v4l.c @@ -1134,7 +1134,7 @@ int cpia2_register_camera(struct camera_data *cam) reset_camera_struct_v4l(cam); /* register v4l device */ - if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) { + if (video_register_device(&cam->vdev, VFL_TYPE_VIDEO, video_nr) < 0) { ERR("video_register_device failed\n"); return -ENODEV; } diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c index 1aec4459f50a..b0cd51134654 100644 --- a/drivers/media/usb/cx231xx/cx231xx-417.c +++ b/drivers/media/usb/cx231xx/cx231xx-417.c @@ -1790,7 +1790,7 @@ int cx231xx_417_register(struct cx231xx *dev) dev->v4l_device.queue = q; err = video_register_device(&dev->v4l_device, - VFL_TYPE_GRABBER, -1); + VFL_TYPE_VIDEO, -1); if (err < 0) { dprintk(3, "%s: can't register mpeg device\n", dev->name); v4l2_ctrl_handler_free(&dev->mpeg_ctrl_handler.hdl); diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c index e205f7f0a56a..0037b4b1381e 100644 --- a/drivers/media/usb/cx231xx/cx231xx-dvb.c +++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c @@ -147,7 +147,7 @@ static struct tda18271_config pv_tda18271_config = { .small_i2c = TDA18271_03_BYTE_CHUNK_INIT, }; -static struct lgdt3306a_config hauppauge_955q_lgdt3306a_config = { +static const struct lgdt3306a_config hauppauge_955q_lgdt3306a_config = { .qam_if_khz = 4000, .vsb_if_khz = 3250, .spectral_inversion = 1, diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c index 69abafaebbf3..8bff7d8a0310 100644 --- a/drivers/media/usb/cx231xx/cx231xx-video.c +++ b/drivers/media/usb/cx231xx/cx231xx-video.c @@ -1785,7 +1785,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev) dev->vdev.device_caps |= V4L2_CAP_TUNER; /* register v4l2 video video_device */ - ret = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, + ret = video_register_device(&dev->vdev, VFL_TYPE_VIDEO, video_nr[dev->devno]); if (ret) { dev_err(dev->dev, diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c index 0514e87405b6..89a1b204b90c 100644 --- a/drivers/media/usb/dvb-usb-v2/anysee.c +++ b/drivers/media/usb/dvb-usb-v2/anysee.c @@ -318,14 +318,14 @@ static struct tda10023_config anysee_tda10023_tda18212_config = { .deltaf = 0xba02, }; -static struct tda18212_config anysee_tda18212_config = { +static const struct tda18212_config anysee_tda18212_config = { .if_dvbt_6 = 4150, .if_dvbt_7 = 4150, .if_dvbt_8 = 4150, .if_dvbc = 5000, }; -static struct tda18212_config anysee_tda18212_config2 = { +static const struct tda18212_config anysee_tda18212_config2 = { .if_dvbt_6 = 3550, .if_dvbt_7 = 3700, .if_dvbt_8 = 4150, diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index 62d3566bf7ee..fd8b42bb9a84 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -486,13 +486,10 @@ static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], static u8 obuf[64], ibuf[64]; int i, read, 
read_o; u16 len; - u8 gate = st->i2c_gate; + u8 gate; mutex_lock(&d->i2c_mutex); - if (gate == 0) - gate = 5; - for (i = 0; i < num; i++) { read_o = msg[i].flags & I2C_M_RD; read = i + 1 < num && msg[i + 1].flags & I2C_M_RD; diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index c6881a1b3232..2080f6ef4be1 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -552,6 +552,9 @@ tuner_found: if (ret) goto err; + /* slave demod needs some time to wake up */ + msleep(20); + /* check slave answers */ ret = rtl28xxu_ctrl_msg(d, &req_mn88472); if (ret == 0 && buf[0] == 0x02) { diff --git a/drivers/media/usb/dvb-usb/cxusb-analog.c b/drivers/media/usb/dvb-usb/cxusb-analog.c index 0699f718d052..001cae648797 100644 --- a/drivers/media/usb/dvb-usb/cxusb-analog.c +++ b/drivers/media/usb/dvb-usb/cxusb-analog.c @@ -1223,7 +1223,7 @@ static int cxusb_medion_g_tuner(struct file *file, void *fh, if (tuner->index != 0) return -EINVAL; - if (vdev->vfl_type == VFL_TYPE_GRABBER) + if (vdev->vfl_type == VFL_TYPE_VIDEO) tuner->type = V4L2_TUNER_ANALOG_TV; else tuner->type = V4L2_TUNER_RADIO; @@ -1259,7 +1259,7 @@ static int cxusb_medion_g_tuner(struct file *file, void *fh, if (ret != 0) return ret; - if (vdev->vfl_type == VFL_TYPE_GRABBER) + if (vdev->vfl_type == VFL_TYPE_VIDEO) strscpy(tuner->name, "TV tuner", sizeof(tuner->name)); else strscpy(tuner->name, "Radio tuner", sizeof(tuner->name)); @@ -1292,7 +1292,7 @@ static int cxusb_medion_s_tuner(struct file *file, void *fh, * make sure that cx25840 is in a correct TV / radio mode, * since calls above may have changed it for tuner / IF demod */ - if (vdev->vfl_type == VFL_TYPE_GRABBER) + if (vdev->vfl_type == VFL_TYPE_VIDEO) v4l2_subdev_call(cxdev->cx25840, video, s_std, cxdev->norm); else v4l2_subdev_call(cxdev->cx25840, tuner, s_radio); @@ -1335,7 +1335,7 @@ static int cxusb_medion_s_frequency(struct file *file, void *fh, * make sure that cx25840 is in a correct TV / radio mode, * since calls above may have changed it for tuner / IF demod */ - if (vdev->vfl_type == VFL_TYPE_GRABBER) + if (vdev->vfl_type == VFL_TYPE_VIDEO) v4l2_subdev_call(cxdev->cx25840, video, s_std, cxdev->norm); else v4l2_subdev_call(cxdev->cx25840, tuner, s_radio); @@ -1564,7 +1564,7 @@ static int cxusb_videoradio_release(struct file *f) cxusb_vprintk(dvbdev, OPS, "got release\n"); - if (vdev->vfl_type == VFL_TYPE_GRABBER) + if (vdev->vfl_type == VFL_TYPE_VIDEO) ret = vb2_fop_release(f); else ret = v4l2_fh_release(f); @@ -1663,7 +1663,7 @@ static int cxusb_medion_register_analog_video(struct dvb_usb_device *dvbdev) cxdev->videodev->lock = &cxdev->dev_lock; video_set_drvdata(cxdev->videodev, dvbdev); - ret = video_register_device(cxdev->videodev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(cxdev->videodev, VFL_TYPE_VIDEO, -1); if (ret) { dev_err(&dvbdev->udev->dev, "video device register failed, ret = %d\n", ret); diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c index e53c58ab6488..ef62dd6c5ae4 100644 --- a/drivers/media/usb/dvb-usb/dib0700_core.c +++ b/drivers/media/usb/dvb-usb/dib0700_core.c @@ -818,7 +818,7 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf) /* Starting in firmware 1.20, the RC info is provided on a bulk pipe */ - if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1) + if (intf->cur_altsetting->desc.bNumEndpoints < rc_ep + 1) return -ENODEV; purb = usb_alloc_urb(0, GFP_KERNEL); @@ -838,7 +838,7 @@ 
int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf) * Some devices like the Hauppauge NovaTD model 52009 use an interrupt * endpoint, while others use a bulk one. */ - e = &intf->altsetting[0].endpoint[rc_ep].desc; + e = &intf->cur_altsetting->endpoint[rc_ep].desc; if (usb_endpoint_dir_in(e)) { if (usb_endpoint_xfer_bulk(e)) { pipe = usb_rcvbulkpipe(d->udev, rc_ep); diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index 8b584507dd59..1007366a295b 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -1524,6 +1524,29 @@ static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap) return -EIO; } +static int tt_s2_4600_frontend_attach_probe_demod(struct dvb_usb_device *d, + const int probe_addr) +{ + struct dw2102_state *state = d->priv; + + state->data[0] = 0x9; + state->data[1] = 0x1; + state->data[2] = 0x1; + state->data[3] = probe_addr; + state->data[4] = 0x0; + + if (dvb_usb_generic_rw(d, state->data, 5, state->data, 2, 0) < 0) { + err("i2c probe for address 0x%x failed.", probe_addr); + return 0; + } + + if (state->data[0] != 8) /* fail(7) or error, no device at address */ + return 0; + + /* probing successful */ + return 1; +} + static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap->dev; @@ -1533,6 +1556,7 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap) struct i2c_board_info board_info; struct m88ds3103_platform_data m88ds3103_pdata = {}; struct ts2020_config ts2020_config = {}; + int demod_addr; mutex_lock(&d->data_mutex); @@ -1570,8 +1594,22 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap) if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) err("command 0x51 transfer failed."); + /* probe for demodulator i2c address */ + demod_addr = -1; + if (tt_s2_4600_frontend_attach_probe_demod(d, 0x68)) + demod_addr = 0x68; + else if (tt_s2_4600_frontend_attach_probe_demod(d, 0x69)) + demod_addr = 0x69; + else if (tt_s2_4600_frontend_attach_probe_demod(d, 0x6a)) + demod_addr = 0x6a; + mutex_unlock(&d->data_mutex); + if (demod_addr < 0) { + err("probing for demodulator failed. 
Is the external power switched on?"); + return -ENODEV; + } + /* attach demod */ m88ds3103_pdata.clk = 27000000; m88ds3103_pdata.i2c_wr_max = 33; @@ -1586,8 +1624,11 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap) m88ds3103_pdata.lnb_hv_pol = 1; m88ds3103_pdata.lnb_en_pol = 0; memset(&board_info, 0, sizeof(board_info)); - strscpy(board_info.type, "m88ds3103", I2C_NAME_SIZE); - board_info.addr = 0x68; + if (demod_addr == 0x6a) + strscpy(board_info.type, "m88ds3103b", I2C_NAME_SIZE); + else + strscpy(board_info.type, "m88ds3103", I2C_NAME_SIZE); + board_info.addr = demod_addr; board_info.platform_data = &m88ds3103_pdata; request_module("m88ds3103"); client = i2c_new_client_device(&d->i2c_adap, &board_info); diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index def9cdd931a9..a8c321d11827 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -2398,6 +2398,20 @@ const struct em28xx_board em28xx_boards[] = { .ir_codes = RC_MAP_PINNACLE_PCTV_HD, }, /* + * 2013:0259 PCTV DVB-S2 Stick (461e_v2) + * Empia EM28178, Montage M88DS3103b, Montage M88TS2022, Allegro A8293 + */ + [EM28178_BOARD_PCTV_461E_V2] = { + .def_i2c_bus = 1, + .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | + EM28XX_I2C_FREQ_400_KHZ, + .name = "PCTV DVB-S2 Stick (461e v2)", + .tuner_type = TUNER_ABSENT, + .tuner_gpio = pctv_461e, + .has_dvb = 1, + .ir_codes = RC_MAP_PINNACLE_PCTV_HD, + }, + /* * 2013:025f PCTV tripleStick (292e). * Empia EM28178, Silicon Labs Si2168, Silicon Labs Si2157 */ @@ -2696,6 +2710,10 @@ struct usb_device_id em28xx_id_table[] = { .driver_info = EM2765_BOARD_SPEEDLINK_VAD_LAPLACE }, { USB_DEVICE(0x2013, 0x0258), .driver_info = EM28178_BOARD_PCTV_461E }, + { USB_DEVICE(0x2013, 0x0461), + .driver_info = EM28178_BOARD_PCTV_461E_V2 }, + { USB_DEVICE(0x2013, 0x0259), + .driver_info = EM28178_BOARD_PCTV_461E_V2 }, { USB_DEVICE(0x2013, 0x025f), .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x2013, 0x0264), /* Hauppauge WinTV-soloHD 292e SE */ diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c index 0ab6c493bc74..fb9cbfa81a84 100644 --- a/drivers/media/usb/em28xx/em28xx-dvb.c +++ b/drivers/media/usb/em28xx/em28xx-dvb.c @@ -1219,6 +1219,61 @@ static int em28178_dvb_init_pctv_461e(struct em28xx *dev) return 0; } +static int em28178_dvb_init_pctv_461e_v2(struct em28xx *dev) +{ + struct em28xx_dvb *dvb = dev->dvb; + struct i2c_adapter *i2c_adapter; + struct m88ds3103_platform_data m88ds3103_pdata = {}; + struct ts2020_config ts2020_config = {}; + struct a8293_platform_data a8293_pdata = {}; + + /* attach demod */ + m88ds3103_pdata.clk = 27000000; + m88ds3103_pdata.i2c_wr_max = 33; + m88ds3103_pdata.ts_mode = M88DS3103_TS_PARALLEL; + m88ds3103_pdata.ts_clk = 16000; + m88ds3103_pdata.ts_clk_pol = 0; + m88ds3103_pdata.agc = 0x99; + m88ds3103_pdata.agc_inv = 0; + m88ds3103_pdata.spec_inv = 0; + dvb->i2c_client_demod = dvb_module_probe("m88ds3103", "m88ds3103b", + &dev->i2c_adap[dev->def_i2c_bus], + 0x6a, &m88ds3103_pdata); + + if (!dvb->i2c_client_demod) + return -ENODEV; + + dvb->fe[0] = m88ds3103_pdata.get_dvb_frontend(dvb->i2c_client_demod); + i2c_adapter = m88ds3103_pdata.get_i2c_adapter(dvb->i2c_client_demod); + + /* attach tuner */ + ts2020_config.fe = dvb->fe[0]; + dvb->i2c_client_tuner = dvb_module_probe("ts2020", "ts2022", + i2c_adapter, + 0x60, &ts2020_config); + if (!dvb->i2c_client_tuner) { + dvb_module_release(dvb->i2c_client_demod); + return -ENODEV; + 
} + + /* delegate signal strength measurement to tuner */ + dvb->fe[0]->ops.read_signal_strength = + dvb->fe[0]->ops.tuner_ops.get_rf_strength; + + /* attach SEC */ + a8293_pdata.dvb_frontend = dvb->fe[0]; + dvb->i2c_client_sec = dvb_module_probe("a8293", NULL, + &dev->i2c_adap[dev->def_i2c_bus], + 0x08, &a8293_pdata); + if (!dvb->i2c_client_sec) { + dvb_module_release(dvb->i2c_client_tuner); + dvb_module_release(dvb->i2c_client_demod); + return -ENODEV; + } + + return 0; +} + static int em28178_dvb_init_pctv_292e(struct em28xx *dev) { struct em28xx_dvb *dvb = dev->dvb; @@ -1860,6 +1915,11 @@ static int em28xx_dvb_init(struct em28xx *dev) if (result) goto out_free; break; + case EM28178_BOARD_PCTV_461E_V2: + result = em28178_dvb_init_pctv_461e_v2(dev); + if (result) + goto out_free; + break; case EM28178_BOARD_PCTV_292E: result = em28178_dvb_init_pctv_292e(dev); if (result) diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index b0f7390e4b4f..6b84c3413e83 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c @@ -2141,7 +2141,7 @@ static int em28xx_v4l2_open(struct file *filp) int ret; switch (vdev->vfl_type) { - case VFL_TYPE_GRABBER: + case VFL_TYPE_VIDEO: fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE; break; case VFL_TYPE_VBI: @@ -2789,7 +2789,7 @@ static int em28xx_v4l2_init(struct em28xx *dev) } /* register v4l2 video video_device */ - ret = video_register_device(&v4l2->vdev, VFL_TYPE_GRABBER, + ret = video_register_device(&v4l2->vdev, VFL_TYPE_VIDEO, video_nr[dev->devno]); if (ret) { dev_err(&dev->intf->dev, diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 4ecadd57dac7..acbb62397314 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h @@ -150,6 +150,7 @@ #define EM2884_BOARD_TERRATEC_H6 101 #define EM2882_BOARD_ZOLID_HYBRID_TV_STICK 102 #define EM2861_BOARD_MAGIX_VIDEOWANDLER2 103 +#define EM28178_BOARD_PCTV_461E_V2 104 /* Limits minimum and default number of buffers */ #define EM28XX_MIN_BUF 4 diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c index ff2aa057c1fb..f889c9d740cd 100644 --- a/drivers/media/usb/go7007/go7007-usb.c +++ b/drivers/media/usb/go7007/go7007-usb.c @@ -1044,6 +1044,7 @@ static int go7007_usb_probe(struct usb_interface *intf, struct go7007_usb *usb; const struct go7007_usb_board *board; struct usb_device *usbdev = interface_to_usbdev(intf); + struct usb_host_endpoint *ep; unsigned num_i2c_devs; char *name; int video_pipe, i, v_urb_len; @@ -1140,7 +1141,8 @@ static int go7007_usb_probe(struct usb_interface *intf, if (usb->intr_urb->transfer_buffer == NULL) goto allocfail; - if (go->board_id == GO7007_BOARDID_SENSORAY_2250) + ep = usb->usbdev->ep_in[4]; + if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) usb_fill_bulk_urb(usb->intr_urb, usb->usbdev, usb_rcvbulkpipe(usb->usbdev, 4), usb->intr_urb->transfer_buffer, 2*sizeof(u16), diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c index 0b3d185f3cb0..b2edc4deaca3 100644 --- a/drivers/media/usb/go7007/go7007-v4l2.c +++ b/drivers/media/usb/go7007/go7007-v4l2.c @@ -1138,7 +1138,7 @@ int go7007_v4l2_init(struct go7007 *go) go7007_s_input(go); if (go->board_info->sensor_flags & GO7007_SENSOR_TV) go7007_s_std(go); - rv = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + rv = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (rv < 0) return rv; dev_info(go->dev, "registered 
device %s [v4l2]\n", diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c index c1b307bbe540..0566e00d6fea 100644 --- a/drivers/media/usb/gspca/gspca.c +++ b/drivers/media/usb/gspca/gspca.c @@ -1555,7 +1555,7 @@ int gspca_dev_probe2(struct usb_interface *intf, /* init video stuff */ ret = video_register_device(&gspca_dev->vdev, - VFL_TYPE_GRABBER, + VFL_TYPE_VIDEO, -1); if (ret < 0) { pr_err("video_register_device err %d\n", ret); diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c index f417dfc0b872..0afe70a3f9a2 100644 --- a/drivers/media/usb/gspca/ov519.c +++ b/drivers/media/usb/gspca/ov519.c @@ -3477,6 +3477,11 @@ static void ov511_mode_init_regs(struct sd *sd) return; } + if (alt->desc.bNumEndpoints < 1) { + sd->gspca_dev.usb_err = -ENODEV; + return; + } + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5); @@ -3603,6 +3608,11 @@ static void ov518_mode_init_regs(struct sd *sd) return; } + if (alt->desc.bNumEndpoints < 1) { + sd->gspca_dev.usb_err = -ENODEV; + return; + } + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2); diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c index 79653d409951..95673fc0a99c 100644 --- a/drivers/media/usb/gspca/stv06xx/stv06xx.c +++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c @@ -282,6 +282,9 @@ static int stv06xx_start(struct gspca_dev *gspca_dev) return -EIO; } + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); err = stv06xx_write_bridge(sd, STV_ISO_SIZE_L, packet_size); if (err < 0) @@ -306,11 +309,21 @@ out: static int stv06xx_isoc_init(struct gspca_dev *gspca_dev) { + struct usb_interface_cache *intfc; struct usb_host_interface *alt; struct sd *sd = (struct sd *) gspca_dev; + intfc = gspca_dev->dev->actconfig->intf_cache[0]; + + if (intfc->num_altsetting < 2) + return -ENODEV; + + alt = &intfc->altsetting[1]; + + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + /* Start isoc bandwidth "negotiation" at max isoc bandwidth */ - alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(sd->sensor->max_packet_size[gspca_dev->curr_mode]); @@ -323,6 +336,10 @@ static int stv06xx_isoc_nego(struct gspca_dev *gspca_dev) struct usb_host_interface *alt; struct sd *sd = (struct sd *) gspca_dev; + /* + * Existence of altsetting and endpoint was verified in + * stv06xx_isoc_init() + */ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); min_packet_size = sd->sensor->min_packet_size[gspca_dev->curr_mode]; diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c index 6d1007715ff7..ae382b3b5f7f 100644 --- a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c +++ b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c @@ -185,6 +185,10 @@ static int pb0100_start(struct sd *sd) alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt); if (!alt) return -ENODEV; + + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); /* If we don't have enough bandwidth use a lower framerate */ diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c index 934a90bd78c2..c579b100f066 100644 --- 
a/drivers/media/usb/gspca/xirlink_cit.c +++ b/drivers/media/usb/gspca/xirlink_cit.c @@ -1442,6 +1442,9 @@ static int cit_get_packet_size(struct gspca_dev *gspca_dev) return -EIO; } + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + return le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); } @@ -2626,6 +2629,7 @@ static int sd_start(struct gspca_dev *gspca_dev) static int sd_isoc_init(struct gspca_dev *gspca_dev) { + struct usb_interface_cache *intfc; struct usb_host_interface *alt; int max_packet_size; @@ -2641,8 +2645,17 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev) break; } + intfc = gspca_dev->dev->actconfig->intf_cache[0]; + + if (intfc->num_altsetting < 2) + return -ENODEV; + + alt = &intfc->altsetting[1]; + + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + /* Start isoc bandwidth "negotiation" at max isoc bandwidth */ - alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(max_packet_size); return 0; @@ -2665,6 +2678,9 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev) break; } + /* + * Existence of altsetting and endpoint was verified in sd_isoc_init() + */ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); if (packet_size <= min_packet_size) diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c index bad71d863d39..563128d11731 100644 --- a/drivers/media/usb/hdpvr/hdpvr-video.c +++ b/drivers/media/usb/hdpvr/hdpvr-video.c @@ -1238,7 +1238,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent, dev->video_dev.v4l2_dev = &dev->v4l2_dev; video_set_drvdata(&dev->video_dev, dev); - res = video_register_device(&dev->video_dev, VFL_TYPE_GRABBER, devnum); + res = video_register_device(&dev->video_dev, VFL_TYPE_VIDEO, devnum); if (res < 0) { v4l2_err(&dev->v4l2_dev, "video_device registration failed\n"); goto error; diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c index afda438d4e0a..0655aa9ecf28 100644 --- a/drivers/media/usb/pulse8-cec/pulse8-cec.c +++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c @@ -635,8 +635,6 @@ static void pulse8_cec_adap_free(struct cec_adapter *adap) cancel_delayed_work_sync(&pulse8->ping_eeprom_work); cancel_work_sync(&pulse8->irq_work); cancel_work_sync(&pulse8->tx_work); - serio_close(pulse8->serio); - serio_set_drvdata(pulse8->serio, NULL); kfree(pulse8); } @@ -652,6 +650,9 @@ static void pulse8_disconnect(struct serio *serio) struct pulse8 *pulse8 = serio_get_drvdata(serio); cec_unregister_adapter(pulse8->adap); + pulse8->serio = NULL; + serio_set_drvdata(serio, NULL); + serio_close(serio); } static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio, @@ -840,6 +841,8 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv) serio_set_drvdata(serio, pulse8); INIT_WORK(&pulse8->irq_work, pulse8_irq_work_handler); INIT_WORK(&pulse8->tx_work, pulse8_tx_work_handler); + INIT_DELAYED_WORK(&pulse8->ping_eeprom_work, + pulse8_ping_eeprom_work_handler); mutex_init(&pulse8->lock); spin_lock_init(&pulse8->msg_lock); pulse8->config_pending = false; @@ -865,17 +868,16 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv) pulse8->restoring_config = true; } - INIT_DELAYED_WORK(&pulse8->ping_eeprom_work, - pulse8_ping_eeprom_work_handler); schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD); return 0; close_serio: + pulse8->serio = NULL; + 
serio_set_drvdata(serio, NULL); serio_close(serio); delete_adap: cec_delete_adapter(pulse8->adap); - serio_set_drvdata(serio, NULL); free_device: kfree(pulse8); return err; diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c index eaa08c7999d4..9657c1883311 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c @@ -1196,7 +1196,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip, hdw = vp->channel.mc_head->hdw; dip->v4l_type = v4l_type; switch (v4l_type) { - case VFL_TYPE_GRABBER: + case VFL_TYPE_VIDEO: dip->stream = &vp->channel.mc_head->video_stream; dip->config = pvr2_config_mpeg; dip->minor_type = pvr2_v4l_type_video; @@ -1276,7 +1276,7 @@ struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *mnp) /* register streams */ vp->dev_video = kzalloc(sizeof(*vp->dev_video),GFP_KERNEL); if (!vp->dev_video) goto fail; - pvr2_v4l2_dev_init(vp->dev_video,vp,VFL_TYPE_GRABBER); + pvr2_v4l2_dev_init(vp->dev_video,vp,VFL_TYPE_VIDEO); if (pvr2_hdw_get_input_available(vp->channel.mc_head->hdw) & (1 << PVR2_CVAL_INPUT_RADIO)) { vp->dev_radio = kzalloc(sizeof(*vp->dev_radio),GFP_KERNEL); diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c index 9b76cf133d52..d57b8b786506 100644 --- a/drivers/media/usb/pwc/pwc-if.c +++ b/drivers/media/usb/pwc/pwc-if.c @@ -1116,7 +1116,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id pdev->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; - rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1); + rc = video_register_device(&pdev->vdev, VFL_TYPE_VIDEO, -1); if (rc < 0) { PWC_ERROR("Failed to register as video device (%d).\n", rc); goto err_unregister_v4l2_dev; diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c index 329ec8089592..4af55e2478be 100644 --- a/drivers/media/usb/s2255/s2255drv.c +++ b/drivers/media/usb/s2255/s2255drv.c @@ -1649,11 +1649,11 @@ static int s2255_probe_v4l(struct s2255_dev *dev) video_set_drvdata(&vc->vdev, vc); if (video_nr == -1) ret = video_register_device(&vc->vdev, - VFL_TYPE_GRABBER, + VFL_TYPE_VIDEO, video_nr); else ret = video_register_device(&vc->vdev, - VFL_TYPE_GRABBER, + VFL_TYPE_VIDEO, cur_nr + i); if (ret) { diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c index bcd14c66e8df..6a4eb616d516 100644 --- a/drivers/media/usb/stk1160/stk1160-v4l.c +++ b/drivers/media/usb/stk1160/stk1160-v4l.c @@ -830,7 +830,7 @@ int stk1160_video_register(struct stk1160 *dev) dev->norm); video_set_drvdata(&dev->vdev, dev); - rc = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1); + rc = video_register_device(&dev->vdev, VFL_TYPE_VIDEO, -1); if (rc < 0) { stk1160_err("video_register_device failed (%d)\n", rc); return rc; diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c index b22501f76b78..a45d464427c4 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.c +++ b/drivers/media/usb/stkwebcam/stk-webcam.c @@ -1254,7 +1254,7 @@ static int stk_register_video_device(struct stk_camera *dev) dev->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; video_set_drvdata(&dev->vdev, dev); - err = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1); + err = video_register_device(&dev->vdev, VFL_TYPE_VIDEO, -1); if (err) pr_err("v4l registration failed\n"); else diff --git 
a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c index c07a81a6cbe2..bfba06ea60e9 100644 --- a/drivers/media/usb/tm6000/tm6000-video.c +++ b/drivers/media/usb/tm6000/tm6000-video.c @@ -1300,7 +1300,7 @@ static int __tm6000_open(struct file *file) video_device_node_name(vdev)); switch (vdev->vfl_type) { - case VFL_TYPE_GRABBER: + case VFL_TYPE_VIDEO: type = V4L2_BUF_TYPE_VIDEO_CAPTURE; break; case VFL_TYPE_VBI: @@ -1639,7 +1639,7 @@ int tm6000_v4l2_register(struct tm6000_core *dev) INIT_LIST_HEAD(&dev->vidq.active); INIT_LIST_HEAD(&dev->vidq.queued); - ret = video_register_device(&dev->vfd, VFL_TYPE_GRABBER, video_nr); + ret = video_register_device(&dev->vfd, VFL_TYPE_VIDEO, video_nr); if (ret < 0) { printk(KERN_INFO "%s: can't register video device\n", diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c index 5095c380b2c1..ee9c656d121f 100644 --- a/drivers/media/usb/usbtv/usbtv-core.c +++ b/drivers/media/usb/usbtv/usbtv-core.c @@ -56,7 +56,7 @@ int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size) ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - value, index, NULL, 0, 0); + value, index, NULL, 0, USB_CTRL_GET_TIMEOUT); if (ret < 0) return ret; } diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c index 3d9284a09ee5..c89efcd46163 100644 --- a/drivers/media/usb/usbtv/usbtv-video.c +++ b/drivers/media/usb/usbtv/usbtv-video.c @@ -800,7 +800,8 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl) ret = usb_control_msg(usbtv->udev, usb_rcvctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0, USBTV_BASE + 0x0244, (void *)data, 3, 0); + 0, USBTV_BASE + 0x0244, (void *)data, 3, + USB_CTRL_GET_TIMEOUT); if (ret < 0) goto error; } @@ -851,7 +852,7 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl) ret = usb_control_msg(usbtv->udev, usb_sndctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0, index, (void *)data, size, 0); + 0, index, (void *)data, size, USB_CTRL_SET_TIMEOUT); error: if (ret < 0) @@ -940,7 +941,7 @@ int usbtv_video_init(struct usbtv *usbtv) usbtv->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; video_set_drvdata(&usbtv->vdev, usbtv); - ret = video_register_device(&usbtv->vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&usbtv->vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) { dev_warn(usbtv->dev, "Could not register video device\n"); goto vdev_fail; diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 99883550375e..431d86e1c94b 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -2014,7 +2014,7 @@ int uvc_register_video_device(struct uvc_device *dev, */ video_set_drvdata(vdev, stream); - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) { uvc_printk(KERN_ERR, "Failed to register %s device (%d).\n", v4l2_type_names[type], ret); diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c index 57dbcc8083bf..8c670934d920 100644 --- a/drivers/media/usb/zr364xx/zr364xx.c +++ b/drivers/media/usb/zr364xx/zr364xx.c @@ -1516,7 +1516,7 @@ static int zr364xx_probe(struct usb_interface *intf, V4L2_FIELD_NONE, sizeof(struct zr364xx_buffer), cam, &cam->lock); - err = video_register_device(&cam->vdev, 
VFL_TYPE_GRABBER, -1); + err = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1); if (err) { dev_err(&udev->dev, "video_register_device failed\n"); goto fail; diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 2928c5e0a73d..93d33d1db4e8 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -4296,10 +4296,17 @@ void v4l2_ctrl_request_complete(struct media_request *req, continue; v4l2_ctrl_lock(ctrl); - if (ref->req) + if (ref->req) { ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req); - else + } else { ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req); + /* + * Set ref->req to ensure that when userspace wants to + * obtain the controls of this request it will take + * this value and not the current value of the control. + */ + ref->req = ref; + } v4l2_ctrl_unlock(ctrl); } diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c index da42d172714a..97b6a3af1361 100644 --- a/drivers/media/v4l2-core/v4l2-dev.c +++ b/drivers/media/v4l2-core/v4l2-dev.c @@ -542,13 +542,13 @@ static void determine_valid_ioctls(struct video_device *vdev) V4L2_CAP_META_OUTPUT; DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE); const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops; - bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER && + bool is_vid = vdev->vfl_type == VFL_TYPE_VIDEO && (vdev->device_caps & vid_caps); bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI; bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO; bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR; bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH; - bool is_meta = vdev->vfl_type == VFL_TYPE_GRABBER && + bool is_meta = vdev->vfl_type == VFL_TYPE_VIDEO && (vdev->device_caps & meta_caps); bool is_rx = vdev->vfl_dir != VFL_DIR_TX; bool is_tx = vdev->vfl_dir != VFL_DIR_RX; @@ -783,7 +783,7 @@ static int video_register_media_controller(struct video_device *vdev) vdev->entity.function = MEDIA_ENT_F_UNKNOWN; switch (vdev->vfl_type) { - case VFL_TYPE_GRABBER: + case VFL_TYPE_VIDEO: intf_type = MEDIA_INTF_T_V4L_VIDEO; vdev->entity.function = MEDIA_ENT_F_IO_V4L; break; @@ -891,7 +891,7 @@ int __video_register_device(struct video_device *vdev, /* Part 1: check device type */ switch (type) { - case VFL_TYPE_GRABBER: + case VFL_TYPE_VIDEO: name_base = "video"; break; case VFL_TYPE_VBI: @@ -935,7 +935,7 @@ int __video_register_device(struct video_device *vdev, * of 128-191 and just pick the first free minor there * (new style). */ switch (type) { - case VFL_TYPE_GRABBER: + case VFL_TYPE_VIDEO: minor_offset = 0; minor_cnt = 64; break; diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c index 63d6b147b21e..c69941214bb2 100644 --- a/drivers/media/v4l2-core/v4l2-device.c +++ b/drivers/media/v4l2-core/v4l2-device.c @@ -111,9 +111,6 @@ EXPORT_SYMBOL_GPL(v4l2_device_unregister); int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, struct v4l2_subdev *sd) { -#if defined(CONFIG_MEDIA_CONTROLLER) - struct media_entity *entity = &sd->entity; -#endif int err; /* Check for valid input */ @@ -143,7 +140,7 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, #if defined(CONFIG_MEDIA_CONTROLLER) /* Register the entity. 
*/ if (v4l2_dev->mdev) { - err = media_device_register_entity(v4l2_dev->mdev, entity); + err = media_device_register_entity(v4l2_dev->mdev, &sd->entity); if (err < 0) goto error_module; } @@ -163,7 +160,7 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, error_unregister: #if defined(CONFIG_MEDIA_CONTROLLER) - media_device_unregister_entity(entity); + media_device_unregister_entity(&sd->entity); #endif error_module: if (!sd->owner_v4l2_dev) @@ -179,6 +176,7 @@ static void v4l2_subdev_release(struct v4l2_subdev *sd) if (sd->internal_ops && sd->internal_ops->release) sd->internal_ops->release(sd); + sd->devnode = NULL; module_put(owner); } diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c index 6ece4320e1d2..97f0f8b23b5d 100644 --- a/drivers/media/v4l2-core/v4l2-fwnode.c +++ b/drivers/media/v4l2-core/v4l2-fwnode.c @@ -557,33 +557,28 @@ int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode, } EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse); -int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode, +int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode, struct v4l2_fwnode_link *link) { - const char *port_prop = is_of_node(__fwnode) ? "reg" : "port"; - struct fwnode_handle *fwnode; + struct fwnode_endpoint fwep; memset(link, 0, sizeof(*link)); - fwnode = fwnode_get_parent(__fwnode); - fwnode_property_read_u32(fwnode, port_prop, &link->local_port); - fwnode = fwnode_get_next_parent(fwnode); - if (is_of_node(fwnode) && of_node_name_eq(to_of_node(fwnode), "ports")) - fwnode = fwnode_get_next_parent(fwnode); - link->local_node = fwnode; + fwnode_graph_parse_endpoint(fwnode, &fwep); + link->local_id = fwep.id; + link->local_port = fwep.port; + link->local_node = fwnode_graph_get_port_parent(fwnode); - fwnode = fwnode_graph_get_remote_endpoint(__fwnode); + fwnode = fwnode_graph_get_remote_endpoint(fwnode); if (!fwnode) { fwnode_handle_put(fwnode); return -ENOLINK; } - fwnode = fwnode_get_parent(fwnode); - fwnode_property_read_u32(fwnode, port_prop, &link->remote_port); - fwnode = fwnode_get_next_parent(fwnode); - if (is_of_node(fwnode) && of_node_name_eq(to_of_node(fwnode), "ports")) - fwnode = fwnode_get_next_parent(fwnode); - link->remote_node = fwnode; + fwnode_graph_parse_endpoint(fwnode, &fwep); + link->remote_id = fwep.id; + link->remote_port = fwep.port; + link->remote_node = fwnode_graph_get_port_parent(fwnode); return 0; } @@ -596,6 +591,171 @@ void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link) } EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link); +static const struct v4l2_fwnode_connector_conv { + enum v4l2_connector_type type; + const char *compatible; +} connectors[] = { + { + .type = V4L2_CONN_COMPOSITE, + .compatible = "composite-video-connector", + }, { + .type = V4L2_CONN_SVIDEO, + .compatible = "svideo-connector", + }, +}; + +static enum v4l2_connector_type +v4l2_fwnode_string_to_connector_type(const char *con_str) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(connectors); i++) + if (!strcmp(con_str, connectors[i].compatible)) + return connectors[i].type; + + return V4L2_CONN_UNKNOWN; +} + +static void +v4l2_fwnode_connector_parse_analog(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *vc) +{ + u32 stds; + int ret; + + ret = fwnode_property_read_u32(fwnode, "sdtv-standards", &stds); + + /* The property is optional. */ + vc->connector.analog.sdtv_stds = ret ? 
V4L2_STD_ALL : stds; +} + +void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector) +{ + struct v4l2_connector_link *link, *tmp; + + if (IS_ERR_OR_NULL(connector) || connector->type == V4L2_CONN_UNKNOWN) + return; + + list_for_each_entry_safe(link, tmp, &connector->links, head) { + v4l2_fwnode_put_link(&link->fwnode_link); + list_del(&link->head); + kfree(link); + } + + kfree(connector->label); + connector->label = NULL; + connector->type = V4L2_CONN_UNKNOWN; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_free); + +static enum v4l2_connector_type +v4l2_fwnode_get_connector_type(struct fwnode_handle *fwnode) +{ + const char *type_name; + int err; + + if (!fwnode) + return V4L2_CONN_UNKNOWN; + + /* The connector-type is stored within the compatible string. */ + err = fwnode_property_read_string(fwnode, "compatible", &type_name); + if (err) + return V4L2_CONN_UNKNOWN; + + return v4l2_fwnode_string_to_connector_type(type_name); +} + +int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector) +{ + struct fwnode_handle *connector_node; + enum v4l2_connector_type connector_type; + const char *label; + int err; + + if (!fwnode) + return -EINVAL; + + memset(connector, 0, sizeof(*connector)); + + INIT_LIST_HEAD(&connector->links); + + connector_node = fwnode_graph_get_port_parent(fwnode); + connector_type = v4l2_fwnode_get_connector_type(connector_node); + if (connector_type == V4L2_CONN_UNKNOWN) { + fwnode_handle_put(connector_node); + connector_node = fwnode_graph_get_remote_port_parent(fwnode); + connector_type = v4l2_fwnode_get_connector_type(connector_node); + } + + if (connector_type == V4L2_CONN_UNKNOWN) { + pr_err("Unknown connector type\n"); + err = -ENOTCONN; + goto out; + } + + connector->type = connector_type; + connector->name = fwnode_get_name(connector_node); + err = fwnode_property_read_string(connector_node, "label", &label); + connector->label = err ? NULL : kstrdup_const(label, GFP_KERNEL); + + /* Parse the connector specific properties. 
*/ + switch (connector->type) { + case V4L2_CONN_COMPOSITE: + case V4L2_CONN_SVIDEO: + v4l2_fwnode_connector_parse_analog(connector_node, connector); + break; + /* Avoid compiler warnings */ + case V4L2_CONN_UNKNOWN: + break; + } + +out: + fwnode_handle_put(connector_node); + + return err; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_parse); + +int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector) +{ + struct fwnode_handle *connector_ep; + struct v4l2_connector_link *link; + int err; + + if (!fwnode || !connector || connector->type == V4L2_CONN_UNKNOWN) + return -EINVAL; + + connector_ep = fwnode_graph_get_remote_endpoint(fwnode); + if (!connector_ep) + return -ENOTCONN; + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + err = -ENOMEM; + goto err; + } + + err = v4l2_fwnode_parse_link(connector_ep, &link->fwnode_link); + if (err) + goto err; + + fwnode_handle_put(connector_ep); + + list_add(&link->head, &connector->links); + connector->nr_of_links++; + + return 0; + +err: + kfree(link); + fwnode_handle_put(connector_ep); + + return err; +} +EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_add_link); + static int v4l2_async_notifier_fwnode_parse_endpoint(struct device *dev, struct v4l2_async_notifier *notifier, diff --git a/drivers/media/v4l2-core/v4l2-i2c.c b/drivers/media/v4l2-core/v4l2-i2c.c index 5bf99e7c0c09..b4acca75644b 100644 --- a/drivers/media/v4l2-core/v4l2-i2c.c +++ b/drivers/media/v4l2-core/v4l2-i2c.c @@ -74,10 +74,10 @@ struct v4l2_subdev /* Create the i2c client */ if (info->addr == 0 && probe_addrs) - client = i2c_new_probed_device(adapter, info, probe_addrs, - NULL); + client = i2c_new_scanned_device(adapter, info, probe_addrs, + NULL); else - client = i2c_new_device(adapter, info); + client = i2c_new_client_device(adapter, info); /* * Note: by loading the module first we are certain that c->driver @@ -88,7 +88,7 @@ struct v4l2_subdev * want to use the i2c device, so explicitly loading the module * is the best alternative. */ - if (!client || !client->dev.driver) + if (!i2c_client_has_driver(client)) goto error; /* Lock the module so we can safely get the v4l2_subdev pointer */ @@ -110,7 +110,7 @@ error: * If we have a client but no subdev, then something went wrong and * we must unregister the client. 
*/ - if (client && !sd) + if (!IS_ERR(client) && !sd) i2c_unregister_device(client); return sd; } diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index aaf83e254272..b2ef8e60ea7d 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -941,12 +941,12 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type) V4L2_CAP_META_OUTPUT; struct video_device *vfd = video_devdata(file); const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER && + bool is_vid = vfd->vfl_type == VFL_TYPE_VIDEO && (vfd->device_caps & vid_caps); bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI; bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; - bool is_meta = vfd->vfl_type == VFL_TYPE_GRABBER && + bool is_meta = vfd->vfl_type == VFL_TYPE_VIDEO && (vfd->device_caps & meta_caps); bool is_rx = vfd->vfl_dir != VFL_DIR_TX; bool is_tx = vfd->vfl_dir != VFL_DIR_RX; @@ -1222,6 +1222,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break; case V4L2_PIX_FMT_Y10: descr = "10-bit Greyscale"; break; case V4L2_PIX_FMT_Y12: descr = "12-bit Greyscale"; break; + case V4L2_PIX_FMT_Y14: descr = "14-bit Greyscale"; break; case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break; case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break; case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break; @@ -1306,6 +1307,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break; case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break; case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break; + case V4L2_PIX_FMT_SBGGR14: descr = "14-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG14: descr = "14-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG14: descr = "14-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB14: descr = "14-bit Bayer RGRG/GBGB"; break; case V4L2_PIX_FMT_SBGGR14P: descr = "14-bit Bayer BGBG/GRGR Packed"; break; case V4L2_PIX_FMT_SGBRG14P: descr = "14-bit Bayer GBGB/RGRG Packed"; break; case V4L2_PIX_FMT_SGRBG14P: descr = "14-bit Bayer GRGR/BGBG Packed"; break; diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c index 014a2a97cadd..0fffdd3ce6a4 100644 --- a/drivers/media/v4l2-core/v4l2-mc.c +++ b/drivers/media/v4l2-core/v4l2-mc.c @@ -321,7 +321,7 @@ EXPORT_SYMBOL_GPL(v4l_vb2q_enable_media_source); * use_count field stores the total number of users of all video device nodes * in the pipeline. * - * The v4l2_pipeline_pm_use() function must be called in the open() and + * The v4l2_pipeline_pm_{get, put}() functions must be called in the open() and * close() handlers of video device nodes. It increments or decrements the use * count of all subdev entities in the pipeline. * @@ -423,7 +423,7 @@ static int pipeline_pm_power(struct media_entity *entity, int change, return ret; } -int v4l2_pipeline_pm_use(struct media_entity *entity, int use) +static int v4l2_pipeline_pm_use(struct media_entity *entity, unsigned int use) { struct media_device *mdev = entity->graph_obj.mdev; int change = use ? 
1 : -1; @@ -444,7 +444,19 @@ int v4l2_pipeline_pm_use(struct media_entity *entity, int use) return ret; } -EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_use); + +int v4l2_pipeline_pm_get(struct media_entity *entity) +{ + return v4l2_pipeline_pm_use(entity, 1); +} +EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_get); + +void v4l2_pipeline_pm_put(struct media_entity *entity) +{ + /* Powering off entities shouldn't fail. */ + WARN_ON(v4l2_pipeline_pm_use(entity, 0)); +} +EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_put); int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, unsigned int notification) diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index 1afd9c6ad908..8986c31176e9 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -340,6 +340,11 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev, m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp || dst->vb2_buf.timestamp != src->vb2_buf.timestamp; + if (m2m_ctx->has_stopped) { + dprintk("Device has stopped\n"); + goto job_unlock; + } + if (m2m_dev->m2m_ops->job_ready && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { dprintk("Driver not ready\n"); @@ -556,6 +561,140 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, } EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); +/* + * This will add the LAST flag and mark the buffer management + * state as stopped. + * This is called when the last capture buffer must be flagged as LAST + * in draining mode from the encoder/decoder driver buf_queue() callback + * or from v4l2_update_last_buf_state() when a capture buffer is available. + */ +void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + vbuf->flags |= V4L2_BUF_FLAG_LAST; + vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); + + v4l2_m2m_mark_stopped(m2m_ctx); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done); + +/* When stop command is issued, update buffer management state */ +static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx) +{ + struct vb2_v4l2_buffer *next_dst_buf; + + if (m2m_ctx->is_draining) + return -EBUSY; + + if (m2m_ctx->has_stopped) + return 0; + + m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx); + m2m_ctx->is_draining = true; + + /* + * The processing of the last output buffer queued before + * the STOP command is expected to mark the buffer management + * state as stopped with v4l2_m2m_mark_stopped(). + */ + if (m2m_ctx->last_src_buf) + return 0; + + /* + * In case the output queue is empty, try to mark the last capture + * buffer as LAST. + */ + next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); + if (!next_dst_buf) { + /* + * Wait for the next queued one in encoder/decoder driver + * buf_queue() callback using the v4l2_m2m_dst_buf_is_last() + * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet + * streaming. 
+ */ + m2m_ctx->next_buf_last = true; + return 0; + } + + v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf); + + return 0; +} + +/* + * Updates the encoding/decoding buffer management state, should + * be called from encoder/decoder drivers start_streaming() + */ +void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q) +{ + /* If start streaming again, untag the last output buffer */ + if (V4L2_TYPE_IS_OUTPUT(q->type)) + m2m_ctx->last_src_buf = NULL; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state); + +/* + * Updates the encoding/decoding buffer management state, should + * be called from encoder/decoder driver stop_streaming() + */ +void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q) +{ + if (V4L2_TYPE_IS_OUTPUT(q->type)) { + /* + * If in draining state, either mark next dst buffer as + * done or flag next one to be marked as done either + * in encoder/decoder driver buf_queue() callback using + * the v4l2_m2m_dst_buf_is_last() helper or in v4l2_m2m_qbuf() + * if encoder/decoder is not yet streaming + */ + if (m2m_ctx->is_draining) { + struct vb2_v4l2_buffer *next_dst_buf; + + m2m_ctx->last_src_buf = NULL; + next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); + if (!next_dst_buf) + m2m_ctx->next_buf_last = true; + else + v4l2_m2m_last_buffer_done(m2m_ctx, + next_dst_buf); + } + } else { + v4l2_m2m_clear_state(m2m_ctx); + } +} +EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state); + +static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q) +{ + struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vbuf; + unsigned int i; + + if (WARN_ON(q->is_output)) + return; + if (list_empty(&q->queued_list)) + return; + + vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry); + for (i = 0; i < vb->num_planes; i++) + vb2_set_plane_payload(vb, i, 0); + + /* + * Since the buffer hasn't been queued to the ready queue, + * mark is active and owned before marking it LAST and DONE + */ + vb->state = VB2_BUF_STATE_ACTIVE; + atomic_inc(&q->owned_by_drv_count); + + vbuf = to_vb2_v4l2_buffer(vb); + vbuf->field = V4L2_FIELD_NONE; + + v4l2_m2m_last_buffer_done(m2m_ctx, vbuf); +} + int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_buffer *buf) { @@ -570,11 +709,25 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, __func__); return -EPERM; } + ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf); - if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST)) + if (ret) + return ret; + + /* + * If the capture queue is streaming, but streaming hasn't started + * on the device, but was asked to stop, mark the previously queued + * buffer as DONE with LAST flag since it won't be queued on the + * device. 
+ */ + if (!V4L2_TYPE_IS_OUTPUT(vq->type) && + vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) && + (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx))) + v4l2_m2m_force_last_buf_done(m2m_ctx, vq); + else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST)) v4l2_m2m_try_schedule(m2m_ctx); - return ret; + return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf); @@ -880,12 +1033,12 @@ int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev, goto err_rel_entity1; /* Connect the three entities */ - ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1, + ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) goto err_rel_entity2; - ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0, + ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) goto err_rm_links0; @@ -1225,6 +1378,70 @@ int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh, } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd); +/* + * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START + * Should be called from the encoder driver encoder_cmd() callback + */ +int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_encoder_cmd *ec) +{ + if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) + return -EINVAL; + + if (ec->cmd == V4L2_ENC_CMD_STOP) + return v4l2_update_last_buf_state(m2m_ctx); + + if (m2m_ctx->is_draining) + return -EBUSY; + + if (m2m_ctx->has_stopped) + m2m_ctx->has_stopped = false; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd); + +/* + * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START + * Should be called from the decoder driver decoder_cmd() callback + */ +int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_decoder_cmd *dc) +{ + if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) + return -EINVAL; + + if (dc->cmd == V4L2_DEC_CMD_STOP) + return v4l2_update_last_buf_state(m2m_ctx); + + if (m2m_ctx->is_draining) + return -EBUSY; + + if (m2m_ctx->has_stopped) + m2m_ctx->has_stopped = false; + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd); + +int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv, + struct v4l2_encoder_cmd *ec) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd); + +int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv, + struct v4l2_decoder_cmd *dc) +{ + struct v4l2_fh *fh = file->private_data; + + return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd); + int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dc) { diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c index 25e5f24b3fec..5bdf57472314 100644 --- a/drivers/misc/altera-stapl/altera.c +++ b/drivers/misc/altera-stapl/altera.c @@ -2112,8 +2112,8 @@ exit_done: return status; } -static int altera_get_note(u8 *p, s32 program_size, - s32 *offset, char *key, char *value, int length) +static int altera_get_note(u8 *p, s32 program_size, s32 *offset, + char *key, char *value, int keylen, int vallen) /* * Gets key and value of NOTE fields in the JBC file. 
* Can be called in two modes: if offset pointer is NULL, @@ -2170,7 +2170,7 @@ static int altera_get_note(u8 *p, s32 program_size, &p[note_table + (8 * i) + 4])]; if (value != NULL) - strlcpy(value, value_ptr, length); + strlcpy(value, value_ptr, vallen); } } @@ -2189,13 +2189,13 @@ static int altera_get_note(u8 *p, s32 program_size, strlcpy(key, &p[note_strings + get_unaligned_be32( &p[note_table + (8 * i)])], - length); + keylen); if (value != NULL) strlcpy(value, &p[note_strings + get_unaligned_be32( &p[note_table + (8 * i) + 4])], - length); + vallen); *offset = i + 1; } @@ -2449,7 +2449,7 @@ int altera_init(struct altera_config *config, const struct firmware *fw) __func__, (format_version == 2) ? "Jam STAPL" : "pre-standardized Jam 1.1"); while (altera_get_note((u8 *)fw->data, fw->size, - &offset, key, value, 256) == 0) + &offset, key, value, 32, 256) == 0) printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n", __func__, key, value); } diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c index 4feed296a327..423fecc19fc4 100644 --- a/drivers/misc/cardreader/rts5227.c +++ b/drivers/misc/cardreader/rts5227.c @@ -394,7 +394,7 @@ static const struct pcr_ops rts522a_pcr_ops = { void rts522a_init_params(struct rtsx_pcr *pcr) { rts5227_init_params(pcr); - + pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11); pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3; pcr->option.ocp_en = 1; diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index db936e4d6e56..1a81cda948c1 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -618,6 +618,7 @@ static const struct pcr_ops rts524a_pcr_ops = { void rts524a_init_params(struct rtsx_pcr *pcr) { rts5249_init_params(pcr); + pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11); pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF; pcr->option.ltr_l1off_snooze_sspwrgate = LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF; @@ -733,6 +734,7 @@ static const struct pcr_ops rts525a_pcr_ops = { void rts525a_init_params(struct rtsx_pcr *pcr) { rts5249_init_params(pcr); + pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11); pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF; pcr->option.ltr_l1off_snooze_sspwrgate = LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF; diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c index 4214f02a17fd..711054ebad74 100644 --- a/drivers/misc/cardreader/rts5260.c +++ b/drivers/misc/cardreader/rts5260.c @@ -662,7 +662,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr) pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B; pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B; pcr->aspm_en = ASPM_L1_EN; - pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16); + pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11); pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5); pcr->ic_version = rts5260_get_ic_version(pcr); diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index bc4967a6efa1..78c3b1d424c3 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -764,7 +764,7 @@ void rts5261_init_params(struct rtsx_pcr *pcr) pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B; pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B; pcr->aspm_en = ASPM_L1_EN; - pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 27, 16); + pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 11); pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5); pcr->ic_version = rts5261_get_ic_version(pcr); diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c 
index 031eb64549af..282c9ef68ed2 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -712,13 +712,14 @@ static int at24_probe(struct i2c_client *client) * chip is functional. */ err = at24_read(at24, 0, &test_byte, 1); - pm_runtime_idle(dev); if (err) { pm_runtime_disable(dev); regulator_disable(at24->vcc_reg); return -ENODEV; } + pm_runtime_idle(dev); + if (writable) dev_info(dev, "%u byte %s EEPROM, writable, %u bytes/write\n", byte_len, client->name, at24->write_max); diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index b155e9549076..b680b0caa69b 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c @@ -598,7 +598,9 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable) goto out; } - hdev->asic_funcs->halt_coresight(hdev); + if (!hdev->hard_reset_pending) + hdev->asic_funcs->halt_coresight(hdev); + hdev->in_debug = 0; goto out; @@ -1189,6 +1191,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) { dev_info(hdev->dev, "H/W state is dirty, must reset before initializing\n"); + hdev->asic_funcs->halt_engines(hdev, true); hdev->asic_funcs->hw_fini(hdev, true); } diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 7344e8a222ae..b8a8de24aaf7 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -895,6 +895,11 @@ void goya_init_dma_qmans(struct hl_device *hdev) */ static void goya_disable_external_queues(struct hl_device *hdev) { + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_DMA)) + return; + WREG32(mmDMA_QM_0_GLBL_CFG0, 0); WREG32(mmDMA_QM_1_GLBL_CFG0, 0); WREG32(mmDMA_QM_2_GLBL_CFG0, 0); @@ -956,6 +961,11 @@ static int goya_stop_external_queues(struct hl_device *hdev) { int rc, retval = 0; + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_DMA)) + return retval; + rc = goya_stop_queue(hdev, mmDMA_QM_0_GLBL_CFG1, mmDMA_QM_0_CP_STS, @@ -1744,9 +1754,18 @@ void goya_init_tpc_qmans(struct hl_device *hdev) */ static void goya_disable_internal_queues(struct hl_device *hdev) { + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_MME)) + goto disable_tpc; + WREG32(mmMME_QM_GLBL_CFG0, 0); WREG32(mmMME_CMDQ_GLBL_CFG0, 0); +disable_tpc: + if (!(goya->hw_cap_initialized & HW_CAP_TPC)) + return; + WREG32(mmTPC0_QM_GLBL_CFG0, 0); WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0); @@ -1782,8 +1801,12 @@ static void goya_disable_internal_queues(struct hl_device *hdev) */ static int goya_stop_internal_queues(struct hl_device *hdev) { + struct goya_device *goya = hdev->asic_specific; int rc, retval = 0; + if (!(goya->hw_cap_initialized & HW_CAP_MME)) + goto stop_tpc; + /* * Each queue (QMAN) is a separate H/W logic. 
That means that each * QMAN can be stopped independently and failure to stop one does NOT @@ -1810,6 +1833,10 @@ static int goya_stop_internal_queues(struct hl_device *hdev) retval = -EIO; } +stop_tpc: + if (!(goya->hw_cap_initialized & HW_CAP_TPC)) + return retval; + rc = goya_stop_queue(hdev, mmTPC0_QM_GLBL_CFG1, mmTPC0_QM_CP_STS, @@ -1975,6 +2002,11 @@ static int goya_stop_internal_queues(struct hl_device *hdev) static void goya_dma_stall(struct hl_device *hdev) { + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_DMA)) + return; + WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT); WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT); WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT); @@ -1984,6 +2016,11 @@ static void goya_dma_stall(struct hl_device *hdev) static void goya_tpc_stall(struct hl_device *hdev) { + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_TPC)) + return; + WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT); WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT); @@ -1996,6 +2033,11 @@ static void goya_tpc_stall(struct hl_device *hdev) static void goya_mme_stall(struct hl_device *hdev) { + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_MME)) + return; + WREG32(mmMME_STALL, 0xFFFFFFFF); } @@ -4648,8 +4690,6 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, rc = goya_send_job_on_qman0(hdev, job); - hl_cb_put(job->patched_cb); - hl_debugfs_remove_job(hdev, job); kfree(job); cb->cs_cnt--; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index aa54d359dab7..a971c4bcc442 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1732,8 +1732,11 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, * the erase operation does not exceed the max_busy_timeout, we should * use R1B response. Or we need to prevent the host from doing hw busy * detection, which is done by converting to a R1 response instead. + * Note, some hosts requires R1B, which also means they are on their own + * when it comes to deal with the busy timeout. */ - if (card->host->max_busy_timeout && + if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) && + card->host->max_busy_timeout && busy_timeout > card->host->max_busy_timeout) { cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; } else { diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index f6912ded652d..de14b5845f52 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1910,9 +1910,12 @@ static int mmc_sleep(struct mmc_host *host) * If the max_busy_timeout of the host is specified, validate it against * the sleep cmd timeout. A failure means we need to prevent the host * from doing hw busy detection, which is done by converting to a R1 - * response instead of a R1B. + * response instead of a R1B. Note, some hosts requires R1B, which also + * means they are on their own when it comes to deal with the busy + * timeout. 
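The mmc core hunks above (core.c, mmc.c and mmc_ops.c) all gate the R1B-to-R1 downgrade on the new MMC_CAP_NEED_RSP_BUSY host capability. A condensed sketch of the resulting decision, using only the flag and field names that appear in the hunks (the SPI response bits are omitted for brevity):

        #include <linux/mmc/core.h>
        #include <linux/mmc/host.h>

        /* Sketch: choose the response type for a command with a busy phase. */
        static u32 pick_busy_resp_flags(struct mmc_host *host, unsigned int timeout_ms)
        {
                /*
                 * Hosts that require R1B must handle long busy periods themselves;
                 * everyone else drops to R1 (no HW busy detection) when the needed
                 * timeout exceeds what the controller can track.
                 */
                if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) &&
                    host->max_busy_timeout &&
                    timeout_ms > host->max_busy_timeout)
                        return MMC_RSP_R1 | MMC_CMD_AC;

                return MMC_RSP_R1B | MMC_CMD_AC;
        }
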
*/ - if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) { + if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout && + (timeout_ms > host->max_busy_timeout)) { cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; } else { cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index da425ee2d9bf..e025604e17d4 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -542,9 +542,11 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, * If the max_busy_timeout of the host is specified, make sure it's * enough to fit the used timeout_ms. In case it's not, let's instruct * the host to avoid HW busy detection, by converting to a R1 response - * instead of a R1B. + * instead of a R1B. Note, some hosts requires R1B, which also means + * they are on their own when it comes to deal with the busy timeout. */ - if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) + if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout && + (timeout_ms > host->max_busy_timeout)) use_r1b_resp = false; cmd.opcode = MMC_SWITCH; diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index bd50935dc37d..11087976ab19 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -606,19 +606,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host, u8 sample_point, bool rx) { struct rtsx_pcr *pcr = host->pcr; - + u16 SD_VP_CTL = 0; dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n", __func__, rx ? "RX" : "TX", sample_point); rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK); - if (rx) + if (rx) { + SD_VP_CTL = SD_VPRX_CTL; rtsx_pci_write_register(pcr, SD_VPRX_CTL, PHASE_SELECT_MASK, sample_point); - else + } else { + SD_VP_CTL = SD_VPTX_CTL; rtsx_pci_write_register(pcr, SD_VPTX_CTL, PHASE_SELECT_MASK, sample_point); - rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); - rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, + } + rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET, 0); + rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET); rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0); rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 9651dca6863e..7e7d0d1aa36a 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -23,6 +23,7 @@ #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/delay.h> +#include <linux/dmi.h> #include <linux/mmc/host.h> #include <linux/mmc/pm.h> @@ -72,9 +73,16 @@ struct sdhci_acpi_host { const struct sdhci_acpi_slot *slot; struct platform_device *pdev; bool use_runtime_pm; + bool is_intel; + bool reset_signal_volt_on_suspend; unsigned long private[0] ____cacheline_aligned; }; +enum { + DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP = BIT(0), + DMI_QUIRK_SD_NO_WRITE_PROTECT = BIT(1), +}; + static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c) { return (void *)c->private; @@ -234,7 +242,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { static bool sdhci_acpi_byt(void) { static const struct x86_cpu_id byt[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL), {} }; @@ -244,7 +252,7 @@ static bool sdhci_acpi_byt(void) static bool sdhci_acpi_cht(void) { static const struct x86_cpu_id cht[] = { - { X86_VENDOR_INTEL, 6, 
INTEL_FAM6_ATOM_AIRMONT }, + X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL), {} }; @@ -391,6 +399,8 @@ static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *ad host->mmc_host_ops.start_signal_voltage_switch = intel_start_signal_voltage_switch; + c->is_intel = true; + return 0; } @@ -647,6 +657,36 @@ static const struct acpi_device_id sdhci_acpi_ids[] = { }; MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); +static const struct dmi_system_id sdhci_acpi_quirks[] = { + { + /* + * The Lenovo Miix 320-10ICR has a bug in the _PS0 method of + * the SHC1 ACPI device, this bug causes it to reprogram the + * wrong LDO (DLDO3) to 1.8V if 1.8V modes are used and the + * card is (runtime) suspended + resumed. DLDO3 is used for + * the LCD and setting it to 1.8V causes the LCD to go black. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), + }, + .driver_data = (void *)DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP, + }, + { + /* + * The Acer Aspire Switch 10 (SW5-012) microSD slot always + * reports the card being write-protected even though microSD + * cards do not have a write-protect switch at all. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"), + }, + .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT, + }, + {} /* Terminating entry */ +}; + static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(struct acpi_device *adev) { const struct sdhci_acpi_uid_slot *u; @@ -663,17 +703,23 @@ static int sdhci_acpi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; const struct sdhci_acpi_slot *slot; struct acpi_device *device, *child; + const struct dmi_system_id *id; struct sdhci_acpi_host *c; struct sdhci_host *host; struct resource *iomem; resource_size_t len; size_t priv_size; + int quirks = 0; int err; device = ACPI_COMPANION(dev); if (!device) return -ENODEV; + id = dmi_first_match(sdhci_acpi_quirks); + if (id) + quirks = (long)id->driver_data; + slot = sdhci_acpi_get_slot(device); /* Power on the SDHCI controller and its children */ @@ -759,6 +805,12 @@ static int sdhci_acpi_probe(struct platform_device *pdev) dev_warn(dev, "failed to setup card detect gpio\n"); c->use_runtime_pm = false; } + + if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP) + c->reset_signal_volt_on_suspend = true; + + if (quirks & DMI_QUIRK_SD_NO_WRITE_PROTECT) + host->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; } err = sdhci_setup_host(host); @@ -823,17 +875,39 @@ static int sdhci_acpi_remove(struct platform_device *pdev) return 0; } +static void __maybe_unused sdhci_acpi_reset_signal_voltage_if_needed( + struct device *dev) +{ + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + struct sdhci_host *host = c->host; + + if (c->is_intel && c->reset_signal_volt_on_suspend && + host->mmc->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_330) { + struct intel_host *intel_host = sdhci_acpi_priv(c); + unsigned int fn = INTEL_DSM_V33_SWITCH; + u32 result = 0; + + intel_dsm(intel_host, dev, fn, &result); + } +} + #ifdef CONFIG_PM_SLEEP static int sdhci_acpi_suspend(struct device *dev) { struct sdhci_acpi_host *c = dev_get_drvdata(dev); struct sdhci_host *host = c->host; + int ret; if (host->tuning_mode != SDHCI_TUNING_MODE_3) mmc_retune_needed(host->mmc); - return sdhci_suspend_host(host); + ret = sdhci_suspend_host(host); + if (ret) + return ret; + + sdhci_acpi_reset_signal_voltage_if_needed(dev); + return 0; } static int sdhci_acpi_resume(struct device *dev) @@ -853,11 +927,17 @@ static int 
sdhci_acpi_runtime_suspend(struct device *dev) { struct sdhci_acpi_host *c = dev_get_drvdata(dev); struct sdhci_host *host = c->host; + int ret; if (host->tuning_mode != SDHCI_TUNING_MODE_3) mmc_retune_needed(host->mmc); - return sdhci_runtime_suspend_host(host); + ret = sdhci_runtime_suspend_host(host); + if (ret) + return ret; + + sdhci_acpi_reset_signal_voltage_if_needed(dev); + return 0; } static int sdhci_acpi_runtime_resume(struct device *dev) diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index 5827d3751b81..e573495f8726 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -11,6 +11,7 @@ #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/of.h> +#include <linux/of_device.h> #include "sdhci-pltfm.h" @@ -235,6 +236,11 @@ static const struct sdhci_ops sdhci_cdns_ops = { .set_uhs_signaling = sdhci_cdns_set_uhs_signaling, }; +static const struct sdhci_pltfm_data sdhci_cdns_uniphier_pltfm_data = { + .ops = &sdhci_cdns_ops, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, +}; + static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = { .ops = &sdhci_cdns_ops, }; @@ -334,6 +340,7 @@ static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc, static int sdhci_cdns_probe(struct platform_device *pdev) { struct sdhci_host *host; + const struct sdhci_pltfm_data *data; struct sdhci_pltfm_host *pltfm_host; struct sdhci_cdns_priv *priv; struct clk *clk; @@ -350,8 +357,12 @@ static int sdhci_cdns_probe(struct platform_device *pdev) if (ret) return ret; + data = of_device_get_match_data(dev); + if (!data) + data = &sdhci_cdns_pltfm_data; + nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node); - host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data, + host = sdhci_pltfm_init(pdev, data, struct_size(priv, phy_params, nr_phy_params)); if (IS_ERR(host)) { ret = PTR_ERR(host); @@ -431,7 +442,10 @@ static const struct dev_pm_ops sdhci_cdns_pm_ops = { }; static const struct of_device_id sdhci_cdns_match[] = { - { .compatible = "socionext,uniphier-sd4hc" }, + { + .compatible = "socionext,uniphier-sd4hc", + .data = &sdhci_cdns_uniphier_pltfm_data, + }, { .compatible = "cdns,sd4hc" }, { /* sentinel */ } }; diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 382f25b2fa45..b2bdf5012c55 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1452,8 +1452,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) pdev->id_entry->driver_data; if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS) - pm_qos_add_request(&imx_data->pm_qos_req, - PM_QOS_CPU_DMA_LATENCY, 0); + cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0); imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(imx_data->clk_ipg)) { @@ -1572,7 +1571,7 @@ disable_per_clk: clk_disable_unprepare(imx_data->clk_per); free_sdhci: if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS) - pm_qos_remove_request(&imx_data->pm_qos_req); + cpu_latency_qos_remove_request(&imx_data->pm_qos_req); sdhci_pltfm_free(pdev); return err; } @@ -1595,7 +1594,7 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev) clk_disable_unprepare(imx_data->clk_ahb); if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS) - pm_qos_remove_request(&imx_data->pm_qos_req); + cpu_latency_qos_remove_request(&imx_data->pm_qos_req); sdhci_pltfm_free(pdev); @@ -1667,7 +1666,7 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev) clk_disable_unprepare(imx_data->clk_ahb); if (imx_data->socdata->flags & 
ESDHC_FLAG_PMQOS) - pm_qos_remove_request(&imx_data->pm_qos_req); + cpu_latency_qos_remove_request(&imx_data->pm_qos_req); return ret; } @@ -1680,8 +1679,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev) int err; if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS) - pm_qos_add_request(&imx_data->pm_qos_req, - PM_QOS_CPU_DMA_LATENCY, 0); + cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0); err = clk_prepare_enable(imx_data->clk_ahb); if (err) @@ -1714,7 +1712,7 @@ disable_ahb_clk: clk_disable_unprepare(imx_data->clk_ahb); remove_pm_qos_request: if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS) - pm_qos_remove_request(&imx_data->pm_qos_req); + cpu_latency_qos_remove_request(&imx_data->pm_qos_req); return err; } #endif diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index c3a160c18047..3955fa5db43c 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1590,7 +1590,7 @@ static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask) return 0; } -void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery) +static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery) { struct sdhci_host *host = mmc_priv(mmc); unsigned long flags; diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index ab2bd314a390..fcef5c0d0908 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -132,7 +132,8 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask) sdhci_reset(host, mask); - if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) + || mmc_gpio_get_cd(host->mmc) >= 0) sdhci_at91_set_force_card_detect(host); if (priv->cal_always_on && (mask & SDHCI_RESET_ALL)) @@ -427,8 +428,11 @@ static int sdhci_at91_probe(struct platform_device *pdev) * detection procedure using the SDMCC_CD signal is bypassed. * This bit is reset when a software reset for all command is performed * so we need to implement our own reset function to set back this bit. + * + * WA: SAMA5D2 doesn't drive CMD if using CD GPIO line. */ - if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) + || mmc_gpio_get_cd(host->mmc) >= 0) sdhci_at91_set_force_card_detect(host); pm_runtime_put_autosuspend(&pdev->dev); diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 882053151a47..c4978177ef88 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1192,6 +1192,9 @@ static int sdhci_omap_probe(struct platform_device *pdev) if (of_find_property(dev->of_node, "dmas", NULL)) sdhci_switch_external_dma(host, true); + /* R1B responses is required to properly manage HW busy detection. 
*/ + mmc->caps |= MMC_CAP_NEED_RSP_BUSY; + ret = sdhci_setup_host(host); if (ret) goto err_put_sync; diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index 5eea8d70a85d..ce15a05f23d4 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -262,10 +262,26 @@ static int gl9750_execute_tuning(struct sdhci_host *host, u32 opcode) return 0; } +static void gli_pcie_enable_msi(struct sdhci_pci_slot *slot) +{ + int ret; + + ret = pci_alloc_irq_vectors(slot->chip->pdev, 1, 1, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) { + pr_warn("%s: enable PCI MSI failed, error=%d\n", + mmc_hostname(slot->host->mmc), ret); + return; + } + + slot->host->irq = pci_irq_vector(slot->chip->pdev, 0); +} + static int gli_probe_slot_gl9750(struct sdhci_pci_slot *slot) { struct sdhci_host *host = slot->host; + gli_pcie_enable_msi(slot); slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO; sdhci_enable_v4_mode(host); @@ -276,6 +292,7 @@ static int gli_probe_slot_gl9755(struct sdhci_pci_slot *slot) { struct sdhci_host *host = slot->host; + gli_pcie_enable_msi(slot); slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO; sdhci_enable_v4_mode(host); diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 403ac44a7378..a25c3a4d3f6c 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -1552,6 +1552,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev) if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) host->mmc->caps |= MMC_CAP_1_8V_DDR; + /* R1B responses is required to properly manage HW busy detection. */ + host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY; + tegra_sdhci_parse_dt(host); tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power", diff --git a/drivers/most/Kconfig b/drivers/most/Kconfig new file mode 100644 index 000000000000..58d7999170a7 --- /dev/null +++ b/drivers/most/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +menuconfig MOST + tristate "MOST support" + depends on HAS_DMA && CONFIGFS_FS + default n + help + Say Y here if you want to enable MOST support. + This driver needs at least one additional component to enable the + desired access from userspace (e.g. character devices) and one that + matches the network controller's hardware interface (e.g. USB). + + To compile this driver as a module, choose M here: the + module will be called most_core. + + If in doubt, say N here. 
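The help text above captures the layering the new drivers/most core depends on: the core only moves buffers, a hardware-interface driver (e.g. USB) feeds it channels, and an application component (e.g. a character-device driver) exposes those channels to userspace. A purely conceptual sketch of a component follows; the names are hypothetical and intentionally do not reproduce the in-tree API:

        /* Hypothetical component shape, for illustration only. */
        struct example_most_component {
                const char *name;
                /* invoked by the core when a channel is linked to this component */
                int (*probe_channel)(int channel_idx);
                /* invoked when the link is removed again */
                int (*disconnect_channel)(int channel_idx);
        };
        /* A real component registers such an object with the MOST core at init. */
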
diff --git a/drivers/most/Makefile b/drivers/most/Makefile new file mode 100644 index 000000000000..e810cd3a47ee --- /dev/null +++ b/drivers/most/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_MOST) += most_core.o +most_core-y := core.o \ + configfs.o diff --git a/drivers/staging/most/configfs.c b/drivers/most/configfs.c index 9a961222f458..27b0c923597f 100644 --- a/drivers/staging/most/configfs.c +++ b/drivers/most/configfs.c @@ -10,8 +10,7 @@ #include <linux/slab.h> #include <linux/init.h> #include <linux/configfs.h> - -#include "most.h" +#include <linux/most.h> #define MAX_STRING_SIZE 80 diff --git a/drivers/staging/most/core.c b/drivers/most/core.c index 0c4ae6920d77..06426fc5c990 100644 --- a/drivers/staging/most/core.c +++ b/drivers/most/core.c @@ -20,8 +20,7 @@ #include <linux/kthread.h> #include <linux/dma-mapping.h> #include <linux/idr.h> - -#include "most.h" +#include <linux/most.h> #define MAX_CHANNELS 64 #define STRING_SIZE 80 @@ -472,7 +471,7 @@ static int print_links(struct device *dev, void *data) list_for_each_entry(c, &iface->p->channel_list, list) { if (c->pipe0.comp) { - offs += snprintf(buf + offs, + offs += scnprintf(buf + offs, PAGE_SIZE - offs, "%s:%s:%s\n", c->pipe0.comp->name, @@ -480,7 +479,7 @@ static int print_links(struct device *dev, void *data) dev_name(&c->dev)); } if (c->pipe1.comp) { - offs += snprintf(buf + offs, + offs += scnprintf(buf + offs, PAGE_SIZE - offs, "%s:%s:%s\n", c->pipe1.comp->name, @@ -519,7 +518,7 @@ static ssize_t components_show(struct device_driver *drv, char *buf) int offs = 0; list_for_each_entry(comp, &comp_list, list) { - offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n", + offs += scnprintf(buf + offs, PAGE_SIZE - offs, "%s\n", comp->name); } return offs; diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index c1eda67d1ad2..267b9000782e 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -52,14 +52,6 @@ config SPI_HISI_SFC help This enables support for HiSilicon FMC SPI-NOR flash controller. -config SPI_MTK_QUADSPI - tristate "MediaTek Quad SPI controller" - depends on HAS_IOMEM - help - This enables support for the Quad SPI controller in master mode. - This controller does not support generic SPI. It only supports - SPI NOR. - config SPI_NXP_SPIFI tristate "NXP SPI Flash Interface (SPIFI)" depends on OF && (ARCH_LPC18XX || COMPILE_TEST) diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile index 9c5ed03cdc19..738dfd74cf76 100644 --- a/drivers/mtd/spi-nor/Makefile +++ b/drivers/mtd/spi-nor/Makefile @@ -3,7 +3,6 @@ obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o -obj-$(CONFIG_SPI_MTK_QUADSPI) += mtk-quadspi.o obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c deleted file mode 100644 index b1691680d174..000000000000 --- a/drivers/mtd/spi-nor/mtk-quadspi.c +++ /dev/null @@ -1,565 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2015 MediaTek Inc. 
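The drivers/most/core.c conversion above (snprintf() to scnprintf() in print_links() and components_show()) matters because the two functions return different things on truncation: snprintf() reports the length that would have been written, scnprintf() reports what was actually written. Only the latter is safe to accumulate into an offset that is fed back in as the remaining-space argument. A short sketch of the pattern:

        #include <linux/kernel.h>

        /* Sketch: append fields into one PAGE_SIZE sysfs buffer. */
        static ssize_t fill_buf(char *buf, const char *a, const char *b)
        {
                int offs = 0;

                /*
                 * scnprintf() never claims more than the space it was given, so
                 * PAGE_SIZE - offs cannot underflow even when output is truncated.
                 * With snprintf(), a truncated write could push offs past PAGE_SIZE
                 * and turn the next size argument into a huge unsigned value.
                 */
                offs += scnprintf(buf + offs, PAGE_SIZE - offs, "%s\n", a);
                offs += scnprintf(buf + offs, PAGE_SIZE - offs, "%s\n", b);
                return offs;
        }
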
- * Author: Bayi Cheng <bayi.cheng@mediatek.com> - */ - -#include <linux/clk.h> -#include <linux/delay.h> -#include <linux/device.h> -#include <linux/init.h> -#include <linux/io.h> -#include <linux/iopoll.h> -#include <linux/ioport.h> -#include <linux/math64.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/partitions.h> -#include <linux/mtd/spi-nor.h> - -#define MTK_NOR_CMD_REG 0x00 -#define MTK_NOR_CNT_REG 0x04 -#define MTK_NOR_RDSR_REG 0x08 -#define MTK_NOR_RDATA_REG 0x0c -#define MTK_NOR_RADR0_REG 0x10 -#define MTK_NOR_RADR1_REG 0x14 -#define MTK_NOR_RADR2_REG 0x18 -#define MTK_NOR_WDATA_REG 0x1c -#define MTK_NOR_PRGDATA0_REG 0x20 -#define MTK_NOR_PRGDATA1_REG 0x24 -#define MTK_NOR_PRGDATA2_REG 0x28 -#define MTK_NOR_PRGDATA3_REG 0x2c -#define MTK_NOR_PRGDATA4_REG 0x30 -#define MTK_NOR_PRGDATA5_REG 0x34 -#define MTK_NOR_SHREG0_REG 0x38 -#define MTK_NOR_SHREG1_REG 0x3c -#define MTK_NOR_SHREG2_REG 0x40 -#define MTK_NOR_SHREG3_REG 0x44 -#define MTK_NOR_SHREG4_REG 0x48 -#define MTK_NOR_SHREG5_REG 0x4c -#define MTK_NOR_SHREG6_REG 0x50 -#define MTK_NOR_SHREG7_REG 0x54 -#define MTK_NOR_SHREG8_REG 0x58 -#define MTK_NOR_SHREG9_REG 0x5c -#define MTK_NOR_CFG1_REG 0x60 -#define MTK_NOR_CFG2_REG 0x64 -#define MTK_NOR_CFG3_REG 0x68 -#define MTK_NOR_STATUS0_REG 0x70 -#define MTK_NOR_STATUS1_REG 0x74 -#define MTK_NOR_STATUS2_REG 0x78 -#define MTK_NOR_STATUS3_REG 0x7c -#define MTK_NOR_FLHCFG_REG 0x84 -#define MTK_NOR_TIME_REG 0x94 -#define MTK_NOR_PP_DATA_REG 0x98 -#define MTK_NOR_PREBUF_STUS_REG 0x9c -#define MTK_NOR_DELSEL0_REG 0xa0 -#define MTK_NOR_DELSEL1_REG 0xa4 -#define MTK_NOR_INTRSTUS_REG 0xa8 -#define MTK_NOR_INTREN_REG 0xac -#define MTK_NOR_CHKSUM_CTL_REG 0xb8 -#define MTK_NOR_CHKSUM_REG 0xbc -#define MTK_NOR_CMD2_REG 0xc0 -#define MTK_NOR_WRPROT_REG 0xc4 -#define MTK_NOR_RADR3_REG 0xc8 -#define MTK_NOR_DUAL_REG 0xcc -#define MTK_NOR_DELSEL2_REG 0xd0 -#define MTK_NOR_DELSEL3_REG 0xd4 -#define MTK_NOR_DELSEL4_REG 0xd8 - -/* commands for mtk nor controller */ -#define MTK_NOR_READ_CMD 0x0 -#define MTK_NOR_RDSR_CMD 0x2 -#define MTK_NOR_PRG_CMD 0x4 -#define MTK_NOR_WR_CMD 0x10 -#define MTK_NOR_PIO_WR_CMD 0x90 -#define MTK_NOR_WRSR_CMD 0x20 -#define MTK_NOR_PIO_READ_CMD 0x81 -#define MTK_NOR_WR_BUF_ENABLE 0x1 -#define MTK_NOR_WR_BUF_DISABLE 0x0 -#define MTK_NOR_ENABLE_SF_CMD 0x30 -#define MTK_NOR_DUAD_ADDR_EN 0x8 -#define MTK_NOR_QUAD_READ_EN 0x4 -#define MTK_NOR_DUAL_ADDR_EN 0x2 -#define MTK_NOR_DUAL_READ_EN 0x1 -#define MTK_NOR_DUAL_DISABLE 0x0 -#define MTK_NOR_FAST_READ 0x1 - -#define SFLASH_WRBUF_SIZE 128 - -/* Can shift up to 48 bits (6 bytes) of TX/RX */ -#define MTK_NOR_MAX_RX_TX_SHIFT 6 -/* can shift up to 56 bits (7 bytes) transfer by MTK_NOR_PRG_CMD */ -#define MTK_NOR_MAX_SHIFT 7 -/* nor controller 4-byte address mode enable bit */ -#define MTK_NOR_4B_ADDR_EN BIT(4) - -/* Helpers for accessing the program data / shift data registers */ -#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n)) -#define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n)) - -struct mtk_nor { - struct spi_nor nor; - struct device *dev; - void __iomem *base; /* nor flash base address */ - struct clk *spi_clk; - struct clk *nor_clk; -}; - -static void mtk_nor_set_read_mode(struct mtk_nor *mtk_nor) -{ - struct spi_nor *nor = &mtk_nor->nor; - - switch (nor->read_proto) { - case SNOR_PROTO_1_1_1: - writeb(nor->read_opcode, mtk_nor->base + - 
MTK_NOR_PRGDATA3_REG); - writeb(MTK_NOR_FAST_READ, mtk_nor->base + - MTK_NOR_CFG1_REG); - break; - case SNOR_PROTO_1_1_2: - writeb(nor->read_opcode, mtk_nor->base + - MTK_NOR_PRGDATA3_REG); - writeb(MTK_NOR_DUAL_READ_EN, mtk_nor->base + - MTK_NOR_DUAL_REG); - break; - case SNOR_PROTO_1_1_4: - writeb(nor->read_opcode, mtk_nor->base + - MTK_NOR_PRGDATA4_REG); - writeb(MTK_NOR_QUAD_READ_EN, mtk_nor->base + - MTK_NOR_DUAL_REG); - break; - default: - writeb(MTK_NOR_DUAL_DISABLE, mtk_nor->base + - MTK_NOR_DUAL_REG); - break; - } -} - -static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval) -{ - int reg; - u8 val = cmdval & 0x1f; - - writeb(cmdval, mtk_nor->base + MTK_NOR_CMD_REG); - return readl_poll_timeout(mtk_nor->base + MTK_NOR_CMD_REG, reg, - !(reg & val), 100, 10000); -} - -static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op, - const u8 *tx, size_t txlen, u8 *rx, size_t rxlen) -{ - size_t len = 1 + txlen + rxlen; - int i, ret, idx; - - if (len > MTK_NOR_MAX_SHIFT) - return -EINVAL; - - writeb(len * 8, mtk_nor->base + MTK_NOR_CNT_REG); - - /* start at PRGDATA5, go down to PRGDATA0 */ - idx = MTK_NOR_MAX_RX_TX_SHIFT - 1; - - /* opcode */ - writeb(op, mtk_nor->base + MTK_NOR_PRG_REG(idx)); - idx--; - - /* program TX data */ - for (i = 0; i < txlen; i++, idx--) - writeb(tx[i], mtk_nor->base + MTK_NOR_PRG_REG(idx)); - - /* clear out rest of TX registers */ - while (idx >= 0) { - writeb(0, mtk_nor->base + MTK_NOR_PRG_REG(idx)); - idx--; - } - - ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PRG_CMD); - if (ret) - return ret; - - /* restart at first RX byte */ - idx = rxlen - 1; - - /* read out RX data */ - for (i = 0; i < rxlen; i++, idx--) - rx[i] = readb(mtk_nor->base + MTK_NOR_SHREG(idx)); - - return 0; -} - -/* Do a WRSR (Write Status Register) command */ -static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, const u8 sr) -{ - writeb(sr, mtk_nor->base + MTK_NOR_PRGDATA5_REG); - writeb(8, mtk_nor->base + MTK_NOR_CNT_REG); - return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WRSR_CMD); -} - -static int mtk_nor_write_buffer_enable(struct mtk_nor *mtk_nor) -{ - u8 reg; - - /* the bit0 of MTK_NOR_CFG2_REG is pre-fetch buffer - * 0: pre-fetch buffer use for read - * 1: pre-fetch buffer use for page program - */ - writel(MTK_NOR_WR_BUF_ENABLE, mtk_nor->base + MTK_NOR_CFG2_REG); - return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg, - 0x01 == (reg & 0x01), 100, 10000); -} - -static int mtk_nor_write_buffer_disable(struct mtk_nor *mtk_nor) -{ - u8 reg; - - writel(MTK_NOR_WR_BUF_DISABLE, mtk_nor->base + MTK_NOR_CFG2_REG); - return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg, - MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100, - 10000); -} - -static void mtk_nor_set_addr_width(struct mtk_nor *mtk_nor) -{ - u8 val; - struct spi_nor *nor = &mtk_nor->nor; - - val = readb(mtk_nor->base + MTK_NOR_DUAL_REG); - - switch (nor->addr_width) { - case 3: - val &= ~MTK_NOR_4B_ADDR_EN; - break; - case 4: - val |= MTK_NOR_4B_ADDR_EN; - break; - default: - dev_warn(mtk_nor->dev, "Unexpected address width %u.\n", - nor->addr_width); - break; - } - - writeb(val, mtk_nor->base + MTK_NOR_DUAL_REG); -} - -static void mtk_nor_set_addr(struct mtk_nor *mtk_nor, u32 addr) -{ - int i; - - mtk_nor_set_addr_width(mtk_nor); - - for (i = 0; i < 3; i++) { - writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR0_REG + i * 4); - addr >>= 8; - } - /* Last register is non-contiguous */ - writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR3_REG); -} - -static ssize_t mtk_nor_read(struct spi_nor *nor, loff_t from, size_t 
length, - u_char *buffer) -{ - int i, ret; - int addr = (int)from; - u8 *buf = (u8 *)buffer; - struct mtk_nor *mtk_nor = nor->priv; - - /* set mode for fast read mode ,dual mode or quad mode */ - mtk_nor_set_read_mode(mtk_nor); - mtk_nor_set_addr(mtk_nor, addr); - - for (i = 0; i < length; i++) { - ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_READ_CMD); - if (ret < 0) - return ret; - buf[i] = readb(mtk_nor->base + MTK_NOR_RDATA_REG); - } - return length; -} - -static int mtk_nor_write_single_byte(struct mtk_nor *mtk_nor, - int addr, int length, u8 *data) -{ - int i, ret; - - mtk_nor_set_addr(mtk_nor, addr); - - for (i = 0; i < length; i++) { - writeb(*data++, mtk_nor->base + MTK_NOR_WDATA_REG); - ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_WR_CMD); - if (ret < 0) - return ret; - } - return 0; -} - -static int mtk_nor_write_buffer(struct mtk_nor *mtk_nor, int addr, - const u8 *buf) -{ - int i, bufidx, data; - - mtk_nor_set_addr(mtk_nor, addr); - - bufidx = 0; - for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) { - data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 | - buf[bufidx + 1]<<8 | buf[bufidx]; - bufidx += 4; - writel(data, mtk_nor->base + MTK_NOR_PP_DATA_REG); - } - return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WR_CMD); -} - -static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len, - const u_char *buf) -{ - int ret; - struct mtk_nor *mtk_nor = nor->priv; - size_t i; - - ret = mtk_nor_write_buffer_enable(mtk_nor); - if (ret < 0) { - dev_warn(mtk_nor->dev, "write buffer enable failed!\n"); - return ret; - } - - for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) { - ret = mtk_nor_write_buffer(mtk_nor, to, buf); - if (ret < 0) { - dev_err(mtk_nor->dev, "write buffer failed!\n"); - return ret; - } - to += SFLASH_WRBUF_SIZE; - buf += SFLASH_WRBUF_SIZE; - } - ret = mtk_nor_write_buffer_disable(mtk_nor); - if (ret < 0) { - dev_warn(mtk_nor->dev, "write buffer disable failed!\n"); - return ret; - } - - if (i < len) { - ret = mtk_nor_write_single_byte(mtk_nor, to, - (int)(len - i), (u8 *)buf); - if (ret < 0) { - dev_err(mtk_nor->dev, "write single byte failed!\n"); - return ret; - } - } - - return len; -} - -static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len) -{ - int ret; - struct mtk_nor *mtk_nor = nor->priv; - - switch (opcode) { - case SPINOR_OP_RDSR: - ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_RDSR_CMD); - if (ret < 0) - return ret; - if (len == 1) - *buf = readb(mtk_nor->base + MTK_NOR_RDSR_REG); - else - dev_err(mtk_nor->dev, "len should be 1 for read status!\n"); - break; - default: - ret = mtk_nor_do_tx_rx(mtk_nor, opcode, NULL, 0, buf, len); - break; - } - return ret; -} - -static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, - size_t len) -{ - int ret; - struct mtk_nor *mtk_nor = nor->priv; - - switch (opcode) { - case SPINOR_OP_WRSR: - /* We only handle 1 byte */ - ret = mtk_nor_wr_sr(mtk_nor, *buf); - break; - default: - ret = mtk_nor_do_tx_rx(mtk_nor, opcode, buf, len, NULL, 0); - if (ret) - dev_warn(mtk_nor->dev, "write reg failure!\n"); - break; - } - return ret; -} - -static void mtk_nor_disable_clk(struct mtk_nor *mtk_nor) -{ - clk_disable_unprepare(mtk_nor->spi_clk); - clk_disable_unprepare(mtk_nor->nor_clk); -} - -static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor) -{ - int ret; - - ret = clk_prepare_enable(mtk_nor->spi_clk); - if (ret) - return ret; - - ret = clk_prepare_enable(mtk_nor->nor_clk); - if (ret) { - clk_disable_unprepare(mtk_nor->spi_clk); - return ret; - } - - return 0; -} - 
-static const struct spi_nor_controller_ops mtk_controller_ops = { - .read_reg = mtk_nor_read_reg, - .write_reg = mtk_nor_write_reg, - .read = mtk_nor_read, - .write = mtk_nor_write, -}; - -static int mtk_nor_init(struct mtk_nor *mtk_nor, - struct device_node *flash_node) -{ - const struct spi_nor_hwcaps hwcaps = { - .mask = SNOR_HWCAPS_READ | - SNOR_HWCAPS_READ_FAST | - SNOR_HWCAPS_READ_1_1_2 | - SNOR_HWCAPS_PP, - }; - int ret; - struct spi_nor *nor; - - /* initialize controller to accept commands */ - writel(MTK_NOR_ENABLE_SF_CMD, mtk_nor->base + MTK_NOR_WRPROT_REG); - - nor = &mtk_nor->nor; - nor->dev = mtk_nor->dev; - nor->priv = mtk_nor; - spi_nor_set_flash_node(nor, flash_node); - nor->controller_ops = &mtk_controller_ops; - - nor->mtd.name = "mtk_nor"; - /* initialized with NULL */ - ret = spi_nor_scan(nor, NULL, &hwcaps); - if (ret) - return ret; - - return mtd_device_register(&nor->mtd, NULL, 0); -} - -static int mtk_nor_drv_probe(struct platform_device *pdev) -{ - struct device_node *flash_np; - struct resource *res; - int ret; - struct mtk_nor *mtk_nor; - - if (!pdev->dev.of_node) { - dev_err(&pdev->dev, "No DT found\n"); - return -EINVAL; - } - - mtk_nor = devm_kzalloc(&pdev->dev, sizeof(*mtk_nor), GFP_KERNEL); - if (!mtk_nor) - return -ENOMEM; - platform_set_drvdata(pdev, mtk_nor); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mtk_nor->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(mtk_nor->base)) - return PTR_ERR(mtk_nor->base); - - mtk_nor->spi_clk = devm_clk_get(&pdev->dev, "spi"); - if (IS_ERR(mtk_nor->spi_clk)) - return PTR_ERR(mtk_nor->spi_clk); - - mtk_nor->nor_clk = devm_clk_get(&pdev->dev, "sf"); - if (IS_ERR(mtk_nor->nor_clk)) - return PTR_ERR(mtk_nor->nor_clk); - - mtk_nor->dev = &pdev->dev; - - ret = mtk_nor_enable_clk(mtk_nor); - if (ret) - return ret; - - /* only support one attached flash */ - flash_np = of_get_next_available_child(pdev->dev.of_node, NULL); - if (!flash_np) { - dev_err(&pdev->dev, "no SPI flash device to configure\n"); - ret = -ENODEV; - goto nor_free; - } - ret = mtk_nor_init(mtk_nor, flash_np); - -nor_free: - if (ret) - mtk_nor_disable_clk(mtk_nor); - - return ret; -} - -static int mtk_nor_drv_remove(struct platform_device *pdev) -{ - struct mtk_nor *mtk_nor = platform_get_drvdata(pdev); - - mtk_nor_disable_clk(mtk_nor); - - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int mtk_nor_suspend(struct device *dev) -{ - struct mtk_nor *mtk_nor = dev_get_drvdata(dev); - - mtk_nor_disable_clk(mtk_nor); - - return 0; -} - -static int mtk_nor_resume(struct device *dev) -{ - struct mtk_nor *mtk_nor = dev_get_drvdata(dev); - - return mtk_nor_enable_clk(mtk_nor); -} - -static const struct dev_pm_ops mtk_nor_dev_pm_ops = { - .suspend = mtk_nor_suspend, - .resume = mtk_nor_resume, -}; - -#define MTK_NOR_DEV_PM_OPS (&mtk_nor_dev_pm_ops) -#else -#define MTK_NOR_DEV_PM_OPS NULL -#endif - -static const struct of_device_id mtk_nor_of_ids[] = { - { .compatible = "mediatek,mt8173-nor"}, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, mtk_nor_of_ids); - -static struct platform_driver mtk_nor_driver = { - .probe = mtk_nor_drv_probe, - .remove = mtk_nor_drv_remove, - .driver = { - .name = "mtk-nor", - .pm = MTK_NOR_DEV_PM_OPS, - .of_match_table = mtk_nor_of_ids, - }, -}; - -module_platform_driver(mtk_nor_driver); -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("MediaTek SPI NOR Flash Driver"); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 25a8f9387d5a..db8884ad6d40 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig 
@@ -149,6 +149,7 @@ config NET_FC config IFB tristate "Intermediate Functional Block support" depends on NET_CLS_ACT + select NET_REDIRECT ---help--- This is an intermediate driver that allows sharing of resources. diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 1cc2cd894f87..c81698550e5a 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -50,11 +50,6 @@ struct arp_pkt { }; #pragma pack() -static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb) -{ - return (struct arp_pkt *)skb_network_header(skb); -} - /* Forward declaration */ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], bool strict_match); @@ -553,10 +548,11 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) spin_unlock(&bond->mode_lock); } -static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond) +static struct slave *rlb_choose_channel(struct sk_buff *skb, + struct bonding *bond, + const struct arp_pkt *arp) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - struct arp_pkt *arp = arp_pkt(skb); struct slave *assigned_slave, *curr_active_slave; struct rlb_client_info *client_info; u32 hash_index = 0; @@ -653,8 +649,12 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon */ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) { - struct arp_pkt *arp = arp_pkt(skb); struct slave *tx_slave = NULL; + struct arp_pkt *arp; + + if (!pskb_network_may_pull(skb, sizeof(*arp))) + return NULL; + arp = (struct arp_pkt *)skb_network_header(skb); /* Don't modify or load balance ARPs that do not originate locally * (e.g.,arrive via a bridge). @@ -664,7 +664,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) if (arp->op_code == htons(ARPOP_REPLY)) { /* the arp must be sent on the selected rx channel */ - tx_slave = rlb_choose_channel(skb, bond); + tx_slave = rlb_choose_channel(skb, bond, arp); if (tx_slave) bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr, tx_slave->dev->addr_len); @@ -676,7 +676,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) * When the arp reply is received the entry will be updated * with the correct unicast address of the client. */ - tx_slave = rlb_choose_channel(skb, bond); + tx_slave = rlb_choose_channel(skb, bond, arp); /* The ARP reply packets must be delayed so that * they can cancel out the influence of the ARP request. 
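The bond_alb change above stops assuming the ARP header is already available: rlb_arp_xmit() now calls pskb_network_may_pull() before casting skb_network_header(), and hands the validated pointer down to rlb_choose_channel() instead of re-deriving it there. A minimal sketch of that validate-then-parse pattern, using the generic struct arphdr for illustration (the bonding code uses its own arp_pkt layout):

        #include <linux/if_arp.h>
        #include <linux/skbuff.h>

        /* Sketch: only dereference a network-layer header after pulling it. */
        static struct arphdr *get_arp_header(struct sk_buff *skb)
        {
                /* Ensure sizeof(struct arphdr) bytes are linear and readable. */
                if (!pskb_network_may_pull(skb, sizeof(struct arphdr)))
                        return NULL;

                return (struct arphdr *)skb_network_header(skb);
        }
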
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 48d5ec770b94..d10805e5e623 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3526,6 +3526,47 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res, } } +#ifdef CONFIG_LOCKDEP +static int bond_get_lowest_level_rcu(struct net_device *dev) +{ + struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; + struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; + int cur = 0, max = 0; + + now = dev; + iter = &dev->adj_list.lower; + + while (1) { + next = NULL; + while (1) { + ldev = netdev_next_lower_dev_rcu(now, &iter); + if (!ldev) + break; + + next = ldev; + niter = &ldev->adj_list.lower; + dev_stack[cur] = now; + iter_stack[cur++] = iter; + if (max <= cur) + max = cur; + break; + } + + if (!next) { + if (!cur) + return max; + next = dev_stack[--cur]; + niter = iter_stack[cur]; + } + + now = next; + iter = niter; + } + + return max; +} +#endif + static void bond_get_stats(struct net_device *bond_dev, struct rtnl_link_stats64 *stats) { @@ -3533,11 +3574,17 @@ static void bond_get_stats(struct net_device *bond_dev, struct rtnl_link_stats64 temp; struct list_head *iter; struct slave *slave; + int nest_level = 0; - spin_lock(&bond->stats_lock); - memcpy(stats, &bond->bond_stats, sizeof(*stats)); rcu_read_lock(); +#ifdef CONFIG_LOCKDEP + nest_level = bond_get_lowest_level_rcu(bond_dev); +#endif + + spin_lock_nested(&bond->stats_lock, nest_level); + memcpy(stats, &bond->bond_stats, sizeof(*stats)); + bond_for_each_slave_rcu(bond, slave, iter) { const struct rtnl_link_stats64 *new = dev_get_stats(slave->dev, &temp); @@ -3547,10 +3594,10 @@ static void bond_get_stats(struct net_device *bond_dev, /* save off the slave stats for the next run */ memcpy(&slave->slave_stats, new, sizeof(*new)); } - rcu_read_unlock(); memcpy(&bond->bond_stats, stats, sizeof(*stats)); spin_unlock(&bond->stats_lock); + rcu_read_unlock(); } static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) @@ -3640,6 +3687,8 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd case BOND_RELEASE_OLD: case SIOCBONDRELEASE: res = bond_release(bond_dev, slave_dev); + if (!res) + netdev_update_lockdep_key(slave_dev); break; case BOND_SETHWADDR_OLD: case SIOCBONDSETHWADDR: diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index ddb3916d3506..215c10923289 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1398,6 +1398,8 @@ static int bond_option_slaves_set(struct bonding *bond, case '-': slave_dbg(bond->dev, dev, "Releasing interface\n"); ret = bond_release(bond->dev, dev); + if (!ret) + netdev_update_lockdep_key(dev); break; default: diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 8e81bdf98ac6..63f2548f5b1b 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -141,29 +141,29 @@ static ssize_t dbgfs_state(struct file *file, char __user *user_buf, return 0; /* Print out debug information. 
*/ - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "CAIF SPI debug information:\n"); - - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR); - - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "STATE: %d\n", cfspi->dbg_state); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Previous CMD: 0x%x\n", cfspi->pcmd); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Current CMD: 0x%x\n", cfspi->cmd); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Previous TX len: %d\n", cfspi->tx_ppck_len); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Previous RX len: %d\n", cfspi->rx_ppck_len); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Current TX len: %d\n", cfspi->tx_cpck_len); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Current RX len: %d\n", cfspi->rx_cpck_len); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Next TX len: %d\n", cfspi->tx_npck_len); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Next RX len: %d\n", cfspi->rx_npck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "CAIF SPI debug information:\n"); + + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR); + + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "STATE: %d\n", cfspi->dbg_state); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Previous CMD: 0x%x\n", cfspi->pcmd); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Current CMD: 0x%x\n", cfspi->cmd); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Previous TX len: %d\n", cfspi->tx_ppck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Previous RX len: %d\n", cfspi->rx_ppck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Current TX len: %d\n", cfspi->tx_cpck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Current RX len: %d\n", cfspi->rx_cpck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Next TX len: %d\n", cfspi->tx_npck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Next RX len: %d\n", cfspi->rx_npck_len); if (len > DEBUGFS_BUF_SIZE) len = DEBUGFS_BUF_SIZE; @@ -180,23 +180,23 @@ static ssize_t print_frame(char *buf, size_t size, char *frm, int len = 0; int i; for (i = 0; i < count; i++) { - len += snprintf((buf + len), (size - len), + len += scnprintf((buf + len), (size - len), "[0x" BYTE_HEX_FMT "]", frm[i]); if ((i == cut) && (count > (cut * 2))) { /* Fast forward. */ i = count - cut; - len += snprintf((buf + len), (size - len), - "--- %zu bytes skipped ---\n", - count - (cut * 2)); + len += scnprintf((buf + len), (size - len), + "--- %zu bytes skipped ---\n", + count - (cut * 2)); } if ((!(i % 10)) && i) { - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "\n"); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "\n"); } } - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n"); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n"); return len; } @@ -214,18 +214,18 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf, return 0; /* Print out debug information. 
*/ - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Current frame:\n"); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Current frame:\n"); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Tx data (Len: %d):\n", cfspi->tx_cpck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Tx data (Len: %d):\n", cfspi->tx_cpck_len); len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len), cfspi->xfer.va_tx[0], (cfspi->tx_cpck_len + SPI_CMD_SZ), 100); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Rx data (Len: %d):\n", cfspi->rx_cpck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Rx data (Len: %d):\n", cfspi->rx_cpck_len); len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len), cfspi->xfer.va_rx, diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 6ee06a49fb4c..68834a2853c9 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -883,6 +883,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { = { .len = sizeof(struct can_bittiming) }, [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) }, + [IFLA_CAN_TERMINATION] = { .type = NLA_U16 }, }; static int can_validate(struct nlattr *tb[], struct nlattr *data[], diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 2f5c287eac95..a3664281a33f 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -625,7 +625,10 @@ err_free_chan: tty->disc_data = NULL; clear_bit(SLF_INUSE, &sl->flags); slc_free_netdev(sl->dev); + /* do not call free_netdev before rtnl_unlock */ + rtnl_unlock(); free_netdev(sl->dev); + return err; err_exit: rtnl_unlock(); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 449a22172e07..1a69286daa8d 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1366,6 +1366,9 @@ void b53_vlan_add(struct dsa_switch *ds, int port, b53_get_vlan_entry(dev, vid, vl); + if (vid == 0 && vid == b53_default_pvid(dev)) + untagged = true; + vl->members |= BIT(port); if (untagged && !dsa_is_cpu_port(ds, port)) vl->untag |= BIT(port); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index d1955543acd1..b0f5280a83cb 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -69,8 +69,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) /* Force link status for IMP port */ reg = core_readl(priv, offset); reg |= (MII_SW_OR | LINK_STS); - if (priv->type == BCM7278_DEVICE_ID) - reg |= GMII_SPEED_UP_2G; + reg &= ~GMII_SPEED_UP_2G; core_writel(priv, reg, offset); /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 022466ca1c19..7cbd1bd4c5a6 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -566,7 +566,7 @@ mt7530_mib_reset(struct dsa_switch *ds) static void mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable) { - u32 mask = PMCR_TX_EN | PMCR_RX_EN; + u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK; if (enable) mt7530_set(priv, MT7530_PMCR_P(port), mask); @@ -1444,7 +1444,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port, mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 | PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN); mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN | - PMCR_BACKPR_EN | PMCR_FORCE_MODE | PMCR_FORCE_LNK; + PMCR_BACKPR_EN | PMCR_FORCE_MODE; /* Are we connected to external phy */ if (port == 
5 && dsa_is_user_port(ds, 5)) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8c9289549688..2f993e673ec7 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2769,6 +2769,8 @@ static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip, goto unlock; } + occupancy &= MV88E6XXX_G2_ATU_STATS_MASK; + unlock: mv88e6xxx_reg_unlock(chip); diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index b016cc205f81..ca3a7a7a73c3 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -278,13 +278,13 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, switch (direction) { case MV88E6XXX_EGRESS_DIR_INGRESS: dest_port_chip = &chip->ingress_dest_port; - reg &= MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK; + reg &= ~MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK; reg |= port << __bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK); break; case MV88E6XXX_EGRESS_DIR_EGRESS: dest_port_chip = &chip->egress_dest_port; - reg &= MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK; + reg &= ~MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK; reg |= port << __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK); break; diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 01503014b1ee..8fd483020c5b 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -1099,6 +1099,13 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip) { int err, irq, virq; + chip->g2_irq.masked = ~0; + mv88e6xxx_reg_lock(chip); + err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked); + mv88e6xxx_reg_unlock(chip); + if (err) + return err; + chip->g2_irq.domain = irq_domain_add_simple( chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip); if (!chip->g2_irq.domain) @@ -1108,7 +1115,6 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip) irq_create_mapping(chip->g2_irq.domain, irq); chip->g2_irq.chip = mv88e6xxx_g2_irq_chip; - chip->g2_irq.masked = ~0; chip->device_irq = irq_find_mapping(chip->g1_irq.domain, MV88E6XXX_G1_STS_IRQ_DEVICE); diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 03ba6d25f7fe..7edea5741a5f 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -1741,7 +1741,8 @@ static void sja1105_teardown(struct dsa_switch *ds) if (!dsa_is_user_port(ds, port)) continue; - kthread_destroy_worker(sp->xmit_worker); + if (sp->xmit_worker) + kthread_destroy_worker(sp->xmit_worker); } sja1105_tas_teardown(ds); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 0b2fd96b93d7..cada6e7e30f4 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1018,13 +1018,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) struct ena_rx_buffer *rx_info; req_id = rx_ring->free_ids[next_to_use]; - rc = validate_rx_req_id(rx_ring, req_id); - if (unlikely(rc < 0)) - break; rx_info = &rx_ring->rx_buffer_info[req_id]; - rc = ena_alloc_rx_page(rx_ring, rx_info, GFP_ATOMIC | __GFP_COMP); if (unlikely(rc < 0)) { @@ -1379,9 +1375,15 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info; u16 len, req_id, buf = 0; void *va; + int rc; len = ena_bufs[buf].len; req_id = ena_bufs[buf].req_id; + + rc = validate_rx_req_id(rx_ring, req_id); + if (unlikely(rc < 0)) + return NULL; + 
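The ena hunks above move validate_rx_req_id() out of the refill path and into ena_rx_skb(), so the req_id returned by the device in a completion descriptor is checked before it is used to index rx_buffer_info[]. A generic sketch of that guard; the structure and field names are illustrative, not the driver's:

        #include <linux/types.h>

        /* Sketch: never index a host array with an unchecked device-supplied id. */
        struct example_rx_buf {
                void *page;
        };

        struct example_rx_ring {
                struct example_rx_buf *buffer_info;
                u16 ring_size;
        };

        static struct example_rx_buf *lookup_rx_buf(struct example_rx_ring *ring,
                                                    u16 req_id)
        {
                if (req_id >= ring->ring_size)
                        return NULL;    /* corrupted or malicious descriptor */

                return &ring->buffer_info[req_id];
        }
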
rx_info = &rx_ring->rx_buffer_info[req_id]; if (unlikely(!rx_info->page)) { @@ -1454,6 +1456,11 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, buf++; len = ena_bufs[buf].len; req_id = ena_bufs[buf].req_id; + + rc = validate_rx_req_id(rx_ring, req_id); + if (unlikely(rc < 0)) + return NULL; + rx_info = &rx_ring->rx_buffer_info[req_id]; } while (1); @@ -1968,7 +1975,7 @@ static int ena_enable_msix(struct ena_adapter *adapter) } /* Reserved the max msix vectors we might need */ - msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_io_queues); + msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); netif_dbg(adapter, probe, adapter->netdev, "trying to enable MSI-X, vectors %d\n", msix_vecs); @@ -2068,6 +2075,7 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter) static int ena_request_io_irq(struct ena_adapter *adapter) { + u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; unsigned long flags = 0; struct ena_irq *irq; int rc = 0, i, k; @@ -2078,7 +2086,7 @@ static int ena_request_io_irq(struct ena_adapter *adapter) return -EINVAL; } - for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { + for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { irq = &adapter->irq_tbl[i]; rc = request_irq(irq->vector, irq->handler, flags, irq->name, irq->data); @@ -2119,6 +2127,7 @@ static void ena_free_mgmnt_irq(struct ena_adapter *adapter) static void ena_free_io_irq(struct ena_adapter *adapter) { + u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; struct ena_irq *irq; int i; @@ -2129,7 +2138,7 @@ static void ena_free_io_irq(struct ena_adapter *adapter) } #endif /* CONFIG_RFS_ACCEL */ - for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { + for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { irq = &adapter->irq_tbl[i]; irq_set_affinity_hint(irq->vector, NULL); free_irq(irq->vector, irq->data); @@ -2144,12 +2153,13 @@ static void ena_disable_msix(struct ena_adapter *adapter) static void ena_disable_io_intr_sync(struct ena_adapter *adapter) { + u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; int i; if (!netif_running(adapter->netdev)) return; - for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) + for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) synchronize_irq(adapter->irq_tbl[i].vector); } @@ -3476,6 +3486,7 @@ static int ena_restore_device(struct ena_adapter *adapter) netif_carrier_on(adapter->netdev); mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); + adapter->last_keep_alive_jiffies = jiffies; dev_err(&pdev->dev, "Device reset completed successfully, Driver info: %s\n", version); @@ -4325,13 +4336,15 @@ err_disable_device: /*****************************************************************************/ -/* ena_remove - Device Removal Routine +/* __ena_shutoff - Helper used in both PCI remove/shutdown routines * @pdev: PCI device information struct + * @shutdown: Is it a shutdown operation? If false, means it is a removal * - * ena_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. + * __ena_shutoff is a helper routine that does the real work on shutdown and + * removal paths; the difference between those paths is with regards to whether + * dettach or unregister the netdevice. 
*/ -static void ena_remove(struct pci_dev *pdev) +static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) { struct ena_adapter *adapter = pci_get_drvdata(pdev); struct ena_com_dev *ena_dev; @@ -4350,13 +4363,17 @@ static void ena_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->reset_task); - rtnl_lock(); + rtnl_lock(); /* lock released inside the below if-else block */ ena_destroy_device(adapter, true); - rtnl_unlock(); - - unregister_netdev(netdev); - - free_netdev(netdev); + if (shutdown) { + netif_device_detach(netdev); + dev_close(netdev); + rtnl_unlock(); + } else { + rtnl_unlock(); + unregister_netdev(netdev); + free_netdev(netdev); + } ena_com_rss_destroy(ena_dev); @@ -4371,6 +4388,30 @@ static void ena_remove(struct pci_dev *pdev) vfree(ena_dev); } +/* ena_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ena_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. + */ + +static void ena_remove(struct pci_dev *pdev) +{ + __ena_shutoff(pdev, false); +} + +/* ena_shutdown - Device Shutdown Routine + * @pdev: PCI device information struct + * + * ena_shutdown is called by the PCI subsystem to alert the driver that + * a shutdown/reboot (or kexec) is happening and device must be disabled. + */ + +static void ena_shutdown(struct pci_dev *pdev) +{ + __ena_shutoff(pdev, true); +} + #ifdef CONFIG_PM /* ena_suspend - PM suspend callback * @pdev: PCI device information struct @@ -4420,6 +4461,7 @@ static struct pci_driver ena_pci_driver = { .id_table = ena_pci_tbl, .probe = ena_probe, .remove = ena_remove, + .shutdown = ena_shutdown, #ifdef CONFIG_PM .suspend = ena_suspend, .resume = ena_resume, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index a1f99bef4a68..7b55633d2cb9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -722,6 +722,11 @@ static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags) if (flags & ~AQ_PRIV_FLAGS_MASK) return -EOPNOTSUPP; + if (hweight32((flags | priv_flags) & AQ_HW_LOOPBACK_MASK) > 1) { + netdev_info(ndev, "Can't enable more than one loopback simultaneously\n"); + return -EINVAL; + } + cfg->priv_flags = flags; if ((priv_flags ^ flags) & BIT(AQ_HW_LOOPBACK_DMA_NET)) { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c index 6102251bb909..03ff92bc4a7f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c @@ -163,7 +163,7 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic, } if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) && - (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci), + (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK, aq_nic->active_vlans))) { netdev_err(aq_nic->ndev, "ethtool: unknown vlan-id specified"); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index cc70c606b6ef..251767c31f7e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -337,6 +337,8 @@ struct aq_fw_ops { void (*enable_ptp)(struct aq_hw_s *self, int enable); + void (*adjust_ptp)(struct aq_hw_s *self, uint64_t adj); + int (*set_eee_rate)(struct aq_hw_s *self, u32 speed); int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate, diff --git 
a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index c85e3e29012c..e95f6a6bef73 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -533,8 +533,10 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, dx_buff->len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) + if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) { + ret = 0; goto exit; + } first = dx_buff; dx_buff->len_pkt = skb->len; @@ -655,10 +657,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) if (likely(frags)) { err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw, ring, frags); - if (err >= 0) { - ++ring->stats.tx.packets; - ring->stats.tx.bytes += skb->len; - } } else { err = NETDEV_TX_BUSY; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 6b27af0db499..78b6f3248756 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -359,7 +359,8 @@ static int aq_suspend_common(struct device *dev, bool deep) netif_device_detach(nic->ndev); netif_tx_stop_all_queues(nic->ndev); - aq_nic_stop(nic); + if (netif_running(nic->ndev)) + aq_nic_stop(nic); if (deep) { aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); @@ -375,7 +376,7 @@ static int atl_resume_common(struct device *dev, bool deep) { struct pci_dev *pdev = to_pci_dev(dev); struct aq_nic_s *nic; - int ret; + int ret = 0; nic = pci_get_drvdata(pdev); @@ -390,9 +391,11 @@ static int atl_resume_common(struct device *dev, bool deep) goto err_exit; } - ret = aq_nic_start(nic); - if (ret) - goto err_exit; + if (netif_running(nic->ndev)) { + ret = aq_nic_start(nic); + if (ret) + goto err_exit; + } netif_device_attach(nic->ndev); netif_tx_start_all_queues(nic->ndev); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 951d86f8b66e..bae95a618560 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -272,9 +272,12 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) } } - if (unlikely(buff->is_eop)) - dev_kfree_skb_any(buff->skb); + if (unlikely(buff->is_eop)) { + ++self->stats.rx.packets; + self->stats.tx.bytes += buff->skb->len; + dev_kfree_skb_any(buff->skb); + } buff->pa = 0U; buff->eop_index = 0xffffU; self->sw_head = aq_ring_next_dx(self, self->sw_head); @@ -351,7 +354,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self, err = 0; goto err_exit; } - if (buff->is_error || buff->is_cso_err) { + if (buff->is_error || + (buff->is_lro && buff->is_cso_err)) { buff_ = buff; do { next_ = buff_->next, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 991e4d31b094..2c96f20f6289 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -78,7 +78,8 @@ struct __packed aq_ring_buff_s { u32 is_cleaned:1; u32 is_error:1; u32 is_vlan:1; - u32 rsvd3:4; + u32 is_lro:1; + u32 rsvd3:3; u16 eop_index; u16 rsvd4; }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index ec041f78d063..d20d91cdece8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 
@@ -823,6 +823,8 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, } } + buff->is_lro = !!(HW_ATL_B0_RXD_WB_STAT2_RSCCNT & + rxd_wb->status); if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { buff->len = rxd_wb->pkt_len % AQ_CFG_RX_FRAME_MAX; @@ -835,8 +837,7 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ? AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len; - if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT & - rxd_wb->status) { + if (buff->is_lro) { /* LRO */ buff->next = rxd_wb->next_desc_ptr; ++ring->stats.rx.lro_packets; @@ -884,13 +885,16 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, { struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; unsigned int i = 0U; + u32 vlan_promisc; + u32 l2_promisc; - hw_atl_rpfl2promiscuous_mode_en_set(self, - IS_FILTER_ENABLED(IFF_PROMISC)); + l2_promisc = IS_FILTER_ENABLED(IFF_PROMISC) || + !!(cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)); + vlan_promisc = l2_promisc || cfg->is_vlan_force_promisc; - hw_atl_rpf_vlan_prom_mode_en_set(self, - IS_FILTER_ENABLED(IFF_PROMISC) || - cfg->is_vlan_force_promisc); + hw_atl_rpfl2promiscuous_mode_en_set(self, l2_promisc); + + hw_atl_rpf_vlan_prom_mode_en_set(self, vlan_promisc); hw_atl_rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_ALLMULTI) && @@ -1161,6 +1165,8 @@ static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta) { self->ptp_clk_offset += delta; + self->aq_fw_ops->adjust_ptp(self, self->ptp_clk_offset); + return 0; } @@ -1211,7 +1217,7 @@ static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index, fwreq.ptp_gpio_ctrl.index = index; fwreq.ptp_gpio_ctrl.period = period; /* Apply time offset */ - fwreq.ptp_gpio_ctrl.start = start - self->ptp_clk_offset; + fwreq.ptp_gpio_ctrl.start = start; size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl); return self->aq_fw_ops->send_fw_request(self, &fwreq, size); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index f547baa6c954..354705f9bc49 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -22,6 +22,7 @@ #define HW_ATL_MIF_ADDR 0x0208U #define HW_ATL_MIF_VAL 0x020CU +#define HW_ATL_MPI_RPC_ADDR 0x0334U #define HW_ATL_RPC_CONTROL_ADR 0x0338U #define HW_ATL_RPC_STATE_ADR 0x033CU @@ -53,15 +54,14 @@ enum mcp_area { }; static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual); - static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); - static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self); static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self); static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self); static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self); static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self); +static u32 aq_fw1x_rpc_get(struct aq_hw_s *self); int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) { @@ -476,6 +476,10 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self, self, self->mbox_addr, self->mbox_addr != 0U, 1000U, 10000U); + err = readx_poll_timeout_atomic(aq_fw1x_rpc_get, self, + self->rpc_addr, + self->rpc_addr != 0U, + 1000U, 100000U); return err; } @@ -531,6 +535,12 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, self, fw.val, sw.tid == fw.tid, 1000U, 100000U); + if (err < 0) + goto err_exit; + + err = aq_hw_err_from_flags(self); + if (err < 0) + goto err_exit; if 
(fw.len == 0xFFFFU) { err = hw_atl_utils_fw_rpc_call(self, sw.len); @@ -1025,6 +1035,11 @@ static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self) return aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR); } +static u32 aq_fw1x_rpc_get(struct aq_hw_s *self) +{ + return aq_hw_read_reg(self, HW_ATL_MPI_RPC_ADDR); +} + const struct aq_fw_ops aq_fw_1x_ops = { .init = hw_atl_utils_mpi_create, .deinit = hw_atl_fw1x_deinit, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index 97ebf849695f..77a4ed64830f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -30,6 +30,9 @@ #define HW_ATL_FW3X_EXT_CONTROL_ADDR 0x378 #define HW_ATL_FW3X_EXT_STATE_ADDR 0x37c +#define HW_ATL_FW3X_PTP_ADJ_LSW_ADDR 0x50a0 +#define HW_ATL_FW3X_PTP_ADJ_MSW_ADDR 0x50a4 + #define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE) #define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE) #define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY) @@ -475,6 +478,14 @@ static void aq_fw3x_enable_ptp(struct aq_hw_s *self, int enable) aq_hw_write_reg(self, HW_ATL_FW3X_EXT_CONTROL_ADDR, ptp_opts); } +static void aq_fw3x_adjust_ptp(struct aq_hw_s *self, uint64_t adj) +{ + aq_hw_write_reg(self, HW_ATL_FW3X_PTP_ADJ_LSW_ADDR, + (adj >> 0) & 0xffffffff); + aq_hw_write_reg(self, HW_ATL_FW3X_PTP_ADJ_MSW_ADDR, + (adj >> 32) & 0xffffffff); +} + static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode) { if (self->fw_ver_actual < HW_ATL_FW_VER_LED) @@ -633,4 +644,5 @@ const struct aq_fw_ops aq_fw_2x_ops = { .enable_ptp = aq_fw3x_enable_ptp, .led_control = aq_fw2x_led_control, .set_phyloopback = aq_fw2x_set_phyloopback, + .adjust_ptp = aq_fw3x_adjust_ptp, }; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index e0611cba87f9..15b31cddc054 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2135,7 +2135,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv, return -ENOSPC; index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX); - if (index > RXCHK_BRCM_TAG_MAX) + if (index >= RXCHK_BRCM_TAG_MAX) return -ENOSPC; /* Location is the classification ID, and index is the position diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 597e6fd5bfea..d28b406a26b1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6880,12 +6880,12 @@ skip_rdma: } ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); - if (rc) + if (rc) { netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", rc); - else - ctx->flags |= BNXT_CTX_FLAG_INITED; - + return rc; + } + ctx->flags |= BNXT_CTX_FLAG_INITED; return 0; } @@ -7406,14 +7406,22 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) pri2cos = &resp2->pri0_cos_queue_id; for (i = 0; i < 8; i++) { u8 queue_id = pri2cos[i]; + u8 queue_idx; + /* Per port queue IDs start from 0, 10, 20, etc */ + queue_idx = queue_id % 10; + if (queue_idx > BNXT_MAX_QUEUE) { + bp->pri2cos_valid = false; + goto qstats_done; + } for (j = 0; j < bp->max_q; j++) { if (bp->q_ids[j] == queue_id) - bp->pri2cos[i] = j; + bp->pri2cos_idx[i] = queue_idx; } } bp->pri2cos_valid = 1; } +qstats_done: mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -10982,13 +10990,13 @@ 
static int bnxt_change_mtu(struct net_device *dev, int new_mtu) struct bnxt *bp = netdev_priv(dev); if (netif_running(dev)) - bnxt_close_nic(bp, false, false); + bnxt_close_nic(bp, true, false); dev->mtu = new_mtu; bnxt_set_ring_params(bp); if (netif_running(dev)) - return bnxt_open_nic(bp, false, false); + return bnxt_open_nic(bp, true, false); return 0; } @@ -11252,7 +11260,7 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp) } } if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) - netdev_info(bp->dev, "Receive PF driver unload event!"); + netdev_info(bp->dev, "Receive PF driver unload event!\n"); } #else @@ -11669,6 +11677,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) bp->rx_nr_rings++; bp->cp_nr_rings++; } + if (rc) { + bp->tx_nr_rings = 0; + bp->rx_nr_rings = 0; + } return rc; } @@ -11759,7 +11771,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) u32 dw; if (!pos) { - netdev_info(bp->dev, "Unable do read adapter's DSN"); + netdev_info(bp->dev, "Unable do read adapter's DSN\n"); return -EOPNOTSUPP; } @@ -11786,6 +11798,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (version_printed++ == 0) pr_info("%s", version); + /* Clear any pending DMA transactions from crash kernel + * while loading driver in capture kernel. + */ + if (is_kdump_kernel()) { + pci_clear_master(pdev); + pcie_flr(pdev); + } + max_irqs = bnxt_get_max_irq(pdev); dev = alloc_etherdev_mq(sizeof(*bp), max_irqs); if (!dev) @@ -11954,12 +11974,12 @@ init_err_pci_clean: bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_short_cmd_req(bp); bnxt_free_hwrm_resources(bp); - bnxt_free_ctx_mem(bp); - kfree(bp->ctx); - bp->ctx = NULL; kfree(bp->fw_health); bp->fw_health = NULL; bnxt_cleanup_pci(bp); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; init_err_free: free_netdev(dev); @@ -11983,10 +12003,10 @@ static void bnxt_shutdown(struct pci_dev *pdev) dev_close(dev); bnxt_ulp_shutdown(bp); + bnxt_clear_int_mode(bp); + pci_disable_device(pdev); if (system_state == SYSTEM_POWER_OFF) { - bnxt_clear_int_mode(bp); - pci_disable_device(pdev); pci_wake_from_d3(pdev, bp->wol); pci_set_power_state(pdev, PCI_D3hot); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index cabef0b4f5fb..63b170658532 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1716,7 +1716,7 @@ struct bnxt { u16 fw_rx_stats_ext_size; u16 fw_tx_stats_ext_size; u16 hw_ring_stats_size; - u8 pri2cos[8]; + u8 pri2cos_idx[8]; u8 pri2cos_valid; u16 hwrm_max_req_len; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index fb6f30d0d1d0..b1511bcffb1b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -479,24 +479,26 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { struct bnxt *bp = netdev_priv(dev); struct ieee_ets *my_ets = bp->ieee_ets; + int rc; ets->ets_cap = bp->max_tc; if (!my_ets) { - int rc; - if (bp->dcbx_cap & DCB_CAP_DCBX_HOST) return 0; my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL); if (!my_ets) - return 0; + return -ENOMEM; rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets); if (rc) - return 0; + goto error; rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets); if (rc) - return 0; + goto error; + + /* cache result */ + bp->ieee_ets = my_ets; } ets->cbs = my_ets->cbs; @@ -505,6 +507,9 @@ static int bnxt_dcbnl_ieee_getets(struct 
net_device *dev, struct ieee_ets *ets) memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); return 0; +error: + kfree(my_ets); + return rc; } static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index eec0168330b7..d3c93ccee86a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -641,14 +641,14 @@ static int bnxt_dl_params_register(struct bnxt *bp) rc = devlink_params_register(bp->dl, bnxt_dl_params, ARRAY_SIZE(bnxt_dl_params)); if (rc) { - netdev_warn(bp->dev, "devlink_params_register failed. rc=%d", + netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n", rc); return rc; } rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params, ARRAY_SIZE(bnxt_dl_port_params)); if (rc) { - netdev_err(bp->dev, "devlink_port_params_register failed"); + netdev_err(bp->dev, "devlink_port_params_register failed\n"); devlink_params_unregister(bp->dl, bnxt_dl_params, ARRAY_SIZE(bnxt_dl_params)); return rc; @@ -679,7 +679,7 @@ int bnxt_dl_register(struct bnxt *bp) else dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl)); if (!dl) { - netdev_warn(bp->dev, "devlink_alloc failed"); + netdev_warn(bp->dev, "devlink_alloc failed\n"); return -ENOMEM; } @@ -692,7 +692,7 @@ int bnxt_dl_register(struct bnxt *bp) rc = devlink_register(dl, &bp->pdev->dev); if (rc) { - netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc); + netdev_warn(bp->dev, "devlink_register failed. rc=%d\n", rc); goto err_dl_free; } @@ -704,7 +704,7 @@ int bnxt_dl_register(struct bnxt *bp) sizeof(bp->dsn)); rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id); if (rc) { - netdev_err(bp->dev, "devlink_port_register failed"); + netdev_err(bp->dev, "devlink_port_register failed\n"); goto err_dl_unreg; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 6171fa8b3677..3f8a1ded662a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -589,25 +589,25 @@ skip_ring_stats: if (bp->pri2cos_valid) { for (i = 0; i < 8; i++, j++) { long n = bnxt_rx_bytes_pri_arr[i].base_off + - bp->pri2cos[i]; + bp->pri2cos_idx[i]; buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); } for (i = 0; i < 8; i++, j++) { long n = bnxt_rx_pkts_pri_arr[i].base_off + - bp->pri2cos[i]; + bp->pri2cos_idx[i]; buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); } for (i = 0; i < 8; i++, j++) { long n = bnxt_tx_bytes_pri_arr[i].base_off + - bp->pri2cos[i]; + bp->pri2cos_idx[i]; buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); } for (i = 0; i < 8; i++, j++) { long n = bnxt_tx_pkts_pri_arr[i].base_off + - bp->pri2cos[i]; + bp->pri2cos_idx[i]; buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); } @@ -2007,8 +2007,8 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_nvm_install_update_input install = {0}; const struct firmware *fw; - int rc, hwrm_err = 0; u32 item_len; + int rc = 0; u16 index; bnxt_hwrm_fw_set_time(bp); @@ -2028,7 +2028,7 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, } if (fw->size > item_len) { - netdev_err(dev, "PKG insufficient update area in nvram: %lu", + netdev_err(dev, "PKG insufficient 
update area in nvram: %lu\n", (unsigned long)fw->size); rc = -EFBIG; } else { @@ -2052,15 +2052,14 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, memcpy(kmem, fw->data, fw->size); modify.host_src_addr = cpu_to_le64(dma_handle); - hwrm_err = hwrm_send_message(bp, &modify, - sizeof(modify), - FLASH_PACKAGE_TIMEOUT); + rc = hwrm_send_message(bp, &modify, sizeof(modify), + FLASH_PACKAGE_TIMEOUT); dma_free_coherent(&bp->pdev->dev, fw->size, kmem, dma_handle); } } release_firmware(fw); - if (rc || hwrm_err) + if (rc) goto err_exit; if ((install_type & 0xffff) == 0) @@ -2069,20 +2068,19 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, install.install_type = cpu_to_le32(install_type); mutex_lock(&bp->hwrm_cmd_lock); - hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - if (hwrm_err) { + rc = _hwrm_send_message(bp, &install, sizeof(install), + INSTALL_PACKAGE_TIMEOUT); + if (rc) { u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err; if (resp->error_code && error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { install.flags |= cpu_to_le16( NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); - hwrm_err = _hwrm_send_message(bp, &install, - sizeof(install), - INSTALL_PACKAGE_TIMEOUT); + rc = _hwrm_send_message(bp, &install, sizeof(install), + INSTALL_PACKAGE_TIMEOUT); } - if (hwrm_err) + if (rc) goto flash_pkg_exit; } @@ -2094,7 +2092,7 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, flash_pkg_exit: mutex_unlock(&bp->hwrm_cmd_lock); err_exit: - if (hwrm_err == -EACCES) + if (rc == -EACCES) bnxt_print_admin_err(bp); return rc; } @@ -3338,7 +3336,7 @@ err: kfree(coredump.data); *dump_len += sizeof(struct bnxt_coredump_record); if (rc == -ENOBUFS) - netdev_err(bp->dev, "Firmware returned large coredump buffer"); + netdev_err(bp->dev, "Firmware returned large coredump buffer\n"); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 0cc6ec51f45f..9bec256b0934 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -50,7 +50,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev) /* check if dev belongs to the same switch */ if (!netdev_port_same_parent_id(pf_bp->dev, dev)) { - netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch", + netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch\n", dev->ifindex); return BNXT_FID_INVALID; } @@ -70,7 +70,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp, struct net_device *dev = act->dev; if (!dev) { - netdev_info(bp->dev, "no dev in mirred action"); + netdev_info(bp->dev, "no dev in mirred action\n"); return -EINVAL; } @@ -106,7 +106,7 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, const struct ip_tunnel_key *tun_key = &tun_info->key; if (ip_tunnel_info_af(tun_info) != AF_INET) { - netdev_info(bp->dev, "only IPv4 tunnel-encap is supported"); + netdev_info(bp->dev, "only IPv4 tunnel-encap is supported\n"); return -EOPNOTSUPP; } @@ -295,7 +295,7 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, int i, rc; if (!flow_action_has_entries(flow_action)) { - netdev_info(bp->dev, "no actions"); + netdev_info(bp->dev, "no actions\n"); return -EINVAL; } @@ -370,7 +370,7 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || 
(dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) { - netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x", + netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n", dissector->used_keys); return -EOPNOTSUPP; } @@ -508,7 +508,7 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); return rc; } @@ -841,7 +841,7 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, resp = bnxt_get_hwrm_resp_addr(bp, &req); *decap_filter_handle = resp->decap_filter_id; } else { - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); } mutex_unlock(&bp->hwrm_cmd_lock); @@ -859,7 +859,7 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp, rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); return rc; } @@ -906,7 +906,7 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, resp = bnxt_get_hwrm_resp_addr(bp, &req); *encap_record_handle = resp->encap_record_id; } else { - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); } mutex_unlock(&bp->hwrm_cmd_lock); @@ -924,7 +924,7 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp, rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); return rc; } @@ -943,7 +943,7 @@ static int bnxt_tc_put_l2_node(struct bnxt *bp, tc_info->l2_ht_params); if (rc) netdev_err(bp->dev, - "Error: %s: rhashtable_remove_fast: %d", + "Error: %s: rhashtable_remove_fast: %d\n", __func__, rc); kfree_rcu(l2_node, rcu); } @@ -972,7 +972,7 @@ bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table, if (rc) { kfree_rcu(l2_node, rcu); netdev_err(bp->dev, - "Error: %s: rhashtable_insert_fast: %d", + "Error: %s: rhashtable_insert_fast: %d\n", __func__, rc); return NULL; } @@ -1031,7 +1031,7 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow) if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) && (flow->l4_key.ip_proto != IPPROTO_TCP && flow->l4_key.ip_proto != IPPROTO_UDP)) { - netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports", + netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports\n", flow->l4_key.ip_proto); return false; } @@ -1088,7 +1088,7 @@ static int bnxt_tc_put_tunnel_node(struct bnxt *bp, rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node, *ht_params); if (rc) { - netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc); + netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc); rc = -1; } kfree_rcu(tunnel_node, rcu); @@ -1129,7 +1129,7 @@ bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table, tunnel_node->refcount++; return tunnel_node; err: - netdev_info(bp->dev, "error rc=%d", rc); + netdev_info(bp->dev, "error rc=%d\n", rc); return NULL; } @@ -1187,7 +1187,7 @@ static void bnxt_tc_put_decap_l2_node(struct bnxt *bp, &decap_l2_node->node, tc_info->decap_l2_ht_params); if (rc) - netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc); + netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc); kfree_rcu(decap_l2_node, rcu); } } @@ -1227,7 +1227,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt 
*bp, rt = ip_route_output_key(dev_net(real_dst_dev), &flow); if (IS_ERR(rt)) { - netdev_info(bp->dev, "no route to %pI4b", &flow.daddr); + netdev_info(bp->dev, "no route to %pI4b\n", &flow.daddr); return -EOPNOTSUPP; } @@ -1241,7 +1241,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp, if (vlan->real_dev != real_dst_dev) { netdev_info(bp->dev, - "dst_dev(%s) doesn't use PF-if(%s)", + "dst_dev(%s) doesn't use PF-if(%s)\n", netdev_name(dst_dev), netdev_name(real_dst_dev)); rc = -EOPNOTSUPP; @@ -1253,7 +1253,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp, #endif } else if (dst_dev != real_dst_dev) { netdev_info(bp->dev, - "dst_dev(%s) for %pI4b is not PF-if(%s)", + "dst_dev(%s) for %pI4b is not PF-if(%s)\n", netdev_name(dst_dev), &flow.daddr, netdev_name(real_dst_dev)); rc = -EOPNOTSUPP; @@ -1262,7 +1262,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp, nbr = dst_neigh_lookup(&rt->dst, &flow.daddr); if (!nbr) { - netdev_info(bp->dev, "can't lookup neighbor for %pI4b", + netdev_info(bp->dev, "can't lookup neighbor for %pI4b\n", &flow.daddr); rc = -EOPNOTSUPP; goto put_rt; @@ -1472,7 +1472,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp, rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node, tc_info->flow_ht_params); if (rc) - netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d", + netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d\n", __func__, rc); kfree_rcu(flow_node, rcu); @@ -1587,7 +1587,7 @@ unlock: free_node: kfree_rcu(new_node, rcu); done: - netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d", + netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n", __func__, tc_flow_cmd->cookie, rc); return rc; } @@ -1700,7 +1700,7 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, le64_to_cpu(resp_bytes[i]); } } else { - netdev_info(bp->dev, "error rc=%d", rc); + netdev_info(bp->dev, "error rc=%d\n", rc); } mutex_unlock(&bp->hwrm_cmd_lock); @@ -1970,7 +1970,7 @@ static int bnxt_tc_indr_block_event(struct notifier_block *nb, bp); if (rc) netdev_info(bp->dev, - "Failed to register indirect blk: dev: %s", + "Failed to register indirect blk: dev: %s\n", netdev->name); break; case NETDEV_UNREGISTER: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index b010b34cdaf8..6f2faf81c1ae 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -43,7 +43,7 @@ static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx, netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", *tx_cfa_action, *rx_cfa_code); } else { - netdev_info(bp->dev, "%s error rc=%d", __func__, rc); + netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); } mutex_unlock(&bp->hwrm_cmd_lock); @@ -60,7 +60,7 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) - netdev_info(bp->dev, "%s error rc=%d", __func__, rc); + netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); return rc; } @@ -465,7 +465,7 @@ static int bnxt_vf_reps_create(struct bnxt *bp) return 0; err: - netdev_info(bp->dev, "%s error=%d", __func__, rc); + netdev_info(bp->dev, "%s error=%d\n", __func__, rc); kfree(cfa_code_map); __bnxt_vf_reps_destroy(bp); return rc; @@ -488,7 +488,7 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode, mutex_lock(&bp->sriov_lock); if (bp->eswitch_mode == mode) { - netdev_info(bp->dev, "already in %s eswitch mode", + netdev_info(bp->dev, "already in %s 
eswitch mode\n", mode == DEVLINK_ESWITCH_MODE_LEGACY ? "legacy" : "switchdev"); rc = -EINVAL; @@ -508,7 +508,7 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode, } if (pci_num_vf(bp->pdev) == 0) { - netdev_info(bp->dev, "Enable VFs before setting switchdev mode"); + netdev_info(bp->dev, "Enable VFs before setting switchdev mode\n"); rc = -EPERM; goto done; } diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h index b38499774071..99e2c6d4d8c3 100644 --- a/drivers/net/ethernet/broadcom/cnic_defs.h +++ b/drivers/net/ethernet/broadcom/cnic_defs.h @@ -543,13 +543,13 @@ struct l4_kwq_update_pg { #define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2 #endif #if defined(__BIG_ENDIAN) - u16 reserverd3; + u16 reserved3; u8 da0; u8 da1; #elif defined(__LITTLE_ENDIAN) u8 da1; u8 da0; - u16 reserverd3; + u16 reserved3; #endif #if defined(__BIG_ENDIAN) u8 da2; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index e50a15397e11..1d678bee2cc9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -94,12 +94,6 @@ static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS); } -static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, - void __iomem *d) -{ - return bcmgenet_readl(d + DMA_DESC_LENGTH_STATUS); -} - static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, void __iomem *d, dma_addr_t addr) @@ -508,61 +502,6 @@ static int bcmgenet_set_link_ksettings(struct net_device *dev, return phy_ethtool_ksettings_set(dev->phydev, cmd); } -static void bcmgenet_set_rx_csum(struct net_device *dev, - netdev_features_t wanted) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - u32 rbuf_chk_ctrl; - bool rx_csum_en; - - rx_csum_en = !!(wanted & NETIF_F_RXCSUM); - - rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); - - /* enable rx checksumming */ - if (rx_csum_en) - rbuf_chk_ctrl |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS; - else - rbuf_chk_ctrl &= ~RBUF_RXCHK_EN; - priv->desc_rxchk_en = rx_csum_en; - - /* If UniMAC forwards CRC, we need to skip over it to get - * a valid CHK bit to be set in the per-packet status word - */ - if (rx_csum_en && priv->crc_fwd_en) - rbuf_chk_ctrl |= RBUF_SKIP_FCS; - else - rbuf_chk_ctrl &= ~RBUF_SKIP_FCS; - - bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL); -} - -static void bcmgenet_set_tx_csum(struct net_device *dev, - netdev_features_t wanted) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - bool desc_64b_en; - u32 tbuf_ctrl, rbuf_ctrl; - - tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv); - rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL); - - desc_64b_en = !!(wanted & NETIF_F_HW_CSUM); - - /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */ - if (desc_64b_en) { - tbuf_ctrl |= RBUF_64B_EN; - rbuf_ctrl |= RBUF_64B_EN; - } else { - tbuf_ctrl &= ~RBUF_64B_EN; - rbuf_ctrl &= ~RBUF_64B_EN; - } - priv->desc_64b_en = desc_64b_en; - - bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl); - bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL); -} - static int bcmgenet_set_features(struct net_device *dev, netdev_features_t features) { @@ -578,9 +517,6 @@ static int bcmgenet_set_features(struct net_device *dev, reg = bcmgenet_umac_readl(priv, UMAC_CMD); priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); - bcmgenet_set_tx_csum(dev, features); - bcmgenet_set_rx_csum(dev, features); - clk_disable_unprepare(priv->clk); return ret; @@ -1475,8 
+1411,8 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev) /* Reallocate the SKB to put enough headroom in front of it and insert * the transmit checksum offsets in the descriptors */ -static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, - struct sk_buff *skb) +static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, + struct sk_buff *skb) { struct bcmgenet_priv *priv = netdev_priv(dev); struct status_64 *status = NULL; @@ -1590,13 +1526,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) */ GENET_CB(skb)->bytes_sent = skb->len; - /* set the SKB transmit checksum */ - if (priv->desc_64b_en) { - skb = bcmgenet_put_tx_csum(dev, skb); - if (!skb) { - ret = NETDEV_TX_OK; - goto out; - } + /* add the Transmit Status Block */ + skb = bcmgenet_add_tsb(dev, skb); + if (!skb) { + ret = NETDEV_TX_OK; + goto out; } for (i = 0; i <= nr_frags; i++) { @@ -1775,6 +1709,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, while ((rxpktprocessed < rxpkttoprocess) && (rxpktprocessed < budget)) { + struct status_64 *status; + __be16 rx_csum; + cb = &priv->rx_cbs[ring->read_ptr]; skb = bcmgenet_rx_refill(priv, cb); @@ -1783,20 +1720,12 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, goto next; } - if (!priv->desc_64b_en) { - dma_length_status = - dmadesc_get_length_status(priv, cb->bd_addr); - } else { - struct status_64 *status; - __be16 rx_csum; - - status = (struct status_64 *)skb->data; - dma_length_status = status->length_status; + status = (struct status_64 *)skb->data; + dma_length_status = status->length_status; + if (dev->features & NETIF_F_RXCSUM) { rx_csum = (__force __be16)(status->rx_csum & 0xffff); - if (priv->desc_rxchk_en) { - skb->csum = (__force __wsum)ntohs(rx_csum); - skb->ip_summed = CHECKSUM_COMPLETE; - } + skb->csum = (__force __wsum)ntohs(rx_csum); + skb->ip_summed = CHECKSUM_COMPLETE; } /* DMA flags and length are still valid no matter how @@ -1840,14 +1769,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, } /* error packet */ skb_put(skb, len); - if (priv->desc_64b_en) { - skb_pull(skb, 64); - len -= 64; - } - /* remove hardware 2bytes added for IP alignment */ - skb_pull(skb, 2); - len -= 2; + /* remove RSB and hardware 2bytes added for IP alignment */ + skb_pull(skb, 66); + len -= 66; if (priv->crc_fwd_en) { skb_trim(skb, len - ETH_FCS_LEN); @@ -1965,6 +1890,8 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) u32 reg; reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + return; if (enable) reg |= mask; else @@ -1984,11 +1911,9 @@ static void reset_umac(struct bcmgenet_priv *priv) bcmgenet_rbuf_ctrl_set(priv, 0); udelay(10); - /* disable MAC while updating its registers */ - bcmgenet_umac_writel(priv, 0, UMAC_CMD); - - /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */ - bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD); + /* issue soft reset and disable MAC while updating its registers */ + bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); + udelay(2); } static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) @@ -2038,11 +1963,28 @@ static void init_umac(struct bcmgenet_priv *priv) bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); - /* init rx registers, enable ip header optimization */ + /* init tx registers, enable TSB */ + reg = bcmgenet_tbuf_ctrl_get(priv); + reg |= TBUF_64B_EN; + bcmgenet_tbuf_ctrl_set(priv, reg); + + /* init rx 
registers, enable ip header optimization and RSB */ reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); - reg |= RBUF_ALIGN_2B; + reg |= RBUF_ALIGN_2B | RBUF_64B_EN; bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); + /* enable rx checksumming */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); + reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS; + /* If UniMAC forwards CRC, we need to skip over it to get + * a valid CHK bit to be set in the per-packet status word + */ + if (priv->crc_fwd_en) + reg |= RBUF_SKIP_FCS; + else + reg &= ~RBUF_SKIP_FCS; + bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL); + if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 61a6fe9f4cec..daf8fb2c39b6 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -273,6 +273,7 @@ struct bcmgenet_mib_counters { #define RBUF_FLTR_LEN_SHIFT 8 #define TBUF_CTRL 0x00 +#define TBUF_64B_EN (1 << 0) #define TBUF_BP_MC 0x0C #define TBUF_ENERGY_CTRL 0x14 #define TBUF_EEE_EN (1 << 0) @@ -662,8 +663,6 @@ struct bcmgenet_priv { unsigned int irq0_stat; /* HW descriptors/checksum variables */ - bool desc_64b_en; - bool desc_rxchk_en; bool crc_fwd_en; u32 dma_max_burst_length; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index ea20d94bd050..c9a43695b182 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -132,8 +132,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, return -EINVAL; } - /* disable RX */ + /* Can't suspend with WoL if MAC is still in reset */ reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + reg &= ~CMD_SW_RESET; + + /* disable RX */ reg &= ~CMD_RX_EN; bcmgenet_umac_writel(priv, reg, UMAC_CMD); mdelay(10); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 6392a2530183..b5930f80039d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -95,6 +95,12 @@ void bcmgenet_mii_setup(struct net_device *dev) CMD_HD_EN | CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); reg |= cmd_bits; + if (reg & CMD_SW_RESET) { + reg &= ~CMD_SW_RESET; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + udelay(2); + reg |= CMD_TX_EN | CMD_RX_EN; + } bcmgenet_umac_writel(priv, reg, UMAC_CMD); } else { /* done if nothing has changed */ @@ -181,38 +187,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) const char *phy_name = NULL; u32 id_mode_dis = 0; u32 port_ctrl; - int bmcr = -1; - int ret; u32 reg; - /* MAC clocking workaround during reset of umac state machines */ - reg = bcmgenet_umac_readl(priv, UMAC_CMD); - if (reg & CMD_SW_RESET) { - /* An MII PHY must be isolated to prevent TXC contention */ - if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { - ret = phy_read(phydev, MII_BMCR); - if (ret >= 0) { - bmcr = ret; - ret = phy_write(phydev, MII_BMCR, - bmcr | BMCR_ISOLATE); - } - if (ret) { - netdev_err(dev, "failed to isolate PHY\n"); - return ret; - } - } - /* Switch MAC clocking to RGMII generated clock */ - bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); - /* Ensure 5 clks with Rx disabled - * followed by 5 clks with Reset asserted - */ - udelay(4); - reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN); - bcmgenet_umac_writel(priv, reg, UMAC_CMD); 
- /* Ensure 5 more clocks before Rx is enabled */ - udelay(2); - } - switch (priv->phy_interface) { case PHY_INTERFACE_MODE_INTERNAL: phy_name = "internal PHY"; @@ -282,10 +258,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); - /* Restore the MII PHY after isolation */ - if (bmcr >= 0) - phy_write(phydev, MII_BMCR, bmcr); - priv->ext_phy = !priv->internal_phy && (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); @@ -294,6 +266,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) */ if (priv->ext_phy) { reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~ID_MODE_DIS; reg |= id_mode_dis; if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) reg |= RGMII_MODE_EN_V123; diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index dbf7070fcdba..a3f0f27fc79a 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -652,6 +652,7 @@ #define MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 #define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 +#define MACB_CAPS_MACB_IS_EMAC 0x08000000 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 4508f0d150da..2c28da1737fe 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -572,8 +572,21 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode, old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR); /* Clear all the bits we might set later */ - ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE) | - GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL)); + ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE)); + + if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { + if (state->interface == PHY_INTERFACE_MODE_RMII) + ctrl |= MACB_BIT(RM9200_RMII); + } else { + ctrl &= ~(GEM_BIT(GBE) | GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL)); + + /* We do not support MLO_PAUSE_RX yet */ + if (state->pause & MLO_PAUSE_TX) + ctrl |= MACB_BIT(PAE); + + if (state->interface == PHY_INTERFACE_MODE_SGMII) + ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); + } if (state->speed == SPEED_1000) ctrl |= GEM_BIT(GBE); @@ -583,13 +596,6 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode, if (state->duplex) ctrl |= MACB_BIT(FD); - /* We do not support MLO_PAUSE_RX yet */ - if (state->pause & MLO_PAUSE_TX) - ctrl |= MACB_BIT(PAE); - - if (state->interface == PHY_INTERFACE_MODE_SGMII) - ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); - /* Apply the new configuration, if any */ if (old_ctrl ^ ctrl) macb_or_gem_writel(bp, NCFGR, ctrl); @@ -608,9 +614,10 @@ static void macb_mac_link_down(struct phylink_config *config, unsigned int mode, unsigned int q; u32 ctrl; - for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) - queue_writel(queue, IDR, - bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); + if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) + queue_writel(queue, IDR, + bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); /* Disable Rx and Tx */ ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE)); @@ -627,17 +634,19 @@ static void macb_mac_link_up(struct phylink_config *config, unsigned int mode, struct macb_queue *queue; unsigned int q; - macb_set_tx_clk(bp->tx_clk, 
bp->speed, ndev); + if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { + macb_set_tx_clk(bp->tx_clk, bp->speed, ndev); - /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down - * cleared the pipeline and control registers. - */ - bp->macbgem_ops.mog_init_rings(bp); - macb_init_buffers(bp); + /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down + * cleared the pipeline and control registers. + */ + bp->macbgem_ops.mog_init_rings(bp); + macb_init_buffers(bp); - for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) - queue_writel(queue, IER, - bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) + queue_writel(queue, IER, + bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); + } /* Enable Rx and Tx */ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); @@ -3790,6 +3799,10 @@ static int at91ether_open(struct net_device *dev) u32 ctl; int ret; + ret = pm_runtime_get_sync(&lp->pdev->dev); + if (ret < 0) + return ret; + /* Clear internal statistics */ ctl = macb_readl(lp, NCR); macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); @@ -3854,7 +3867,7 @@ static int at91ether_close(struct net_device *dev) q->rx_buffers, q->rx_buffers_dma); q->rx_buffers = NULL; - return 0; + return pm_runtime_put(&lp->pdev->dev); } /* Transmit packet */ @@ -4037,7 +4050,6 @@ static int at91ether_init(struct platform_device *pdev) struct net_device *dev = platform_get_drvdata(pdev); struct macb *bp = netdev_priv(dev); int err; - u32 reg; bp->queues[0].bp = bp; @@ -4051,11 +4063,7 @@ static int at91ether_init(struct platform_device *pdev) macb_writel(bp, NCR, 0); - reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); - if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) - reg |= MACB_BIT(RM9200_RMII); - - macb_writel(bp, NCFGR, reg); + macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG)); return 0; } @@ -4214,7 +4222,7 @@ static const struct macb_config sama5d4_config = { }; static const struct macb_config emac_config = { - .caps = MACB_CAPS_NEEDS_RSTONUBR, + .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC, .clk_init = at91ether_clk_init, .init = at91ether_init, }; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 17a4110c2e49..8ff28ed04b7f 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -410,10 +410,19 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) lmac = &bgx->lmac[lmacid]; cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); - if (enable) + if (enable) { cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN; - else + + /* enable TX FIFO Underflow interrupt */ + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S, + GMI_TXX_INT_UNDFLW); + } else { cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); + + /* Disable TX FIFO Underflow interrupt */ + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C, + GMI_TXX_INT_UNDFLW); + } bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); if (bgx->is_rgx) @@ -1535,6 +1544,48 @@ static int bgx_init_phy(struct bgx *bgx) return bgx_init_of_phy(bgx); } +static irqreturn_t bgx_intr_handler(int irq, void *data) +{ + struct bgx *bgx = (struct bgx *)data; + u64 status, val; + int lmac; + + for (lmac = 0; lmac < bgx->lmac_count; lmac++) { + status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT); + if (status & GMI_TXX_INT_UNDFLW) { + pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n", + bgx->bgx_id, lmac); + 
val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG); + val &= ~CMR_EN; + bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val); + val |= CMR_EN; + bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val); + } + /* clear interrupts */ + bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status); + } + + return IRQ_HANDLED; +} + +static void bgx_register_intr(struct pci_dev *pdev) +{ + struct bgx *bgx = pci_get_drvdata(pdev); + int ret; + + ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET, + BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES); + if (ret < 0) { + pci_err(pdev, "Req for #%d msix vectors failed\n", + BGX_LMAC_VEC_OFFSET); + return; + } + ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL, + bgx, "BGX%d", bgx->bgx_id); + if (ret) + pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx); +} + static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err; @@ -1550,7 +1601,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, bgx); - err = pci_enable_device(pdev); + err = pcim_enable_device(pdev); if (err) { dev_err(dev, "Failed to enable PCI device\n"); pci_set_drvdata(pdev, NULL); @@ -1604,6 +1655,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) bgx_init_hw(bgx); + bgx_register_intr(pdev); + /* Enable all LMACs */ for (lmac = 0; lmac < bgx->lmac_count; lmac++) { err = bgx_lmac_enable(bgx, lmac); @@ -1620,6 +1673,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_enable: bgx_vnic[bgx->bgx_id] = NULL; + pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx); err_release_regions: pci_release_regions(pdev); err_disable_device: @@ -1637,6 +1691,8 @@ static void bgx_remove(struct pci_dev *pdev) for (lmac = 0; lmac < bgx->lmac_count; lmac++) bgx_lmac_disable(bgx, lmac); + pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx); + bgx_vnic[bgx->bgx_id] = NULL; pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index 25888706bdcd..cdea49392185 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -180,6 +180,15 @@ #define BGX_GMP_GMI_TXX_BURST 0x38228 #define BGX_GMP_GMI_TXX_MIN_PKT 0x38240 #define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300 +#define BGX_GMP_GMI_TXX_INT 0x38500 +#define BGX_GMP_GMI_TXX_INT_W1S 0x38508 +#define BGX_GMP_GMI_TXX_INT_ENA_W1C 0x38510 +#define BGX_GMP_GMI_TXX_INT_ENA_W1S 0x38518 +#define GMI_TXX_INT_PTP_LOST BIT_ULL(4) +#define GMI_TXX_INT_LATE_COL BIT_ULL(3) +#define GMI_TXX_INT_XSDEF BIT_ULL(2) +#define GMI_TXX_INT_XSCOL BIT_ULL(1) +#define GMI_TXX_INT_UNDFLW BIT_ULL(0) #define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */ #define BGX_MSIX_VEC_0_29_CTL 0x400008 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 2a2938bbb93a..fc05248984fc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -902,7 +902,7 @@ void clear_all_filters(struct adapter *adapter) adapter->tids.tid_tab[i]; if (f && (f->valid || f->pending)) - cxgb4_del_filter(dev, i, &f->fs); + cxgb4_del_filter(dev, f->tid, &f->fs); } sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A); @@ -910,7 +910,7 @@ void clear_all_filters(struct adapter *adapter) f = (struct filter_entry *)adapter->tids.tid_tab[i]; if (f && (f->valid || f->pending)) - cxgb4_del_filter(dev, i, &f->fs); + cxgb4_del_filter(dev, f->tid, &f->fs); } } } 
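Note on the cxgb4_filter.c hunks above: both loops in clear_all_filters() now key the delete on the entry's own f->tid instead of the tid_tab index being walked, since the tid recorded in a filter entry is the identifier the hardware filter was installed under and need not equal the array slot. A minimal sketch of that pattern follows; struct filter_entry, del_filter_by_tid() and clear_all() are illustrative stand-ins, not the cxgb4 API.

/* Sketch only: delete table entries by their recorded tid, not by the loop index. */
#include <stddef.h>

struct filter_entry {
	unsigned int tid;	/* hardware tid recorded when the filter was installed */
	int valid;
};

static void del_filter_by_tid(unsigned int tid)
{
	/* stand-in for cxgb4_del_filter(dev, tid, &f->fs) */
	(void)tid;
}

static void clear_all(struct filter_entry **tab, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		struct filter_entry *f = tab[i];

		if (f && f->valid)
			del_filter_by_tid(f->tid);	/* the entry's tid, not i */
	}
}

The distinction only matters when an entry's allocated tid differs from its slot in the table, which is exactly the case the hunks above guard against.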
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 649842a8aa28..97f90edbc068 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5381,12 +5381,11 @@ static inline bool is_x_10g_port(const struct link_config *lc) static int cfg_queues(struct adapter *adap) { u32 avail_qsets, avail_eth_qsets, avail_uld_qsets; + u32 i, n10g = 0, qidx = 0, n1g = 0; + u32 ncpus = num_online_cpus(); u32 niqflint, neq, num_ulds; struct sge *s = &adap->sge; - u32 i, n10g = 0, qidx = 0; -#ifndef CONFIG_CHELSIO_T4_DCB - int q10g = 0; -#endif + u32 q10g = 0, q1g; /* Reduce memory usage in kdump environment, disable all offload. */ if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) { @@ -5424,44 +5423,50 @@ static int cfg_queues(struct adapter *adap) n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS); + + /* We default to 1 queue per non-10G port and up to # of cores queues + * per 10G port. + */ + if (n10g) + q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; + + n1g = adap->params.nports - n10g; #ifdef CONFIG_CHELSIO_T4_DCB /* For Data Center Bridging support we need to be able to support up * to 8 Traffic Priorities; each of which will be assigned to its * own TX Queue in order to prevent Head-Of-Line Blocking. */ + q1g = 8; if (adap->params.nports * 8 > avail_eth_qsets) { dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", avail_eth_qsets, adap->params.nports * 8); return -ENOMEM; } - for_each_port(adap, i) { - struct port_info *pi = adap2pinfo(adap, i); + if (adap->params.nports * ncpus < avail_eth_qsets) + q10g = max(8U, ncpus); + else + q10g = max(8U, q10g); - pi->first_qset = qidx; - pi->nqsets = is_kdump_kernel() ? 1 : 8; - qidx += pi->nqsets; - } -#else /* !CONFIG_CHELSIO_T4_DCB */ - /* We default to 1 queue per non-10G port and up to # of cores queues - * per 10G port. - */ - if (n10g) - q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; - if (q10g > netif_get_num_default_rss_queues()) - q10g = netif_get_num_default_rss_queues(); + while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g)) + q10g--; - if (is_kdump_kernel()) +#else /* !CONFIG_CHELSIO_T4_DCB */ + q1g = 1; + q10g = min(q10g, ncpus); +#endif /* !CONFIG_CHELSIO_T4_DCB */ + if (is_kdump_kernel()) { q10g = 1; + q1g = 1; + } for_each_port(adap, i) { struct port_info *pi = adap2pinfo(adap, i); pi->first_qset = qidx; - pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1; + pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g; qidx += pi->nqsets; } -#endif /* !CONFIG_CHELSIO_T4_DCB */ s->ethqsets = qidx; s->max_ethqsets = qidx; /* MSI-X may lower it later */ @@ -5473,7 +5478,7 @@ static int cfg_queues(struct adapter *adap) * capped by the number of available cores. 
*/ num_ulds = adap->num_uld + adap->num_ofld_uld; - i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus()); + i = min_t(u32, MAX_OFLD_QSETS, ncpus); avail_uld_qsets = roundup(i, adap->params.nports); if (avail_qsets < num_ulds * adap->params.nports) { adap->params.offload = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c index 58a039c3224a..af1f40cbccc8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c @@ -246,6 +246,9 @@ static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta) FW_PTP_CMD_PORTID_V(0)); c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); c.u.ts.sc = FW_PTP_SC_ADJ_FTIME; + c.u.ts.sign = (delta < 0) ? 1 : 0; + if (delta < 0) + delta = -delta; c.u.ts.tm = cpu_to_be64(delta); err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 97cda501e7e8..cab3d17e0e1a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1307,8 +1307,9 @@ static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb, int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, int maxreclaim) { + unsigned int reclaimed, hw_cidx; struct sge_txq *q = &eq->q; - unsigned int reclaimed; + int hw_in_use; if (!q->in_use || !__netif_tx_trylock(eq->txq)) return 0; @@ -1316,12 +1317,17 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, /* Reclaim pending completed TX Descriptors. */ reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true); + hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); + hw_in_use = q->pidx - hw_cidx; + if (hw_in_use < 0) + hw_in_use += q->size; + /* If the TX Queue is currently stopped and there's now more than half * the queue available, restart it. Otherwise bail out since the rest * of what we want do here is with the possibility of shipping any * currently buffered Coalesced TX Work Request. */ - if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) { + if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) { netif_tx_wake_queue(eq->txq); eq->q.restarts++; } @@ -1486,16 +1492,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) * has opened up. */ eth_txq_stop(q); - - /* If we're using the SGE Doorbell Queue Timer facility, we - * don't need to ask the Firmware to send us Egress Queue CIDX - * Updates: the Hardware will do this automatically. And - * since we send the Ingress Queue CIDX Updates to the - * corresponding Ethernet Response Queue, we'll get them very - * quickly. - */ - if (!q->dbqt) - wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } wr = (void *)&q->q.desc[q->q.pidx]; @@ -1805,16 +1802,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, * has opened up. */ eth_txq_stop(txq); - - /* If we're using the SGE Doorbell Queue Timer facility, we - * don't need to ask the Firmware to send us Egress Queue CIDX - * Updates: the Hardware will do this automatically. And - * since we send the Ingress Queue CIDX Updates to the - * corresponding Ethernet Response Queue, we'll get them very - * quickly. - */ - if (!txq->dbqt) - wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } /* Start filling in our Work Request. 
Note that we do _not_ handle @@ -3370,26 +3358,6 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq, } txq = &s->ethtxq[pi->first_qset + rspq->idx]; - - /* We've got the Hardware Consumer Index Update in the Egress Update - * message. If we're using the SGE Doorbell Queue Timer mechanism, - * these Egress Update messages will be our sole CIDX Updates we get - * since we don't want to chew up PCIe bandwidth for both Ingress - * Messages and Status Page writes. However, The code which manages - * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value - * stored in the Status Page at the end of the TX Queue. It's easiest - * to simply copy the CIDX Update value from the Egress Update message - * to the Status Page. Also note that no Endian issues need to be - * considered here since both are Big Endian and we're just copying - * bytes consistently ... - */ - if (txq->dbqt) { - struct cpl_sge_egr_update *egr; - - egr = (struct cpl_sge_egr_update *)rsp; - WRITE_ONCE(txq->q.stat->cidx, egr->cidx); - } - t4_sge_eth_txq_egress_update(adapter, txq, -1); } diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 1ea3372775e6..e94ae9b94dbf 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1405,6 +1405,8 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) mac_addr = of_get_mac_address(np); if (!IS_ERR(mac_addr)) ether_addr_copy(pdata->dev_addr, mac_addr); + else if (PTR_ERR(mac_addr) == -EPROBE_DEFER) + return ERR_CAST(mac_addr); return pdata; } diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index fd93d542f497..ca74a684a904 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1,4 +1,5 @@ /* Copyright 2008 - 2016 Freescale Semiconductor Inc. + * Copyright 2020 NXP * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -123,7 +124,22 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); #define FSL_QMAN_MAX_OAL 127 /* Default alignment for start of data in an Rx FD */ +#ifdef CONFIG_DPAA_ERRATUM_A050385 +/* aligning data start to 64 avoids DMA transaction splits, unless the buffer + * is crossing a 4k page boundary + */ +#define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16) +/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary + * crossings; also, all SG fragments except the last must have a size multiple + * of 256 to avoid DMA transaction splits + */ +#define DPAA_A050385_ALIGN 256 +#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? 
\ + DPAA_A050385_ALIGN : 16) +#else #define DPAA_FD_DATA_ALIGNMENT 16 +#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT +#endif /* The DPAA requires 256 bytes reserved and mapped for the SGT */ #define DPAA_SGT_SIZE 256 @@ -158,8 +174,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result) #define DPAA_TIME_STAMP_SIZE 8 #define DPAA_HASH_RESULTS_SIZE 8 +#ifdef CONFIG_DPAA_ERRATUM_A050385 +#define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\ + + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE)) +#else #define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \ dpaa_rx_extra_headroom) +#endif #define DPAA_ETH_PCD_RXQ_NUM 128 @@ -180,7 +201,12 @@ static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS]; #define DPAA_BP_RAW_SIZE 4096 +#ifdef CONFIG_DPAA_ERRATUM_A050385 +#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \ + ~(DPAA_A050385_ALIGN - 1)) +#else #define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size) +#endif static int dpaa_max_frm; @@ -1192,7 +1218,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp, buf_prefix_content.pass_prs_result = true; buf_prefix_content.pass_hash_result = true; buf_prefix_content.pass_time_stamp = true; - buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; + buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT; rx_p = ¶ms.specific_params.rx_params; rx_p->err_fqid = errq->fqid; @@ -1662,6 +1688,8 @@ static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd) return CHECKSUM_NONE; } +#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a))) + /* Build a linear skb around the received buffer. * We are guaranteed there is enough room at the end of the data buffer to * accommodate the shared info area of the skb. 
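
The erratum macros above come down to two pieces of arithmetic: rounding a buffer size down to a 256-byte multiple, as dpaa_bp_size() does after subtracting the skb overhead, and testing whether an address is 256-byte aligned, as PTR_IS_ALIGNED() does. A small stand-alone sketch of both, assuming illustrative names and sizes rather than the dpaa_eth definitions:

/* Stand-alone illustration of the A050385 alignment arithmetic;
 * names and sizes are illustrative, not the dpaa_eth macros.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_256 256u

/* round a raw size down to a multiple of 256 */
static unsigned int size_align_down(unsigned int size)
{
	return size & ~(ALIGN_256 - 1);
}

/* true when the low 8 address bits are clear, i.e. 256-byte aligned */
static int addr_is_aligned(const void *p)
{
	return ((uintptr_t)p & (ALIGN_256 - 1)) == 0;
}

int main(void)
{
	static char buf[1024] __attribute__((aligned(256)));

	printf("%u\n", size_align_down(4096 - 320));	/* prints 3584 */
	printf("%d %d\n", addr_is_aligned(buf), addr_is_aligned(buf + 64));
	return 0;
}
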
@@ -1733,8 +1761,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, sg_addr = qm_sg_addr(&sgt[i]); sg_vaddr = phys_to_virt(sg_addr); - WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr, - SMP_CACHE_BYTES)); + WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES)); dma_unmap_page(priv->rx_dma_dev, sg_addr, DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); @@ -2022,6 +2049,75 @@ static inline int dpaa_xmit(struct dpaa_priv *priv, return 0; } +#ifdef CONFIG_DPAA_ERRATUM_A050385 +int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s) +{ + struct dpaa_priv *priv = netdev_priv(net_dev); + struct sk_buff *new_skb, *skb = *s; + unsigned char *start, i; + + /* check linear buffer alignment */ + if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN)) + goto workaround; + + /* linear buffers just need to have an aligned start */ + if (!skb_is_nonlinear(skb)) + return 0; + + /* linear data size for nonlinear skbs needs to be aligned */ + if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN)) + goto workaround; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + /* all fragments need to have aligned start addresses */ + if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN)) + goto workaround; + + /* all but last fragment need to have aligned sizes */ + if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) && + (i < skb_shinfo(skb)->nr_frags - 1)) + goto workaround; + } + + return 0; + +workaround: + /* copy all the skb content into a new linear buffer */ + new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 + + priv->tx_headroom); + if (!new_skb) + return -ENOMEM; + + /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */ + skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD); + + /* Workaround for DPAA_A050385 requires data start to be aligned */ + start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN); + if (start - new_skb->data != 0) + skb_reserve(new_skb, start - new_skb->data); + + skb_put(new_skb, skb->len); + skb_copy_bits(skb, 0, new_skb->data, skb->len); + skb_copy_header(new_skb, skb); + new_skb->dev = skb->dev; + + /* We move the headroom when we align it so we have to reset the + * network and transport header offsets relative to the new data + * pointer. The checksum offload relies on these offsets. + */ + skb_set_network_header(new_skb, skb_network_offset(skb)); + skb_set_transport_header(new_skb, skb_transport_offset(skb)); + + /* TODO: does timestamping need the result in the old skb? */ + dev_kfree_skb(skb); + *s = new_skb; + + return 0; +} +#endif + static netdev_tx_t dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) { @@ -2068,6 +2164,14 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) nonlinear = skb_is_nonlinear(skb); } +#ifdef CONFIG_DPAA_ERRATUM_A050385 + if (unlikely(fman_has_errata_a050385())) { + if (dpaa_a050385_wa(net_dev, &skb)) + goto enomem; + nonlinear = skb_is_nonlinear(skb); + } +#endif + if (nonlinear) { /* Just create a S/G fd based on the skb */ err = skb_to_sg_fd(priv, skb, &fd); @@ -2741,9 +2845,7 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE); - return DPAA_FD_DATA_ALIGNMENT ? 
ALIGN(headroom, - DPAA_FD_DATA_ALIGNMENT) : - headroom; + return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT); } static int dpaa_eth_probe(struct platform_device *pdev) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 4432a59904c7..23c5fef2f1ad 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2529,15 +2529,15 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) return -EINVAL; } - cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); + cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); if (cycle > 0xFFFF) { dev_err(dev, "Rx coalesced usec exceed hardware limitation\n"); return -EINVAL; } - cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); + cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); if (cycle > 0xFFFF) { - dev_err(dev, "Rx coalesced usec exceed hardware limitation\n"); + dev_err(dev, "Tx coalesced usec exceed hardware limitation\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig index 0139cb9042ec..34150182cc35 100644 --- a/drivers/net/ethernet/freescale/fman/Kconfig +++ b/drivers/net/ethernet/freescale/fman/Kconfig @@ -8,3 +8,31 @@ config FSL_FMAN help Freescale Data-Path Acceleration Architecture Frame Manager (FMan) support + +config DPAA_ERRATUM_A050385 + bool + depends on ARM64 && FSL_DPAA + default y + help + DPAA FMan erratum A050385 software workaround implementation: + align buffers, data start, SG fragment length to avoid FMan DMA + splits. + FMAN DMA read or writes under heavy traffic load may cause FMAN + internal resource leak thus stopping further packet processing. + The FMAN internal queue can overflow when FMAN splits single + read or write transactions into multiple smaller transactions + such that more than 17 AXI transactions are in flight from FMAN + to interconnect. When the FMAN internal queue overflows, it can + stall further packet processing. The issue can occur with any + one of the following three conditions: + 1. FMAN AXI transaction crosses 4K address boundary (Errata + A010022) + 2. FMAN DMA address for an AXI transaction is not 16 byte + aligned, i.e. the last 4 bits of an address are non-zero + 3. Scatter Gather (SG) frames have more than one SG buffer in + the SG list and any one of the buffers, except the last + buffer in the SG list has data size that is not a multiple + of 16 bytes, i.e., other than 16, 32, 48, 64, etc. + With any one of the above three conditions present, there is + likelihood of stalled FMAN packet processing, especially under + stress with multiple ports injecting line-rate traffic. diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 934111def0be..f151d6e111dd 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -1,5 +1,6 @@ /* * Copyright 2008-2015 Freescale Semiconductor Inc. 
+ * Copyright 2020 NXP * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -566,6 +567,10 @@ struct fman_cfg { u32 qmi_def_tnums_thresh; }; +#ifdef CONFIG_DPAA_ERRATUM_A050385 +static bool fman_has_err_a050385; +#endif + static irqreturn_t fman_exceptions(struct fman *fman, enum fman_exceptions exception) { @@ -2518,6 +2523,14 @@ struct fman *fman_bind(struct device *fm_dev) } EXPORT_SYMBOL(fman_bind); +#ifdef CONFIG_DPAA_ERRATUM_A050385 +bool fman_has_errata_a050385(void) +{ + return fman_has_err_a050385; +} +EXPORT_SYMBOL(fman_has_errata_a050385); +#endif + static irqreturn_t fman_err_irq(int irq, void *handle) { struct fman *fman = (struct fman *)handle; @@ -2845,6 +2858,11 @@ static struct fman *read_dts_node(struct platform_device *of_dev) goto fman_free; } +#ifdef CONFIG_DPAA_ERRATUM_A050385 + fman_has_err_a050385 = + of_property_read_bool(fm_node, "fsl,erratum-a050385"); +#endif + return fman; fman_node_put: diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h index 935c317fa696..f2ede1360f03 100644 --- a/drivers/net/ethernet/freescale/fman/fman.h +++ b/drivers/net/ethernet/freescale/fman/fman.h @@ -1,5 +1,6 @@ /* * Copyright 2008-2015 Freescale Semiconductor Inc. + * Copyright 2020 NXP * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -398,6 +399,10 @@ u16 fman_get_max_frm(void); int fman_get_rx_extra_headroom(void); +#ifdef CONFIG_DPAA_ERRATUM_A050385 +bool fman_has_errata_a050385(void); +#endif + struct fman *fman_bind(struct device *dev); #endif /* __FM_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index e1901874c19f..0d2b4ab01f24 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -782,7 +782,7 @@ int memac_adjust_link(struct fman_mac *memac, u16 speed) /* Set full duplex */ tmp &= ~IF_MODE_HD; - if (memac->phy_if == PHY_INTERFACE_MODE_RGMII) { + if (phy_interface_mode_is_rgmii(memac->phy_if)) { /* Configure RGMII in manual mode */ tmp &= ~IF_MODE_RGMII_AUTO; tmp &= ~IF_MODE_RGMII_SP_MASK; diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 1b0313900f98..d87158acdf6f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */ HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */ + HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is unintializing */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index acb796cc10d0..a7f40aa1a0ea 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1711,7 +1711,7 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 
- kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; + kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP; } static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 492bc9446463..d3b0cd74ecd2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2446,10 +2446,12 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) { + struct hclge_mac *mac = &hdev->hw.mac; int ret; duplex = hclge_check_speed_dup(duplex, speed); - if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) + if (!mac->support_autoneg && mac->speed == speed && + mac->duplex == duplex) return 0; ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); @@ -7743,16 +7745,27 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, struct hclge_desc desc; int ret; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); - + /* read current vlan filter parameter */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; req->vlan_type = vlan_type; - req->vlan_fe = filter_en ? fe_type : 0; req->vf_id = vf_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vlan filter config, ret = %d.\n", ret); + return ret; + } + + /* modify and write new config parameter */ + hclge_cmd_reuse_desc(&desc, false); + req->vlan_fe = filter_en ? + (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) - dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", + dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n", ret); return ret; @@ -8270,6 +8283,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) kfree(vlan); } } + clear_bit(vport->vport_id, hdev->vf_vlan_full); } void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) @@ -8486,6 +8500,28 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, } } +static void hclge_clear_vf_vlan(struct hclge_dev *hdev) +{ + struct hclge_vlan_info *vlan_info; + struct hclge_vport *vport; + int ret; + int vf; + + /* clear port base vlan for all vf */ + for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { + vport = &hdev->vport[vf]; + vlan_info = &vport->port_base_vlan_cfg.vlan_info; + + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan_info->vlan_tag, true); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clear vf vlan for vf%d, ret = %d\n", + vf - HCLGE_VF_VPORT_START_NUM, ret); + } +} + int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill) { @@ -9895,6 +9931,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) struct hclge_mac *mac = &hdev->hw.mac; hclge_reset_vf_rate(hdev); + hclge_clear_vf_vlan(hdev); hclge_misc_affinity_teardown(hdev); hclge_state_uninit(hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index a3c0822191a9..3d850f6b1e37 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -799,6 +799,7 @@ void 
hclge_mbx_handler(struct hclge_dev *hdev) hclge_get_link_mode(vport, req); break; case HCLGE_MBX_GET_VF_FLR_STATUS: + case HCLGE_MBX_VF_UNINIT: hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC); hclge_rm_vport_all_mac_table(vport, true, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index d6597206e692..0510d85a7f6a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2803,6 +2803,9 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) { hclgevf_state_uninit(hdev); + hclgevf_send_mbx_msg(hdev, HCLGE_MBX_VF_UNINIT, 0, NULL, 0, + false, NULL, 0); + if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { hclgevf_misc_irq_uninit(hdev); hclgevf_uninit_msi(hdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index eb53c15b13f3..5f2d57d1b2d3 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -389,7 +389,8 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, spin_unlock_bh(&cmdq->cmdq_lock); - if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) { + if (!wait_for_completion_timeout(&done, + msecs_to_jiffies(CMDQ_TIMEOUT))) { spin_lock_bh(&cmdq->cmdq_lock); if (cmdq->errcode[curr_prod_idx] == &errcode) @@ -623,6 +624,8 @@ static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci, if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info))) return -EBUSY; + dma_rmb(); + errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL); cmdq_sync_cmd_handler(cmdq, ci, errcode); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 6f2cf569a283..c7c75b772a86 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -297,6 +297,7 @@ static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth, } hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif); hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT; hw_ioctxt.cmdq_depth = 0; @@ -359,50 +360,6 @@ static int wait_for_db_state(struct hinic_hwdev *hwdev) return -EFAULT; } -static int wait_for_io_stopped(struct hinic_hwdev *hwdev) -{ - struct hinic_cmd_io_status cmd_io_status; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - unsigned long end; - u16 out_size; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT); - do { - err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, - HINIC_COMM_CMD_IO_STATUS_GET, - &cmd_io_status, sizeof(cmd_io_status), - &cmd_io_status, &out_size, - HINIC_MGMT_MSG_SYNC); - if ((err) || (out_size != sizeof(cmd_io_status))) { - dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n", - err); - return err; - } - - if (cmd_io_status.status == IO_STOPPED) { - dev_info(&pdev->dev, "IO stopped\n"); - return 0; - } - - msleep(20); - } while (time_before(jiffies, end)); - - dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n"); - return -ETIMEDOUT; -} - /** * clear_io_resource - 
set the IO resources as not active in the NIC * @hwdev: the NIC HW device @@ -422,11 +379,8 @@ static int clear_io_resources(struct hinic_hwdev *hwdev) return -EINVAL; } - err = wait_for_io_stopped(hwdev); - if (err) { - dev_err(&pdev->dev, "IO has not stopped yet\n"); - return err; - } + /* sleep 100ms to wait for firmware stopping I/O */ + msleep(100); cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index b069045de416..66fd2340d447 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -151,8 +151,8 @@ struct hinic_cmd_hw_ioctxt { u8 lro_en; u8 rsvd3; + u8 ppf_idx; u8 rsvd4; - u8 rsvd5; u16 rq_depth; u16 rx_buf_sz_idx; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c index 79243b626ddb..c0b6bcb067cd 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c @@ -188,7 +188,7 @@ static u8 eq_cons_idx_checksum_set(u32 val) * eq_update_ci - update the HW cons idx of event queue * @eq: the event queue to update the cons idx for **/ -static void eq_update_ci(struct hinic_eq *eq) +static void eq_update_ci(struct hinic_eq *eq, u32 arm_state) { u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq); @@ -202,7 +202,7 @@ static void eq_update_ci(struct hinic_eq *eq) val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) | HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) | - HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED); + HINIC_EQ_CI_SET(arm_state, INT_ARMED); val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM); @@ -235,6 +235,8 @@ static void aeq_irq_handler(struct hinic_eq *eq) if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) break; + dma_rmb(); + event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE); if (event >= HINIC_MAX_AEQ_EVENTS) { dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event); @@ -347,7 +349,7 @@ static void eq_irq_handler(void *data) else if (eq->type == HINIC_CEQ) ceq_irq_handler(eq); - eq_update_ci(eq); + eq_update_ci(eq, EQ_ARMED); } /** @@ -702,7 +704,7 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, } set_eq_ctrls(eq); - eq_update_ci(eq); + eq_update_ci(eq, EQ_ARMED); err = alloc_eq_pages(eq); if (err) { @@ -752,18 +754,28 @@ err_req_irq: **/ static void remove_eq(struct hinic_eq *eq) { - struct msix_entry *entry = &eq->msix_entry; - - free_irq(entry->vector, eq); + hinic_set_msix_state(eq->hwif, eq->msix_entry.entry, + HINIC_MSIX_DISABLE); + free_irq(eq->msix_entry.vector, eq); if (eq->type == HINIC_AEQ) { struct hinic_eq_work *aeq_work = &eq->aeq_work; cancel_work_sync(&aeq_work->work); + /* clear aeq_len to avoid hw access host memory */ + hinic_hwif_write_reg(eq->hwif, + HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); } else if (eq->type == HINIC_CEQ) { tasklet_kill(&eq->ceq_tasklet); + /* clear ceq_len to avoid hw access host memory */ + hinic_hwif_write_reg(eq->hwif, + HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0); } + /* update cons_idx to avoid invalid interrupt */ + eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq)); + eq_update_ci(eq, EQ_NOT_ARMED); + free_eq_pages(eq); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h index 517794509eb2..c7bb9ceca72c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h @@ -137,6 +137,7 @@ #define 
HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx) #define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx) #define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx) +#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx) #define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type) #define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index c1a6be6bf6a8..8995e32dd1c0 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -43,7 +43,7 @@ #define MSG_NOT_RESP 0xFFFF -#define MGMT_MSG_TIMEOUT 1000 +#define MGMT_MSG_TIMEOUT 5000 #define mgmt_to_pfhwdev(pf_mgmt) \ container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) @@ -267,7 +267,8 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, goto unlock_sync_msg; } - if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) { + if (!wait_for_completion_timeout(recv_done, + msecs_to_jiffies(MGMT_MSG_TIMEOUT))) { dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); err = -ETIMEDOUT; goto unlock_sync_msg; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h index f4a339b10b10..79091e131418 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h @@ -94,6 +94,7 @@ struct hinic_rq { struct hinic_wq *wq; + struct cpumask affinity_mask; u32 irq; u16 msix_entry; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 02a14f5e7fe3..13560975c103 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -356,7 +356,8 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev) if (!num_cpus) num_cpus = num_online_cpus(); - nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_cpus); + nic_dev->num_qps = hinic_hwdev_num_qps(hwdev); + nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus); nic_dev->rss_limit = nic_dev->num_qps; nic_dev->num_rss = nic_dev->num_qps; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index 56ea6d692f1c..815649e37cb1 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -350,6 +350,9 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget) if (!rq_wqe) break; + /* make sure we read rx_done before packet length */ + dma_rmb(); + cqe = rq->cqe[ci]; status = be32_to_cpu(cqe->status); hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge); @@ -475,7 +478,6 @@ static int rx_request_irq(struct hinic_rxq *rxq) struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_rq *rq = rxq->rq; struct hinic_qp *qp; - struct cpumask mask; int err; rx_add_napi(rxq); @@ -492,8 +494,8 @@ static int rx_request_irq(struct hinic_rxq *rxq) } qp = container_of(rq, struct hinic_qp, rq); - cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask); - return irq_set_affinity_hint(rq->irq, &mask); + cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask); + return irq_set_affinity_hint(rq->irq, &rq->affinity_mask); } static void rx_free_irq(struct hinic_rxq *rxq) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 0e13d1c7e474..365016450bdb 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -45,7 +45,7 @@ 
#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr)) -#define MIN_SKB_LEN 17 +#define MIN_SKB_LEN 32 #define MAX_PAYLOAD_OFFSET 221 #define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data)) @@ -622,6 +622,8 @@ static int free_tx_poll(struct napi_struct *napi, int budget) do { hw_ci = HW_CONS_IDX(sq) & wq->mask; + dma_rmb(); + /* Reading a WQEBB to get real WQE size and consumer index. */ sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci); if ((!sq_wqe) || diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c75239d8820f..4bd33245bad6 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2142,6 +2142,8 @@ static void __ibmvnic_reset(struct work_struct *work) { struct ibmvnic_rwi *rwi; struct ibmvnic_adapter *adapter; + bool saved_state = false; + unsigned long flags; u32 reset_state; int rc = 0; @@ -2153,17 +2155,25 @@ static void __ibmvnic_reset(struct work_struct *work) return; } - reset_state = adapter->state; - rwi = get_next_rwi(adapter); while (rwi) { + spin_lock_irqsave(&adapter->state_lock, flags); + if (adapter->state == VNIC_REMOVING || adapter->state == VNIC_REMOVED) { + spin_unlock_irqrestore(&adapter->state_lock, flags); kfree(rwi); rc = EBUSY; break; } + if (!saved_state) { + reset_state = adapter->state; + adapter->state = VNIC_RESETTING; + saved_state = true; + } + spin_unlock_irqrestore(&adapter->state_lock, flags); + if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) { /* CHANGE_PARAM requestor holds rtnl_lock */ rc = do_change_param_reset(adapter, rwi, reset_state); @@ -5091,6 +5101,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) __ibmvnic_delayed_reset); INIT_LIST_HEAD(&adapter->rwi_list); spin_lock_init(&adapter->rwi_lock); + spin_lock_init(&adapter->state_lock); mutex_init(&adapter->fw_lock); init_completion(&adapter->init_done); init_completion(&adapter->fw_done); @@ -5163,8 +5174,17 @@ static int ibmvnic_remove(struct vio_dev *dev) { struct net_device *netdev = dev_get_drvdata(&dev->dev); struct ibmvnic_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + + spin_lock_irqsave(&adapter->state_lock, flags); + if (adapter->state == VNIC_RESETTING) { + spin_unlock_irqrestore(&adapter->state_lock, flags); + return -EBUSY; + } adapter->state = VNIC_REMOVING; + spin_unlock_irqrestore(&adapter->state_lock, flags); + rtnl_lock(); unregister_netdevice(netdev); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 60eccaf91b12..f8416e1d4cf0 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -941,7 +941,8 @@ enum vnic_state {VNIC_PROBING = 1, VNIC_CLOSING, VNIC_CLOSED, VNIC_REMOVING, - VNIC_REMOVED}; + VNIC_REMOVED, + VNIC_RESETTING}; enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1, VNIC_RESET_MOBILITY, @@ -1090,4 +1091,7 @@ struct ibmvnic_adapter { struct ibmvnic_tunables desired; struct ibmvnic_tunables fallback; + + /* Used for serializatin of state field */ + spinlock_t state_lock; }; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index db4ea58bac82..0f02c7a5ee9b 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3280,10 +3280,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) dev_info(&adapter->pdev->dev, "Some CPU C-states have been disabled in order to enable jumbo frames\n"); - 
pm_qos_update_request(&adapter->pm_qos_req, lat); + cpu_latency_qos_update_request(&adapter->pm_qos_req, lat); } else { - pm_qos_update_request(&adapter->pm_qos_req, - PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_update_request(&adapter->pm_qos_req, + PM_QOS_DEFAULT_VALUE); } /* Enable Receives */ @@ -4636,8 +4636,7 @@ int e1000e_open(struct net_device *netdev) e1000_update_mng_vlan(adapter); /* DMA latency requirement to workaround jumbo issue */ - pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_add_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE); /* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt @@ -4679,7 +4678,7 @@ int e1000e_open(struct net_device *netdev) return 0; err_req_irq: - pm_qos_remove_request(&adapter->pm_qos_req); + cpu_latency_qos_remove_request(&adapter->pm_qos_req); e1000e_release_hw_control(adapter); e1000_power_down_phy(adapter); e1000e_free_rx_resources(adapter->rx_ring); @@ -4743,7 +4742,7 @@ int e1000e_close(struct net_device *netdev) !test_bit(__E1000_TESTING, &adapter->state)) e1000e_release_hw_control(adapter); - pm_qos_remove_request(&adapter->pm_qos_req); + cpu_latency_qos_remove_request(&adapter->pm_qos_req); pm_runtime_put_sync(&pdev->dev); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index b002ab4e5838..77c412a7e7a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -2936,13 +2936,6 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) else return -EINVAL; - /* Tell the OS link is going down, the link will go back up when fw - * says it is ready asynchronously - */ - ice_print_link_msg(vsi, false); - netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); - /* Set the FC mode and only restart AN if link is up */ status = ice_set_fc(pi, &aq_failures, link_up); @@ -3489,21 +3482,13 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, return -EINVAL; } - /* hardware only supports an ITR granularity of 2us */ - if (coalesce_usecs % 2 != 0) { - netdev_info(vsi->netdev, "Invalid value, %s-usecs must be even\n", - c_type_str); - return -EINVAL; - } - if (use_adaptive_coalesce) { rc->itr_setting |= ICE_ITR_DYNAMIC; } else { - /* store user facing value how it was set */ + /* save the user set usecs */ rc->itr_setting = coalesce_usecs; - /* set to static and convert to value HW understands */ - rc->target_itr = - ITR_TO_REG(ITR_REG_ALIGN(rc->itr_setting)); + /* device ITR granularity is in 2 usec increments */ + rc->target_itr = ITR_REG_ALIGN(rc->itr_setting); } return 0; @@ -3597,6 +3582,30 @@ ice_is_coalesce_param_invalid(struct net_device *netdev, } /** + * ice_print_if_odd_usecs - print message if user tries to set odd [tx|rx]-usecs + * @netdev: netdev used for print + * @itr_setting: previous user setting + * @use_adaptive_coalesce: if adaptive coalesce is enabled or being enabled + * @coalesce_usecs: requested value of [tx|rx]-usecs + * @c_type_str: either "rx" or "tx" to match user set field of [tx|rx]-usecs + */ +static void +ice_print_if_odd_usecs(struct net_device *netdev, u16 itr_setting, + u32 use_adaptive_coalesce, u32 coalesce_usecs, + const char *c_type_str) +{ + if (use_adaptive_coalesce) + return; + + itr_setting = ITR_TO_REG(itr_setting); + + if (itr_setting != coalesce_usecs && (coalesce_usecs % 2)) + netdev_info(netdev, "User 
set %s-usecs to %d, device only supports even values. Rounding down and attempting to set %s-usecs to %d\n", + c_type_str, coalesce_usecs, c_type_str, + ITR_REG_ALIGN(coalesce_usecs)); +} + +/** * __ice_set_coalesce - set ITR/INTRL values for the device * @netdev: pointer to the netdev associated with this query * @ec: ethtool structure to fill with driver's coalesce settings @@ -3616,8 +3625,19 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, return -EINVAL; if (q_num < 0) { + struct ice_q_vector *q_vector = vsi->q_vectors[0]; int v_idx; + if (q_vector) { + ice_print_if_odd_usecs(netdev, q_vector->rx.itr_setting, + ec->use_adaptive_rx_coalesce, + ec->rx_coalesce_usecs, "rx"); + + ice_print_if_odd_usecs(netdev, q_vector->tx.itr_setting, + ec->use_adaptive_tx_coalesce, + ec->tx_coalesce_usecs, "tx"); + } + ice_for_each_q_vector(vsi, v_idx) { /* In some cases if DCB is configured the num_[rx|tx]q * can be less than vsi->num_q_vectors. This check diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 14a1bf445889..7ee00a128663 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -222,7 +222,7 @@ enum ice_rx_dtype { #define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */ #define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S) #define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */ -#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK) +#define ITR_REG_ALIGN(setting) ((setting) & ICE_ITR_MASK) #define ICE_ITR_ADAPTIVE_MIN_INC 0x0002 #define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002 diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 262714d5f54a..75c70d432c72 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -1874,6 +1874,48 @@ error_param: } /** + * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset + * @vf: The VF being resseting + * + * The max poll time is about ~800ms, which is about the maximum time it takes + * for a VF to be reset and/or a VF driver to be removed. + */ +static void ice_wait_on_vf_reset(struct ice_vf *vf) +{ + int i; + + for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) { + if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) + break; + msleep(ICE_MAX_VF_RESET_SLEEP_MS); + } +} + +/** + * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried + * @vf: VF to check if it's ready to be configured/queried + * + * The purpose of this function is to make sure the VF is not in reset, not + * disabled, and initialized so it can be configured and/or queried by a host + * administrator. 
+ */ +static int ice_check_vf_ready_for_cfg(struct ice_vf *vf) +{ + struct ice_pf *pf; + + ice_wait_on_vf_reset(vf); + + if (ice_is_vf_disabled(vf)) + return -EINVAL; + + pf = vf->pf; + if (ice_check_vf_init(pf, vf)) + return -EBUSY; + + return 0; +} + +/** * ice_set_vf_spoofchk * @netdev: network interface device structure * @vf_id: VF identifier @@ -1890,16 +1932,16 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) enum ice_status status; struct device *dev; struct ice_vf *vf; - int ret = 0; + int ret; dev = ice_pf_to_dev(pf); if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; vf = &pf->vf[vf_id]; - - if (ice_check_vf_init(pf, vf)) - return -EBUSY; + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + return ret; vf_vsi = pf->vsi[vf->lan_vsi_idx]; if (!vf_vsi) { @@ -2696,7 +2738,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, struct ice_vsi *vsi; struct device *dev; struct ice_vf *vf; - int ret = 0; + int ret; dev = ice_pf_to_dev(pf); if (ice_validate_vf_id(pf, vf_id)) @@ -2714,13 +2756,15 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, vf = &pf->vf[vf_id]; vsi = pf->vsi[vf->lan_vsi_idx]; - if (ice_check_vf_init(pf, vf)) - return -EBUSY; + + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + return ret; if (le16_to_cpu(vsi->info.pvid) == vlanprio) { /* duplicate request, so just return success */ dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio); - return ret; + return 0; } /* If PVID, then remove all filters on the old VLAN */ @@ -2731,7 +2775,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, if (vlan_id || qos) { ret = ice_vsi_manage_pvid(vsi, vlanprio, true); if (ret) - goto error_set_pvid; + return ret; } else { ice_vsi_manage_pvid(vsi, 0, false); vsi->info.pvid = 0; @@ -2744,7 +2788,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, /* add new VLAN filter for each MAC */ ret = ice_vsi_add_vlan(vsi, vlan_id); if (ret) - goto error_set_pvid; + return ret; } /* The Port VLAN needs to be saved across resets the same as the @@ -2752,8 +2796,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, */ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); -error_set_pvid: - return ret; + return 0; } /** @@ -3237,23 +3280,6 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) } /** - * ice_wait_on_vf_reset - * @vf: The VF being resseting - * - * Poll to make sure a given VF is ready after reset - */ -static void ice_wait_on_vf_reset(struct ice_vf *vf) -{ - int i; - - for (i = 0; i < ICE_MAX_VF_RESET_WAIT; i++) { - if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) - break; - msleep(20); - } -} - -/** * ice_set_vf_mac * @netdev: network interface device structure * @vf_id: VF identifier @@ -3265,29 +3291,21 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) { struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_vf *vf; - int ret = 0; + int ret; if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; - vf = &pf->vf[vf_id]; - /* Don't set MAC on disabled VF */ - if (ice_is_vf_disabled(vf)) - return -EINVAL; - - /* In case VF is in reset mode, wait until it is completed. 
Depending - * on factors like queue disabling routine, this could take ~250ms - */ - ice_wait_on_vf_reset(vf); - - if (ice_check_vf_init(pf, vf)) - return -EBUSY; - if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) { netdev_err(netdev, "%pM not a valid unicast address\n", mac); return -EINVAL; } + vf = &pf->vf[vf_id]; + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + return ret; + /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset * flow will use the updated dflt_lan_addr and add a MAC filter * using ice_add_mac. Also set pf_set_mac to indicate that the PF has @@ -3299,7 +3317,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) vf_id, mac); ice_vc_reset_vf(vf); - return ret; + return 0; } /** @@ -3314,22 +3332,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) { struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_vf *vf; + int ret; if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; vf = &pf->vf[vf_id]; - /* Don't set Trusted Mode on disabled VF */ - if (ice_is_vf_disabled(vf)) - return -EINVAL; - - /* In case VF is in reset mode, wait until it is completed. Depending - * on factors like queue disabling routine, this could take ~250ms - */ - ice_wait_on_vf_reset(vf); - - if (ice_check_vf_init(pf, vf)) - return -EBUSY; + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + return ret; /* Check if already trusted */ if (trusted == vf->trusted) @@ -3355,13 +3366,15 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) { struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_vf *vf; + int ret; if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; vf = &pf->vf[vf_id]; - if (ice_check_vf_init(pf, vf)) - return -EBUSY; + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + return ret; switch (link_state) { case IFLA_VF_LINK_STATE_AUTO: @@ -3397,14 +3410,15 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id, struct ice_eth_stats *stats; struct ice_vsi *vsi; struct ice_vf *vf; + int ret; if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; vf = &pf->vf[vf_id]; - - if (ice_check_vf_init(pf, vf)) - return -EBUSY; + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + return ret; vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 4647d636ed36..ac67982751df 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -38,7 +38,8 @@ #define ICE_MAX_POLICY_INTR_PER_VF 33 #define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) #define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) -#define ICE_MAX_VF_RESET_WAIT 15 +#define ICE_MAX_VF_RESET_TRIES 40 +#define ICE_MAX_VF_RESET_SLEEP_MS 20 #define ice_for_each_vf(pf, i) \ for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++) diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 0b9e851f3da4..d14762d93640 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -347,7 +347,7 @@ static int orion_mdio_probe(struct platform_device *pdev) } - dev->err_interrupt = platform_get_irq(pdev, 0); + dev->err_interrupt = platform_get_irq_optional(pdev, 0); if (dev->err_interrupt > 0 && resource_size(r) < MVMDIO_ERR_INT_MASK + 4) { dev_err(&pdev->dev, diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 98017e7d5dd0..11babc79dc6c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ 
b/drivers/net/ethernet/marvell/mvneta.c @@ -3036,11 +3036,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget) /* For the case where the last mvneta_poll did not process all * RX packets */ - rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); - cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : port->cause_rx_tx; + rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); if (rx_queue) { rx_queue = rx_queue - 1; if (pp->bm_priv) diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 9c481823b3e8..9486caecfbdc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -906,59 +906,59 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str, int len = 0; mlx4_err(dev, "%s", str); - len += snprintf(buf + len, BUF_SIZE - len, - "port = %d prio = 0x%x qp = 0x%x ", - rule->port, rule->priority, rule->qpn); + len += scnprintf(buf + len, BUF_SIZE - len, + "port = %d prio = 0x%x qp = 0x%x ", + rule->port, rule->priority, rule->qpn); list_for_each_entry(cur, &rule->list, list) { switch (cur->id) { case MLX4_NET_TRANS_RULE_ID_ETH: - len += snprintf(buf + len, BUF_SIZE - len, - "dmac = %pM ", &cur->eth.dst_mac); + len += scnprintf(buf + len, BUF_SIZE - len, + "dmac = %pM ", &cur->eth.dst_mac); if (cur->eth.ether_type) - len += snprintf(buf + len, BUF_SIZE - len, - "ethertype = 0x%x ", - be16_to_cpu(cur->eth.ether_type)); + len += scnprintf(buf + len, BUF_SIZE - len, + "ethertype = 0x%x ", + be16_to_cpu(cur->eth.ether_type)); if (cur->eth.vlan_id) - len += snprintf(buf + len, BUF_SIZE - len, - "vlan-id = %d ", - be16_to_cpu(cur->eth.vlan_id)); + len += scnprintf(buf + len, BUF_SIZE - len, + "vlan-id = %d ", + be16_to_cpu(cur->eth.vlan_id)); break; case MLX4_NET_TRANS_RULE_ID_IPV4: if (cur->ipv4.src_ip) - len += snprintf(buf + len, BUF_SIZE - len, - "src-ip = %pI4 ", - &cur->ipv4.src_ip); + len += scnprintf(buf + len, BUF_SIZE - len, + "src-ip = %pI4 ", + &cur->ipv4.src_ip); if (cur->ipv4.dst_ip) - len += snprintf(buf + len, BUF_SIZE - len, - "dst-ip = %pI4 ", - &cur->ipv4.dst_ip); + len += scnprintf(buf + len, BUF_SIZE - len, + "dst-ip = %pI4 ", + &cur->ipv4.dst_ip); break; case MLX4_NET_TRANS_RULE_ID_TCP: case MLX4_NET_TRANS_RULE_ID_UDP: if (cur->tcp_udp.src_port) - len += snprintf(buf + len, BUF_SIZE - len, - "src-port = %d ", - be16_to_cpu(cur->tcp_udp.src_port)); + len += scnprintf(buf + len, BUF_SIZE - len, + "src-port = %d ", + be16_to_cpu(cur->tcp_udp.src_port)); if (cur->tcp_udp.dst_port) - len += snprintf(buf + len, BUF_SIZE - len, - "dst-port = %d ", - be16_to_cpu(cur->tcp_udp.dst_port)); + len += scnprintf(buf + len, BUF_SIZE - len, + "dst-port = %d ", + be16_to_cpu(cur->tcp_udp.dst_port)); break; case MLX4_NET_TRANS_RULE_ID_IB: - len += snprintf(buf + len, BUF_SIZE - len, - "dst-gid = %pI6\n", cur->ib.dst_gid); - len += snprintf(buf + len, BUF_SIZE - len, - "dst-gid-mask = %pI6\n", - cur->ib.dst_gid_msk); + len += scnprintf(buf + len, BUF_SIZE - len, + "dst-gid = %pI6\n", cur->ib.dst_gid); + len += scnprintf(buf + len, BUF_SIZE - len, + "dst-gid-mask = %pI6\n", + cur->ib.dst_gid_msk); break; case MLX4_NET_TRANS_RULE_ID_VXLAN: - len += snprintf(buf + len, BUF_SIZE - len, - "VNID = %d ", be32_to_cpu(cur->vxlan.vni)); + len += scnprintf(buf + len, BUF_SIZE - len, + "VNID = %d ", be32_to_cpu(cur->vxlan.vni)); break; case MLX4_NET_TRANS_RULE_ID_IPV6: break; @@ -967,7 +967,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str, break; } } - len += snprintf(buf + len, BUF_SIZE - len, "\n"); + 
len += scnprintf(buf + len, BUF_SIZE - len, "\n"); mlx4_err(dev, "%s", buf); if (len >= BUF_SIZE) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 220ef9f06f84..c9606b8ab6ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -371,6 +371,7 @@ enum { struct mlx5e_sq_wqe_info { u8 opcode; + u8 num_wqebbs; /* Auxiliary data for different opcodes. */ union { @@ -1059,6 +1060,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state); void mlx5e_activate_rq(struct mlx5e_rq *rq); void mlx5e_deactivate_rq(struct mlx5e_rq *rq); void mlx5e_free_rx_descs(struct mlx5e_rq *rq); +void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq); void mlx5e_activate_icosq(struct mlx5e_icosq *icosq); void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 3a975641f902..20b907dc1e29 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -200,7 +200,7 @@ int mlx5e_health_report(struct mlx5e_priv *priv, netdev_err(priv->netdev, err_str); if (!reporter) - return err_ctx->recover(&err_ctx->ctx); + return err_ctx->recover(err_ctx->ctx); return devlink_health_report(reporter, err_str, err_ctx); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h index d3693fa547ac..e54f70d9af22 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -10,8 +10,7 @@ static inline bool cqe_syndrome_needs_recover(u8 syndrome) { - return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR || - syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || + return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR || syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 6c72b592315b..a01e2de2488f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -90,7 +90,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) goto out; mlx5e_reset_icosq_cc_pc(icosq); - mlx5e_free_rx_descs(rq); + mlx5e_free_rx_in_progress_descs(rq); clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state); mlx5e_activate_icosq(icosq); mlx5e_activate_rq(rq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 7c8796d9743f..f07b1399744e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -179,6 +179,16 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) } } +static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq) +{ + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + mlx5_wq_ll_reset(&rq->mpwqe.wq); + rq->mpwqe.actual_wq_head = 0; + } else { + mlx5_wq_cyc_reset(&rq->wqe.wq); + } +} + /* SW parser related functions */ struct mlx5e_swp_spec { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index a3efa29a4629..63116be6b1d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -38,8 +38,8 @@ enum { enum { MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START = 0, - MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 1, - MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 2, + MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 1, + MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2, }; struct mlx5e_ktls_offload_context_tx { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index f260dd96873b..52a56622034a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -218,7 +218,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, * this packet was already acknowledged and its record info * was released. */ - ends_before = before(tcp_seq + datalen, tls_record_start_seq(record)); + ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record)); if (unlikely(tls_record_is_start_marker(record))) { ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 454d3459bd8b..4ef3dc79f73c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -712,6 +712,9 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) if (!in) return -ENOMEM; + if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY) + mlx5e_rqwq_reset(rq); + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); MLX5_SET(modify_rq_in, in, rq_state, curr_state); @@ -810,6 +813,29 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) return -ETIMEDOUT; } +void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq; + u16 head; + int i; + + if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) + return; + + wq = &rq->mpwqe.wq; + head = wq->head; + + /* Outstanding UMR WQEs (in progress) start at wq->head */ + for (i = 0; i < rq->mpwqe.umr_in_progress; i++) { + rq->dealloc_wqe(rq, head); + head = mlx5_wq_ll_get_wqe_next_ix(wq, head); + } + + rq->mpwqe.actual_wq_head = wq->head; + rq->mpwqe.umr_in_progress = 0; + rq->mpwqe.umr_completed = 0; +} + void mlx5e_free_rx_descs(struct mlx5e_rq *rq) { __be16 wqe_ix_be; @@ -817,14 +843,8 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq) if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { struct mlx5_wq_ll *wq = &rq->mpwqe.wq; - u16 head = wq->head; - int i; - /* Outstanding UMR WQEs (in progress) start at wq->head */ - for (i = 0; i < rq->mpwqe.umr_in_progress; i++) { - rq->dealloc_wqe(rq, head); - head = mlx5_wq_ll_get_wqe_next_ix(wq, head); - } + mlx5e_free_rx_in_progress_descs(rq); while (!mlx5_wq_ll_is_empty(wq)) { struct mlx5e_rx_wqe_ll *wqe; @@ -5144,7 +5164,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) static void mlx5e_nic_disable(struct mlx5e_priv *priv) { - struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; #ifdef CONFIG_MLX5_CORE_EN_DCB @@ -5165,7 +5184,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) mlx5e_monitor_counter_cleanup(priv); mlx5e_disable_async_events(priv); - mlx5_lag_remove(mdev, netdev); + mlx5_lag_remove(mdev); } int mlx5e_update_nic_rx(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 7b48ccacebe2..6ed307d7f191 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1861,7 +1861,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) { - struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -1870,7 +1869,7 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) #endif mlx5_notifier_unregister(mdev, &priv->events_nb); cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work); - mlx5_lag_remove(mdev, netdev); + mlx5_lag_remove(mdev); } static MLX5E_DEFINE_STATS_GRP(sw_rep, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1c3ab69cbd96..312d4692425b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -477,6 +477,7 @@ static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq, /* fill sq frag edge with nops to avoid wqe wrapping two pages */ for (; wi < edge_wi; wi++) { wi->opcode = MLX5_OPCODE_NOP; + wi->num_wqebbs = 1; mlx5e_post_nop(wq, sq->sqn, &sq->pc); } } @@ -525,6 +526,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset); sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; + sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS; sq->db.ico_wqe[pi].umr.rq = rq; sq->pc += MLX5E_UMR_WQEBBS; @@ -621,6 +623,7 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); wi = &sq->db.ico_wqe[ci]; + sqcc += wi->num_wqebbs; if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { netdev_WARN_ONCE(cq->channel->netdev, @@ -631,16 +634,12 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) break; } - if (likely(wi->opcode == MLX5_OPCODE_UMR)) { - sqcc += MLX5E_UMR_WQEBBS; + if (likely(wi->opcode == MLX5_OPCODE_UMR)) wi->umr.rq->mpwqe.umr_completed++; - } else if (likely(wi->opcode == MLX5_OPCODE_NOP)) { - sqcc++; - } else { + else if (unlikely(wi->opcode != MLX5_OPCODE_NOP)) netdev_WARN_ONCE(cq->channel->netdev, "Bad OPCODE in ICOSQ WQE info: 0x%x\n", wi->opcode); - } } while (!last_wqe); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 74091f72c9a8..ec5fc52bf572 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2476,10 +2476,11 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, continue; if (f->field_bsize == 32) { - mask_be32 = *(__be32 *)&mask; + mask_be32 = (__be32)mask; mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32)); } else if (f->field_bsize == 16) { - mask_be16 = *(__be16 *)&mask; + mask_be32 = (__be32)mask; + mask_be16 = *(__be16 *)&mask_be32; mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 257a7c9f7a14..800d34ed8a96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -78,6 +78,7 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq) u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; + sq->db.ico_wqe[pi].num_wqebbs = 1; nopwqe = mlx5e_post_nop(wq, sq->sqn, 
&sq->pc); mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 5acf60b1bbfe..e49acd0c5da5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -459,12 +459,16 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) static int esw_legacy_enable(struct mlx5_eswitch *esw) { - int ret; + struct mlx5_vport *vport; + int ret, i; ret = esw_create_legacy_table(esw); if (ret) return ret; + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; + ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); if (ret) esw_destroy_legacy_table(esw); @@ -2452,25 +2456,17 @@ out: int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting) { - int err = 0; - if (!esw) return -EOPNOTSUPP; if (!ESW_ALLOWED(esw)) return -EPERM; - mutex_lock(&esw->state_lock); - if (esw->mode != MLX5_ESWITCH_LEGACY) { - err = -EOPNOTSUPP; - goto out; - } + if (esw->mode != MLX5_ESWITCH_LEGACY) + return -EOPNOTSUPP; *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0; - -out: - mutex_unlock(&esw->state_lock); - return err; + return 0; } int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 979f13bdc203..1a57b2bd74b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1172,7 +1172,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, return -EINVAL; } - mlx5_eswitch_disable(esw, true); + mlx5_eswitch_disable(esw, false); mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs); err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS); if (err) { @@ -2065,7 +2065,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, { int err, err1; - mlx5_eswitch_disable(esw, true); + mlx5_eswitch_disable(esw, false); err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c index c5a446e295aa..4276194b633f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c @@ -35,7 +35,7 @@ static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024, 1 * 1024 * 1024, 64 * 1024, - 4 * 1024, }; + 128 }; struct mlx5_esw_chains_priv { struct rhashtable chains_ht; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index b91eabc09fbc..93052b07c76c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -464,9 +464,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this, struct mlx5_lag *ldev; int changed = 0; - if (!net_eq(dev_net(ndev), &init_net)) - return NOTIFY_DONE; - if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE)) return NOTIFY_DONE; @@ -586,8 +583,7 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) if (!ldev->nb.notifier_call) { ldev->nb.notifier_call = mlx5_lag_netdev_event; - if (register_netdevice_notifier_dev_net(netdev, &ldev->nb, - 
&ldev->nn)) { + if (register_netdevice_notifier_net(&init_net, &ldev->nb)) { ldev->nb.notifier_call = NULL; mlx5_core_err(dev, "Failed to register LAG netdev notifier\n"); } @@ -600,7 +596,7 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) } /* Must be called with intf_mutex held */ -void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev) +void mlx5_lag_remove(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev; int i; @@ -619,9 +615,10 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev) break; if (i == MLX5_MAX_PORTS) { - if (ldev->nb.notifier_call) - unregister_netdevice_notifier_dev_net(netdev, &ldev->nb, - &ldev->nn); + if (ldev->nb.notifier_call) { + unregister_netdevice_notifier_net(&init_net, &ldev->nb); + ldev->nb.notifier_call = NULL; + } mlx5_lag_mp_cleanup(ldev); cancel_delayed_work_sync(&ldev->bond_work); mlx5_lag_dev_free(ldev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h index 316ab09e2664..f1068aac6406 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h @@ -44,7 +44,6 @@ struct mlx5_lag { struct workqueue_struct *wq; struct delayed_work bond_work; struct notifier_block nb; - struct netdev_net_notifier nn; struct lag_mp lag_mp; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index fcce9e0fc82c..da67b28d6e23 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -157,7 +157,7 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam, u8 feature_group, u8 access_reg_group); void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev); -void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev); +void mlx5_lag_remove(struct mlx5_core_dev *dev); int mlx5_irq_table_init(struct mlx5_core_dev *dev); void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 6dec2a550a10..2d93228ff633 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -933,7 +933,6 @@ static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn, action->rewrite.data = (void *)ops; action->rewrite.num_of_actions = i; - action->rewrite.chunk->byte_size = i * sizeof(*ops); ret = mlx5dr_send_postsend_action(dmn, action); if (ret) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index c7f10d4f8f8d..095ec7b1399d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -558,7 +558,8 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn, int ret; send_info.write.addr = (uintptr_t)action->rewrite.data; - send_info.write.length = action->rewrite.chunk->byte_size; + send_info.write.length = action->rewrite.num_of_actions * + DR_MODIFY_ACTION_SIZE; send_info.write.lkey = 0; send_info.remote_addr = action->rewrite.chunk->mr_addr; send_info.rkey = action->rewrite.chunk->rkey; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index c6c7d1defbd7..aade62a9ee5c 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -2307,7 +2307,9 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, struct mlx5dr_cmd_vport_cap *vport_cap; struct mlx5dr_domain *dmn = sb->dmn; struct mlx5dr_cmd_caps *caps; + u8 *bit_mask = sb->bit_mask; u8 *tag = hw_ste->tag; + bool source_gvmi_set; DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn); @@ -2328,7 +2330,8 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, if (!vport_cap) return -EINVAL; - if (vport_cap->vport_gvmi) + source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi); + if (vport_cap->vport_gvmi && source_gvmi_set) MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi); misc->source_eswitch_owner_vhca_id = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 3abfc8125926..c2027192e21e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -66,15 +66,20 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *next_ft) { struct mlx5dr_table *tbl; + u32 flags; int err; if (mlx5_dr_is_fw_table(ft->flags)) return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, log_size, next_ft); + flags = ft->flags; + /* turn off encap/decap if not supported for sw-str by fw */ + if (!MLX5_CAP_FLOWTABLE(ns->dev, sw_owner_reformat_supported)) + flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | + MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); - tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, - ft->level, ft->flags); + tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags); if (!tbl) { mlx5_core_err(ns->dev, "Failed creating dr flow_table\n"); return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 1faac31f74d0..23f879da9104 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -1071,6 +1071,9 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid); if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID) MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid); + MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1); + MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, + req->cap_mask1_perm); err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); ex: kfree(in); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 02f7e4a39578..01f075fac276 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -94,6 +94,13 @@ void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides) print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false); } +void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq) +{ + wq->wqe_ctr = 0; + wq->cur_sz = 0; + mlx5_wq_cyc_update_db_record(wq); +} + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, struct mlx5_wq_ctrl *wq_ctrl) @@ -192,6 +199,19 @@ err_db_free: return err; } +static void mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq) +{ + struct mlx5_wqe_srq_next_seg *next_seg; + int i; + + for (i = 0; i < 
wq->fbc.sz_m1; i++) { + next_seg = mlx5_wq_ll_get_wqe(wq, i); + next_seg->next_wqe_index = cpu_to_be16(i + 1); + } + next_seg = mlx5_wq_ll_get_wqe(wq, i); + wq->tail_next = &next_seg->next_wqe_index; +} + int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_ll *wq, struct mlx5_wq_ctrl *wq_ctrl) @@ -199,9 +219,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride); u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz); struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; - struct mlx5_wqe_srq_next_seg *next_seg; int err; - int i; err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -220,13 +238,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc); - for (i = 0; i < fbc->sz_m1; i++) { - next_seg = mlx5_wq_ll_get_wqe(wq, i); - next_seg->next_wqe_index = cpu_to_be16(i + 1); - } - next_seg = mlx5_wq_ll_get_wqe(wq, i); - wq->tail_next = &next_seg->next_wqe_index; - + mlx5_wq_ll_init_list(wq); wq_ctrl->mdev = mdev; return 0; @@ -237,6 +249,15 @@ err_db_free: return err; } +void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq) +{ + wq->head = 0; + wq->wqe_ctr = 0; + wq->cur_sz = 0; + mlx5_wq_ll_init_list(wq); + mlx5_wq_ll_update_db_record(wq); +} + void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl) { mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index d9a94bc223c0..4cadc336593f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -80,6 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl); void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides); +void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq); int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, @@ -92,6 +93,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_ll *wq, struct mlx5_wq_ctrl *wq_ctrl); +void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq); void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 914c33e46fb4..e9ded1a6e131 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1322,36 +1322,64 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci, mbox->mapaddr); } -static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, - const struct pci_device_id *id) +static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci, + const struct pci_device_id *id, + u32 *p_sys_status) { unsigned long end; - char mrsr_pl[MLXSW_REG_MRSR_LEN]; - int err; + u32 val; - mlxsw_reg_mrsr_pack(mrsr_pl); - err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl); - if (err) - return err; if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) { msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); return 0; } - /* We must wait for the HW to become responsive once again. */ + /* We must wait for the HW to become responsive. 
*/ msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS); end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); do { - u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); - + val = mlxsw_pci_read32(mlxsw_pci, FW_READY); if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) return 0; cond_resched(); } while (time_before(jiffies, end)); + + *p_sys_status = val & MLXSW_PCI_FW_READY_MASK; + return -EBUSY; } +static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, + const struct pci_device_id *id) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + char mrsr_pl[MLXSW_REG_MRSR_LEN]; + u32 sys_status; + int err; + + err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status); + if (err) { + dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n", + sys_status); + return err; + } + + mlxsw_reg_mrsr_pack(mrsr_pl); + err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl); + if (err) + return err; + + err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status); + if (err) { + dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n", + sys_status); + return err; + } + + return 0; +} + static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) { int err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index e0d7d2d9a0c8..43fa8c85b5d9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -28,7 +28,7 @@ #define MLXSW_PCI_SW_RESET 0xF0010 #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000 -#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 +#define MLXSW_PCI_SW_RESET_WAIT_MSECS 200 #define MLXSW_PCI_FW_READY 0xA1844 #define MLXSW_PCI_FW_READY_MASK 0xFFFF #define MLXSW_PCI_FW_READY_MAGIC 0x5E diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index dd6685156396..e05d1d1be2fd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3572,7 +3572,7 @@ MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1); * When in bytes mode, value is specified in units of 1000bps. * Access: RW */ -MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28); +MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 31); /* reg_qeec_de * DWRR configuration enable. 
Enables configuration of the dwrr and diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index 54275624718b..336e5ecc68f8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -637,12 +637,12 @@ static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table, return 0; err_erif_unresolve: - list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list, - vif_node) + list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list, + vif_node) mlxsw_sp_mr_route_evif_unresolve(mr_table, erve); err_irif_unresolve: - list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list, - vif_node) + list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list, + vif_node) mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve); mr_vif->rif = NULL; return err; diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index a41a90c589db..45cc840d8e2e 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -157,21 +157,47 @@ static int msg_enable; */ /** - * ks_rdreg8 - read 8 bit register from device + * ks_check_endian - Check whether endianness of the bus is correct * @ks : The chip information - * @offset: The register address * - * Read a 8bit register from the chip, returning the result + * The KS8851-16MLL EESK pin allows selecting the endianness of the 16bit + * bus. To maintain optimum performance, the bus endianness should be set + * such that it matches the endianness of the CPU. */ -static u8 ks_rdreg8(struct ks_net *ks, int offset) + +static int ks_check_endian(struct ks_net *ks) { - u16 data; - u8 shift_bit = offset & 0x03; - u8 shift_data = (offset & 1) << 3; - ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit); - iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); - data = ioread16(ks->hw_addr); - return (u8)(data >> shift_data); + u16 cider; + + /* + * Read CIDER register first, however read it the "wrong" way around. + * If the endian strap on the KS8851-16MLL in incorrect and the chip + * is operating in different endianness than the CPU, then the meaning + * of BE[3:0] byte-enable bits is also swapped such that: + * BE[3,2,1,0] becomes BE[1,0,3,2] + * + * Luckily for us, the byte-enable bits are the top four MSbits of + * the address register and the CIDER register is at offset 0xc0. + * Hence, by reading address 0xc0c0, which is not impacted by endian + * swapping, we assert either BE[3:2] or BE[1:0] while reading the + * CIDER register. + * + * If the bus configuration is correct, reading 0xc0c0 asserts + * BE[3:2] and this read returns 0x0000, because to read register + * with bottom two LSbits of address set to 0, BE[1:0] must be + * asserted. + * + * If the bus configuration is NOT correct, reading 0xc0c0 asserts + * BE[1:0] and this read returns non-zero 0x8872 value. 
+ */ + iowrite16(BE3 | BE2 | KS_CIDER, ks->hw_addr_cmd); + cider = ioread16(ks->hw_addr); + if (!cider) + return 0; + + netdev_err(ks->netdev, "incorrect EESK endian strap setting\n"); + + return -EINVAL; } /** @@ -190,22 +216,6 @@ static u16 ks_rdreg16(struct ks_net *ks, int offset) } /** - * ks_wrreg8 - write 8bit register value to chip - * @ks: The chip information - * @offset: The register address - * @value: The value to write - * - */ -static void ks_wrreg8(struct ks_net *ks, int offset, u8 value) -{ - u8 shift_bit = (offset & 0x03); - u16 value_write = (u16)(value << ((offset & 1) << 3)); - ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit); - iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); - iowrite16(value_write, ks->hw_addr); -} - -/** * ks_wrreg16 - write 16bit register value to chip * @ks: The chip information * @offset: The register address @@ -324,8 +334,7 @@ static void ks_read_config(struct ks_net *ks) u16 reg_data = 0; /* Regardless of bus width, 8 bit read should always work.*/ - reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF; - reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8; + reg_data = ks_rdreg16(ks, KS_CCR); /* addr/data bus are multiplexed */ ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED; @@ -429,7 +438,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len) /* 1. set sudo DMA mode */ ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); - ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff); + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); /* 2. read prepend data */ /** @@ -446,7 +455,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len) ks_inblk(ks, buf, ALIGN(len, 4)); /* 4. reset sudo DMA Mode */ - ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr); + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); } /** @@ -548,14 +557,17 @@ static irqreturn_t ks_irq(int irq, void *pw) { struct net_device *netdev = pw; struct ks_net *ks = netdev_priv(netdev); + unsigned long flags; u16 status; + spin_lock_irqsave(&ks->statelock, flags); /*this should be the first in IRQ handler */ ks_save_cmd_reg(ks); status = ks_rdreg16(ks, KS_ISR); if (unlikely(!status)) { ks_restore_cmd_reg(ks); + spin_unlock_irqrestore(&ks->statelock, flags); return IRQ_NONE; } @@ -581,6 +593,7 @@ static irqreturn_t ks_irq(int irq, void *pw) ks->netdev->stats.rx_over_errors++; /* this should be the last in IRQ handler*/ ks_restore_cmd_reg(ks); + spin_unlock_irqrestore(&ks->statelock, flags); return IRQ_HANDLED; } @@ -650,6 +663,7 @@ static int ks_net_stop(struct net_device *netdev) /* shutdown RX/TX QMU */ ks_disable_qmu(ks); + ks_disable_int(ks); /* set powermode to soft power down to save power */ ks_set_powermode(ks, PMECR_PM_SOFTDOWN); @@ -679,13 +693,13 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len) ks->txh.txw[1] = cpu_to_le16(len); /* 1. set sudo-DMA mode */ - ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff); + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); /* 2. write status/lenth info */ ks_outblk(ks, ks->txh.txw, 4); /* 3. write pkt data */ ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4)); /* 4. reset sudo-DMA mode */ - ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr); + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */ ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE); /* 6. 
wait until TXQCR_METFE is auto-cleared */ @@ -706,10 +720,9 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) { netdev_tx_t retv = NETDEV_TX_OK; struct ks_net *ks = netdev_priv(netdev); + unsigned long flags; - disable_irq(netdev->irq); - ks_disable_int(ks); - spin_lock(&ks->statelock); + spin_lock_irqsave(&ks->statelock, flags); /* Extra space are required: * 4 byte for alignment, 4 for status/length, 4 for CRC @@ -723,9 +736,7 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) dev_kfree_skb(skb); } else retv = NETDEV_TX_BUSY; - spin_unlock(&ks->statelock); - ks_enable_int(ks); - enable_irq(netdev->irq); + spin_unlock_irqrestore(&ks->statelock, flags); return retv; } @@ -1251,6 +1262,10 @@ static int ks8851_probe(struct platform_device *pdev) goto err_free; } + err = ks_check_endian(ks); + if (err) + goto err_free; + netdev->irq = platform_get_irq(pdev, 0); if ((int)netdev->irq < 0) { diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 86d543ab1ab9..d3b7373c5961 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -2176,24 +2176,29 @@ static int ocelot_init_timestamp(struct ocelot *ocelot) return 0; } -static void ocelot_port_set_mtu(struct ocelot *ocelot, int port, size_t mtu) +/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu. + * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG. + */ +static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN; int atop_wm; - ocelot_port_writel(ocelot_port, mtu, DEV_MAC_MAXLEN_CFG); + ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG); /* Set Pause WM hysteresis - * 152 = 6 * mtu / OCELOT_BUFFER_CELL_SZ - * 101 = 4 * mtu / OCELOT_BUFFER_CELL_SZ + * 152 = 6 * maxlen / OCELOT_BUFFER_CELL_SZ + * 101 = 4 * maxlen / OCELOT_BUFFER_CELL_SZ */ ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA | SYS_PAUSE_CFG_PAUSE_STOP(101) | SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port); /* Tail dropping watermark */ - atop_wm = (ocelot->shared_queue_sz - 9 * mtu) / OCELOT_BUFFER_CELL_SZ; - ocelot_write_rix(ocelot, ocelot_wm_enc(9 * mtu), + atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) / + OCELOT_BUFFER_CELL_SZ; + ocelot_write_rix(ocelot, ocelot_wm_enc(9 * maxlen), SYS_ATOP, port); ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG); } @@ -2222,9 +2227,10 @@ void ocelot_init_port(struct ocelot *ocelot, int port) DEV_MAC_HDX_CFG); /* Set Max Length and maximum tags allowed */ - ocelot_port_set_mtu(ocelot, port, VLAN_ETH_FRAME_LEN); + ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN); ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) | DEV_MAC_TAGS_CFG_VLAN_AWR_ENA | + DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA | DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, DEV_MAC_TAGS_CFG); @@ -2310,18 +2316,18 @@ void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu, * Only one port can be an NPI at the same time. 
*/ if (cpu < ocelot->num_phys_ports) { - int mtu = VLAN_ETH_FRAME_LEN + OCELOT_TAG_LEN; + int sdu = ETH_DATA_LEN + OCELOT_TAG_LEN; ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M | QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu), QSYS_EXT_CPU_CFG); if (injection == OCELOT_TAG_PREFIX_SHORT) - mtu += OCELOT_SHORT_PREFIX_LEN; + sdu += OCELOT_SHORT_PREFIX_LEN; else if (injection == OCELOT_TAG_PREFIX_LONG) - mtu += OCELOT_LONG_PREFIX_LEN; + sdu += OCELOT_LONG_PREFIX_LEN; - ocelot_port_set_mtu(ocelot, cpu, mtu); + ocelot_port_set_maxlen(ocelot, cpu, sdu); } /* CPU port Injection/Extraction configuration */ diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index b38820849faa..1135a18019c7 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -114,6 +114,14 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) if (err != 4) break; + /* At this point the IFH was read correctly, so it is safe to + * presume that there is no error. The err needs to be reset + * otherwise a frame could come in CPU queue between the while + * condition and the check for error later on. And in that case + * the new frame is just removed and not processed. + */ + err = 0; + ocelot_parse_ifh(ifh, &info); ocelot_port = ocelot->ports[info.port]; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h index e678ba379598..628fa9b2f741 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h @@ -2045,7 +2045,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \ (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\ if ((mask & VXGE_DEBUG_MASK) == mask) \ - printk(fmt "\n", __VA_ARGS__); \ + printk(fmt "\n", ##__VA_ARGS__); \ } while (0) #else #define vxge_debug_ll(level, mask, fmt, ...) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h index 59a57ff5e96a..9c86f4f9cd42 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h @@ -452,49 +452,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); #if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK) #define vxge_debug_ll_config(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__) #else #define vxge_debug_ll_config(level, fmt, ...) #endif #if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) #define vxge_debug_init(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__) #else #define vxge_debug_init(level, fmt, ...) #endif #if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK) #define vxge_debug_tx(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__) #else #define vxge_debug_tx(level, fmt, ...) #endif #if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK) #define vxge_debug_rx(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__) #else #define vxge_debug_rx(level, fmt, ...) #endif #if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK) #define vxge_debug_mem(level, fmt, ...) 
\ - vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__) #else #define vxge_debug_mem(level, fmt, ...) #endif #if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK) #define vxge_debug_entryexit(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__) #else #define vxge_debug_entryexit(level, fmt, ...) #endif #if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK) #define vxge_debug_intr(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__) #else #define vxge_debug_intr(level, fmt, ...) #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index b454db283aef..684e4e036c55 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -616,7 +616,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) if (bar->iomem) { int pf; - msg += snprintf(msg, end - msg, "0.0: General/MSI-X SRAM, "); + msg += scnprintf(msg, end - msg, "0.0: General/MSI-X SRAM, "); atomic_inc(&bar->refcnt); bars_free--; @@ -661,7 +661,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) /* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */ bar = &nfp->bar[1]; - msg += snprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, "); + msg += scnprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, "); atomic_inc(&bar->refcnt); bars_free--; @@ -680,8 +680,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) bar->iomem = ioremap(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { - msg += snprintf(msg, end - msg, - "0.%d: Explicit%d, ", 4 + i, i); + msg += scnprintf(msg, end - msg, + "0.%d: Explicit%d, ", 4 + i, i); atomic_inc(&bar->refcnt); bars_free--; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 87f82f36812f..46107de5e6c3 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -103,7 +103,7 @@ int ionic_heartbeat_check(struct ionic *ionic) { struct ionic_dev *idev = &ionic->idev; unsigned long hb_time; - u32 fw_status; + u8 fw_status; u32 hb; /* wait a little more than one second before testing again */ @@ -111,9 +111,12 @@ int ionic_heartbeat_check(struct ionic *ionic) if (time_before(hb_time, (idev->last_hb_time + ionic->watchdog_period))) return 0; - /* firmware is useful only if fw_status is non-zero */ - fw_status = ioread32(&idev->dev_info_regs->fw_status); - if (!fw_status) + /* firmware is useful only if the running bit is set and + * fw_status != 0xff (bad PCI read) + */ + fw_status = ioread8(&idev->dev_info_regs->fw_status); + if (fw_status == 0xff || + !(fw_status & IONIC_FW_STS_F_RUNNING)) return -ENXIO; /* early FW has no heartbeat, else FW will return non-zero */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index ce07c2931a72..51adf5059834 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */ +/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */ /* Copyright (c) 2017-2019 Pensando Systems, Inc. All rights reserved. 
*/ #ifndef _IONIC_IF_H_ @@ -2445,6 +2445,7 @@ union ionic_dev_info_regs { u8 version; u8 asic_type; u8 asic_rev; +#define IONIC_FW_STS_F_RUNNING 0x1 u8 fw_status; u32 fw_heartbeat; char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN]; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 191271f6260d..938e19ee0bcd 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -948,18 +948,18 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) int i; #define REMAIN(__x) (sizeof(buf) - (__x)) - i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", - lif->rx_mode, rx_mode); + i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", + lif->rx_mode, rx_mode); if (rx_mode & IONIC_RX_MODE_F_UNICAST) - i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); if (rx_mode & IONIC_RX_MODE_F_MULTICAST) - i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); if (rx_mode & IONIC_RX_MODE_F_BROADCAST) - i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); if (rx_mode & IONIC_RX_MODE_F_PROMISC) - i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) - i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf); err = ionic_adminq_post_wait(lif, &ctx); @@ -1688,7 +1688,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac))) return -EINVAL; - down_read(&ionic->vf_op_lock); + down_write(&ionic->vf_op_lock); if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { ret = -EINVAL; @@ -1698,7 +1698,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) ether_addr_copy(ionic->vfs[vf].macaddr, mac); } - up_read(&ionic->vf_op_lock); + up_write(&ionic->vf_op_lock); return ret; } @@ -1719,7 +1719,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, if (proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; - down_read(&ionic->vf_op_lock); + down_write(&ionic->vf_op_lock); if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { ret = -EINVAL; @@ -1730,7 +1730,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, ionic->vfs[vf].vlanid = vlan; } - up_read(&ionic->vf_op_lock); + up_write(&ionic->vf_op_lock); return ret; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_regs.h b/drivers/net/ethernet/pensando/ionic/ionic_regs.h index 03ee5a36472b..2e174f45c030 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_regs.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_regs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */ +/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */ /* Copyright (c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
*/ #ifndef IONIC_REGS_H diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index e8a1b27db84d..234c6f30effb 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -163,6 +163,8 @@ struct qede_rdma_dev { struct list_head entry; struct list_head rdma_event_list; struct workqueue_struct *rdma_wq; + struct kref refcnt; + struct completion event_comp; bool exp_recovery; }; diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index ffabc2d2f082..2d873ae8a234 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -59,6 +59,9 @@ static void _qede_rdma_dev_add(struct qede_dev *edev) static int qede_rdma_create_wq(struct qede_dev *edev) { INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list); + kref_init(&edev->rdma_info.refcnt); + init_completion(&edev->rdma_info.event_comp); + edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq"); if (!edev->rdma_info.rdma_wq) { DP_NOTICE(edev, "qedr: Could not create workqueue\n"); @@ -83,8 +86,23 @@ static void qede_rdma_cleanup_event(struct qede_dev *edev) } } +static void qede_rdma_complete_event(struct kref *ref) +{ + struct qede_rdma_dev *rdma_dev = + container_of(ref, struct qede_rdma_dev, refcnt); + + /* no more events will be added after this */ + complete(&rdma_dev->event_comp); +} + static void qede_rdma_destroy_wq(struct qede_dev *edev) { + /* Avoid race with add_event flow, make sure it finishes before + * we start accessing the list and cleaning up the work + */ + kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event); + wait_for_completion(&edev->rdma_info.event_comp); + qede_rdma_cleanup_event(edev); destroy_workqueue(edev->rdma_info.rdma_wq); } @@ -310,15 +328,24 @@ static void qede_rdma_add_event(struct qede_dev *edev, if (!edev->rdma_info.qedr_dev) return; + /* We don't want the cleanup flow to start while we're allocating and + * scheduling the work + */ + if (!kref_get_unless_zero(&edev->rdma_info.refcnt)) + return; /* already being destroyed */ + event_node = qede_rdma_get_free_event_node(edev); if (!event_node) - return; + goto out; event_node->event = event; event_node->ptr = edev; INIT_WORK(&event_node->work, qede_rdma_handle_event); queue_work(edev->rdma_info.rdma_wq, &event_node->work); + +out: + kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event); } void qede_rdma_dev_event_open(struct qede_dev *edev) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 07f9067affc6..cda5b0a9e948 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1720,7 +1720,7 @@ static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_d ahw->reset.seq_error = 0; ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL); - if (p_dev->ahw->reset.buff == NULL) + if (ahw->reset.buff == NULL) return -ENOMEM; p_buff = p_dev->ahw->reset.buff; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 06de59521fc4..fbf4cbcf1a65 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -13,25 +13,6 @@ #include "rmnet_vnd.h" #include "rmnet_private.h" -/* Locking scheme - - * The shared resource which needs to be protected is 
realdev->rx_handler_data. - * For the writer path, this is using rtnl_lock(). The writer paths are - * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These - * paths are already called with rtnl_lock() acquired in. There is also an - * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For - * dereference here, we will need to use rtnl_dereference(). Dev list writing - * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link(). - * For the reader path, the real_dev->rx_handler_data is called in the TX / RX - * path. We only need rcu_read_lock() for these scenarios. In these cases, - * the rcu_read_lock() is held in __dev_queue_xmit() and - * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl() - * to get the relevant information. For dev list reading, we again acquire - * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu(). - * We also use unregister_netdevice_many() to free all rmnet devices in - * rmnet_force_unassociate_device() so we dont lose the rtnl_lock() and free in - * same context. - */ - /* Local Definitions and Declarations */ static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = { @@ -51,9 +32,10 @@ rmnet_get_port_rtnl(const struct net_device *real_dev) return rtnl_dereference(real_dev->rx_handler_data); } -static int rmnet_unregister_real_device(struct net_device *real_dev, - struct rmnet_port *port) +static int rmnet_unregister_real_device(struct net_device *real_dev) { + struct rmnet_port *port = rmnet_get_port_rtnl(real_dev); + if (port->nr_rmnet_devs) return -EINVAL; @@ -61,9 +43,6 @@ static int rmnet_unregister_real_device(struct net_device *real_dev, kfree(port); - /* release reference on real_dev */ - dev_put(real_dev); - netdev_dbg(real_dev, "Removed from rmnet\n"); return 0; } @@ -89,9 +68,6 @@ static int rmnet_register_real_device(struct net_device *real_dev) return -EBUSY; } - /* hold on to real dev for MAP data */ - dev_hold(real_dev); - for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++) INIT_HLIST_HEAD(&port->muxed_ep[entry]); @@ -99,28 +75,33 @@ static int rmnet_register_real_device(struct net_device *real_dev) return 0; } -static void rmnet_unregister_bridge(struct net_device *dev, - struct rmnet_port *port) +static void rmnet_unregister_bridge(struct rmnet_port *port) { - struct rmnet_port *bridge_port; - struct net_device *bridge_dev; + struct net_device *bridge_dev, *real_dev, *rmnet_dev; + struct rmnet_port *real_port; if (port->rmnet_mode != RMNET_EPMODE_BRIDGE) return; - /* bridge slave handling */ + rmnet_dev = port->rmnet_dev; if (!port->nr_rmnet_devs) { - bridge_dev = port->bridge_ep; + /* bridge device */ + real_dev = port->bridge_ep; + bridge_dev = port->dev; - bridge_port = rmnet_get_port_rtnl(bridge_dev); - bridge_port->bridge_ep = NULL; - bridge_port->rmnet_mode = RMNET_EPMODE_VND; + real_port = rmnet_get_port_rtnl(real_dev); + real_port->bridge_ep = NULL; + real_port->rmnet_mode = RMNET_EPMODE_VND; } else { + /* real device */ bridge_dev = port->bridge_ep; - bridge_port = rmnet_get_port_rtnl(bridge_dev); - rmnet_unregister_real_device(bridge_dev, bridge_port); + port->bridge_ep = NULL; + port->rmnet_mode = RMNET_EPMODE_VND; } + + netdev_upper_dev_unlink(bridge_dev, rmnet_dev); + rmnet_unregister_real_device(bridge_dev); } static int rmnet_newlink(struct net *src_net, struct net_device *dev, @@ -135,6 +116,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, int err = 0; u16 mux_id; + if (!tb[IFLA_LINK]) { + 
NL_SET_ERR_MSG_MOD(extack, "link not specified"); + return -EINVAL; + } + real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev || !dev) return -ENODEV; @@ -157,7 +143,12 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, if (err) goto err1; + err = netdev_upper_dev_link(real_dev, dev, extack); + if (err < 0) + goto err2; + port->rmnet_mode = mode; + port->rmnet_dev = dev; hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); @@ -173,8 +164,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, return 0; +err2: + unregister_netdevice(dev); + rmnet_vnd_dellink(mux_id, port, ep); err1: - rmnet_unregister_real_device(real_dev, port); + rmnet_unregister_real_device(real_dev); err0: kfree(ep); return err; @@ -183,77 +177,74 @@ err0: static void rmnet_dellink(struct net_device *dev, struct list_head *head) { struct rmnet_priv *priv = netdev_priv(dev); - struct net_device *real_dev; + struct net_device *real_dev, *bridge_dev; + struct rmnet_port *real_port, *bridge_port; struct rmnet_endpoint *ep; - struct rmnet_port *port; - u8 mux_id; + u8 mux_id = priv->mux_id; real_dev = priv->real_dev; - if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) + if (!rmnet_is_real_dev_registered(real_dev)) return; - port = rmnet_get_port_rtnl(real_dev); - - mux_id = rmnet_vnd_get_mux(dev); + real_port = rmnet_get_port_rtnl(real_dev); + bridge_dev = real_port->bridge_ep; + if (bridge_dev) { + bridge_port = rmnet_get_port_rtnl(bridge_dev); + rmnet_unregister_bridge(bridge_port); + } - ep = rmnet_get_endpoint(port, mux_id); + ep = rmnet_get_endpoint(real_port, mux_id); if (ep) { hlist_del_init_rcu(&ep->hlnode); - rmnet_unregister_bridge(dev, port); - rmnet_vnd_dellink(mux_id, port, ep); + rmnet_vnd_dellink(mux_id, real_port, ep); kfree(ep); } - rmnet_unregister_real_device(real_dev, port); + netdev_upper_dev_unlink(real_dev, dev); + rmnet_unregister_real_device(real_dev); unregister_netdevice_queue(dev, head); } -static void rmnet_force_unassociate_device(struct net_device *dev) +static void rmnet_force_unassociate_device(struct net_device *real_dev) { - struct net_device *real_dev = dev; struct hlist_node *tmp_ep; struct rmnet_endpoint *ep; struct rmnet_port *port; unsigned long bkt_ep; LIST_HEAD(list); - if (!rmnet_is_real_dev_registered(real_dev)) - return; - - ASSERT_RTNL(); - - port = rmnet_get_port_rtnl(dev); - - rcu_read_lock(); - rmnet_unregister_bridge(dev, port); - - hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { - unregister_netdevice_queue(ep->egress_dev, &list); - rmnet_vnd_dellink(ep->mux_id, port, ep); + port = rmnet_get_port_rtnl(real_dev); - hlist_del_init_rcu(&ep->hlnode); - kfree(ep); + if (port->nr_rmnet_devs) { + /* real device */ + rmnet_unregister_bridge(port); + hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { + unregister_netdevice_queue(ep->egress_dev, &list); + netdev_upper_dev_unlink(real_dev, ep->egress_dev); + rmnet_vnd_dellink(ep->mux_id, port, ep); + hlist_del_init_rcu(&ep->hlnode); + kfree(ep); + } + rmnet_unregister_real_device(real_dev); + unregister_netdevice_many(&list); + } else { + rmnet_unregister_bridge(port); } - - rcu_read_unlock(); - unregister_netdevice_many(&list); - - rmnet_unregister_real_device(real_dev, port); } static int rmnet_config_notify_cb(struct notifier_block *nb, unsigned long event, void *data) { - struct net_device *dev = netdev_notifier_info_to_dev(data); + struct net_device *real_dev = netdev_notifier_info_to_dev(data); - if (!dev) + if 
(!rmnet_is_real_dev_registered(real_dev)) return NOTIFY_DONE; switch (event) { case NETDEV_UNREGISTER: - netdev_dbg(dev, "Kernel unregister\n"); - rmnet_force_unassociate_device(dev); + netdev_dbg(real_dev, "Kernel unregister\n"); + rmnet_force_unassociate_device(real_dev); break; default: @@ -295,16 +286,18 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], if (!dev) return -ENODEV; - real_dev = __dev_get_by_index(dev_net(dev), - nla_get_u32(tb[IFLA_LINK])); - - if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) + real_dev = priv->real_dev; + if (!rmnet_is_real_dev_registered(real_dev)) return -ENODEV; port = rmnet_get_port_rtnl(real_dev); if (data[IFLA_RMNET_MUX_ID]) { mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); + if (rmnet_get_endpoint(port, mux_id)) { + NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists"); + return -EINVAL; + } ep = rmnet_get_endpoint(port, priv->mux_id); if (!ep) return -ENODEV; @@ -379,11 +372,10 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = { .fill_info = rmnet_fill_info, }; -/* Needs either rcu_read_lock() or rtnl lock */ -struct rmnet_port *rmnet_get_port(struct net_device *real_dev) +struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev) { if (rmnet_is_real_dev_registered(real_dev)) - return rcu_dereference_rtnl(real_dev->rx_handler_data); + return rcu_dereference_bh(real_dev->rx_handler_data); else return NULL; } @@ -409,7 +401,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, struct rmnet_port *port, *slave_port; int err; - port = rmnet_get_port(real_dev); + port = rmnet_get_port_rtnl(real_dev); /* If there is more than one rmnet dev attached, its probably being * used for muxing. Skip the briding in that case @@ -417,6 +409,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, if (port->nr_rmnet_devs > 1) return -EINVAL; + if (port->rmnet_mode != RMNET_EPMODE_VND) + return -EINVAL; + if (rmnet_is_real_dev_registered(slave_dev)) return -EBUSY; @@ -424,9 +419,17 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, if (err) return -EBUSY; - slave_port = rmnet_get_port(slave_dev); + err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL, + extack); + if (err) { + rmnet_unregister_real_device(slave_dev); + return err; + } + + slave_port = rmnet_get_port_rtnl(slave_dev); slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE; slave_port->bridge_ep = real_dev; + slave_port->rmnet_dev = rmnet_dev; port->rmnet_mode = RMNET_EPMODE_BRIDGE; port->bridge_ep = slave_dev; @@ -438,16 +441,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, int rmnet_del_bridge(struct net_device *rmnet_dev, struct net_device *slave_dev) { - struct rmnet_priv *priv = netdev_priv(rmnet_dev); - struct net_device *real_dev = priv->real_dev; - struct rmnet_port *port, *slave_port; + struct rmnet_port *port = rmnet_get_port_rtnl(slave_dev); - port = rmnet_get_port(real_dev); - port->rmnet_mode = RMNET_EPMODE_VND; - port->bridge_ep = NULL; - - slave_port = rmnet_get_port(slave_dev); - rmnet_unregister_real_device(slave_dev, slave_port); + rmnet_unregister_bridge(port); netdev_dbg(slave_dev, "removed from rmnet as slave\n"); return 0; @@ -473,8 +469,8 @@ static int __init rmnet_init(void) static void __exit rmnet_exit(void) { - unregister_netdevice_notifier(&rmnet_dev_notifier); rtnl_link_unregister(&rmnet_link_ops); + unregister_netdevice_notifier(&rmnet_dev_notifier); } module_init(rmnet_init) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 
cd0a6bcbe74a..be515982d628 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -28,6 +28,7 @@ struct rmnet_port { u8 rmnet_mode; struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP]; struct net_device *bridge_ep; + struct net_device *rmnet_dev; }; extern struct rtnl_link_ops rmnet_link_ops; @@ -65,7 +66,7 @@ struct rmnet_priv { struct rmnet_priv_stats stats; }; -struct rmnet_port *rmnet_get_port(struct net_device *real_dev); +struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev); struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id); int rmnet_add_bridge(struct net_device *rmnet_dev, struct net_device *slave_dev, diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 1b74bc160402..29a7bfa2584d 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -159,6 +159,9 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, static void rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev) { + if (skb_mac_header_was_set(skb)) + skb_push(skb, skb->mac_len); + if (bridge_dev) { skb->dev = bridge_dev; dev_queue_xmit(skb); @@ -184,7 +187,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) return RX_HANDLER_PASS; dev = skb->dev; - port = rmnet_get_port(dev); + port = rmnet_get_port_rcu(dev); switch (port->rmnet_mode) { case RMNET_EPMODE_VND: @@ -217,7 +220,7 @@ void rmnet_egress_handler(struct sk_buff *skb) skb->dev = priv->real_dev; mux_id = priv->mux_id; - port = rmnet_get_port(skb->dev); + port = rmnet_get_port_rcu(skb->dev); if (!port) goto drop; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 509dfc895a33..26ad40f19c64 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -266,14 +266,6 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, return 0; } -u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev) -{ - struct rmnet_priv *priv; - - priv = netdev_priv(rmnet_dev); - return priv->mux_id; -} - int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable) { netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h index 54cbaf3c3bc4..14d77c709d4a 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h @@ -16,6 +16,5 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, struct rmnet_endpoint *ep); void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev); void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev); -u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev); void rmnet_vnd_setup(struct net_device *dev); #endif /* _RMNET_VND_H_ */ diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index a2168a14794c..791d99b9e1cf 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5194,7 +5194,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp) RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); rtl_lock_config_regs(tp); /* fall through */ - case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_07 ... 
RTL_GIGA_MAC_VER_17: flags = PCI_IRQ_LEGACY; break; default: @@ -5285,6 +5285,13 @@ static int r8169_mdio_register(struct rtl8169_private *tp) if (!tp->phydev) { mdiobus_unregister(new_bus); return -ENODEV; + } else if (!tp->phydev->drv) { + /* Most chip versions fail with the genphy driver. + * Therefore ensure that the dedicated PHY driver is loaded. + */ + dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n"); + mdiobus_unregister(new_bus); + return -EUNATCH; } /* PHY will be woken up in rtl_open() */ @@ -5446,15 +5453,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) int chipset, region; int jumbo_max, rc; - /* Some tools for creating an initramfs don't consider softdeps, then - * r8169.ko may be in initramfs, but realtek.ko not. Then the generic - * PHY driver is used that doesn't work with most chip versions. - */ - if (!driver_find("RTL8201CP Ethernet", &mdio_bus_type)) { - dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n"); - return -ENOENT; - } - dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); if (!dev) return -ENOMEM; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index c705743d69f7..2cc8184b7e6b 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -2277,7 +2277,7 @@ static int __init sxgbe_cmdline_opt(char *str) if (!str || !*str) return -EINVAL; while ((opt = strsep(&str, ",")) != NULL) { - if (!strncmp(opt, "eee_timer:", 6)) { + if (!strncmp(opt, "eee_timer:", 10)) { if (kstrtoint(opt + 10, 0, &eee_timer)) goto err; } diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 52113b7529d6..3f16bd807c6e 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -2853,11 +2853,24 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) } /* Transmit timestamps are only available for 8XXX series. They result - * in three events per packet. These occur in order, and are: - * - the normal completion event + * in up to three events per packet. These occur in order, and are: + * - the normal completion event (may be omitted) * - the low part of the timestamp * - the high part of the timestamp * + * It's possible for multiple completion events to appear before the + * corresponding timestamps. So we can for example get: + * COMP N + * COMP N+1 + * TS_LO N + * TS_HI N + * TS_LO N+1 + * TS_HI N+1 + * + * In addition it's also possible for the adjacent completions to be + * merged, so we may not see COMP N above. As such, the completion + * events are not very useful here. + * * Each part of the timestamp is itself split across two 16 bit * fields in the event. */ @@ -2865,17 +2878,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) switch (tx_ev_type) { case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION: - /* In case of Queue flush or FLR, we might have received - * the previous TX completion event but not the Timestamp - * events. - */ - if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask) - efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr); - - tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, - ESF_DZ_TX_DESCR_INDX); - tx_queue->completed_desc_ptr = - tx_ev_desc_ptr & tx_queue->ptr_mask; + /* Ignore this event - see above. 
*/ break; case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO: @@ -2887,8 +2890,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) ts_part = efx_ef10_extract_event_ts(event); tx_queue->completed_timestamp_major = ts_part; - efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr); - tx_queue->completed_desc_ptr = tx_queue->ptr_mask; + efx_xmit_done_single(tx_queue); break; default: diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index f1bdb04efbe4..95395d67ea2d 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -20,6 +20,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); +void efx_xmit_done_single(struct efx_tx_queue *tx_queue); int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, void *type_data); extern unsigned int efx_piobuf_size; diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index aeb5e8aa2f2a..73d4e39b5b16 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -583,6 +583,7 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel) if (tx_queue->channel) tx_queue->channel = channel; tx_queue->buffer = NULL; + tx_queue->cb_page = NULL; memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); } diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 2713300343c7..15c731d04065 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -212,12 +212,14 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, * progress on a NIC at any one time. So no need for locking. 
*/ for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++) - bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, - " %08x", le32_to_cpu(hdr[i].u32[0])); + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", + le32_to_cpu(hdr[i].u32[0])); for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++) - bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, - " %08x", le32_to_cpu(inbuf[i].u32[0])); + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", + le32_to_cpu(inbuf[i].u32[0])); netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf); } @@ -302,15 +304,15 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) */ for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) { efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4); - bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, - " %08x", le32_to_cpu(hdr.u32[0])); + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr.u32[0])); } for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) { efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len + (i * 4), 4); - bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, - " %08x", le32_to_cpu(hdr.u32[0])); + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr.u32[0])); } netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf); @@ -1417,9 +1419,11 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) } ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); - offset = snprintf(buf, len, "%u.%u.%u.%u", - le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), - le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); + offset = scnprintf(buf, len, "%u.%u.%u.%u", + le16_to_cpu(ver_words[0]), + le16_to_cpu(ver_words[1]), + le16_to_cpu(ver_words[2]), + le16_to_cpu(ver_words[3])); /* EF10 may have multiple datapath firmware variants within a * single version. Report which variants are running. @@ -1427,9 +1431,9 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { struct efx_ef10_nic_data *nic_data = efx->nic_data; - offset += snprintf(buf + offset, len - offset, " rx%x tx%x", - nic_data->rx_dpcpu_fw_id, - nic_data->tx_dpcpu_fw_id); + offset += scnprintf(buf + offset, len - offset, " rx%x tx%x", + nic_data->rx_dpcpu_fw_id, + nic_data->tx_dpcpu_fw_id); /* It's theoretically possible for the string to exceed 31 * characters, though in practice the first three version diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9f9886f222c8..8164f0edcbf0 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -208,8 +208,6 @@ struct efx_tx_buffer { * avoid cache-line ping-pong between the xmit path and the * completion path. * @merge_events: Number of TX merged completion events - * @completed_desc_ptr: Most recent completed pointer - only used with - * timestamping. * @completed_timestamp_major: Top part of the most recent tx timestamp. * @completed_timestamp_minor: Low part of the most recent tx timestamp. 
* @insert_count: Current insert pointer @@ -269,7 +267,6 @@ struct efx_tx_queue { unsigned int merge_events; unsigned int bytes_compl; unsigned int pkts_compl; - unsigned int completed_desc_ptr; u32 completed_timestamp_major; u32 completed_timestamp_minor; diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index af15a737c675..59b4f16896a8 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -560,13 +560,45 @@ efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx, u32 nic_major, u32 nic_minor, s32 correction) { + u32 sync_timestamp; ktime_t kt = { 0 }; + s16 delta; if (!(nic_major & 0x80000000)) { WARN_ON_ONCE(nic_major >> 16); - /* Use the top bits from the latest sync event. */ - nic_major &= 0xffff; - nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000); + + /* Medford provides 48 bits of timestamp, so we must get the top + * 16 bits from the timesync event state. + * + * We only have the lower 16 bits of the time now, but we do + * have a full resolution timestamp at some point in past. As + * long as the difference between the (real) now and the sync + * is less than 2^15, then we can reconstruct the difference + * between those two numbers using only the lower 16 bits of + * each. + * + * Put another way + * + * a - b = ((a mod k) - b) mod k + * + * when -k/2 < (a-b) < k/2. In our case k is 2^16. We know + * (a mod k) and b, so can calculate the delta, a - b. + * + */ + sync_timestamp = last_sync_timestamp_major(efx); + + /* Because delta is s16 this does an implicit mask down to + * 16 bits which is what we need, assuming + * MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that + * we can deal with the (unlikely) case of sync timestamps + * arriving from the future. + */ + delta = nic_major - sync_timestamp; + + /* Recover the fully specified time now, by applying the offset + * to the (fully specified) sync time. + */ + nic_major = sync_timestamp + delta; kt = ptp->nic_to_kernel_time(nic_major, nic_minor, correction); diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 04d7f41d7ed9..8aafc54a4684 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -535,6 +535,44 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, return efx_enqueue_skb(tx_queue, skb); } +void efx_xmit_done_single(struct efx_tx_queue *tx_queue) +{ + unsigned int pkts_compl = 0, bytes_compl = 0; + unsigned int read_ptr; + bool finished = false; + + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + + while (!finished) { + struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; + + if (!efx_tx_buffer_in_use(buffer)) { + struct efx_nic *efx = tx_queue->efx; + + netif_err(efx, hw, efx->net_dev, + "TX queue %d spurious single TX completion\n", + tx_queue->queue); + efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); + return; + } + + /* Need to check the flag before dequeueing. 
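The ptp.c hunk above reconstructs a full timestamp from only the 16 low bits carried in the event plus the last sync time. A minimal userspace restatement of that arithmetic (illustrative only), showing that a signed 16-bit delta recovers the full value as long as the two times are within 2^15 ticks of each other:

#include <stdint.h>
#include <stdio.h>

static uint32_t reconstruct_major(uint32_t sync_major, uint16_t now_low16)
{
	/* Implicit truncation to 16 bits, like the s16 delta in
	 * efx_ptp_mac_nic_to_ktime_correction(); negative deltas handle
	 * sync timestamps that arrive "from the future".
	 */
	int16_t delta = (int16_t)(now_low16 - (uint16_t)sync_major);

	return sync_major + delta;
}

int main(void)
{
	/* Sync at 0x0001fffe; the low 16 bits of "now" have wrapped to 3. */
	printf("%#x\n", reconstruct_major(0x0001fffe, 0x0003)); /* 0x20003 */
	return 0;
}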
*/ + if (buffer->flags & EFX_TX_BUF_SKB) + finished = true; + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); + + ++tx_queue->read_count; + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + } + + tx_queue->pkts_compl += pkts_compl; + tx_queue->bytes_compl += bytes_compl; + + EFX_WARN_ON_PARANOID(pkts_compl != 1); + + efx_xmit_done_check_empty(tx_queue); +} + void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index b1571e9789d0..70876df1da69 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -80,7 +80,6 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->xmit_more_available = false; tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) && tx_queue->channel == efx_ptp_channel(efx)); - tx_queue->completed_desc_ptr = tx_queue->ptr_mask; tx_queue->completed_timestamp_major = 0; tx_queue->completed_timestamp_minor = 0; @@ -210,10 +209,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, while (read_ptr != stop_index) { struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; - if (!(buffer->flags & EFX_TX_BUF_OPTION) && - unlikely(buffer->len == 0)) { + if (!efx_tx_buffer_in_use(buffer)) { netif_err(efx, tx_err, efx->net_dev, - "TX queue %d spurious TX completion id %x\n", + "TX queue %d spurious TX completion id %d\n", tx_queue->queue, read_ptr); efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); return; @@ -226,6 +224,19 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, } } +void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue) +{ + if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { + tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); + if (tx_queue->read_count == tx_queue->old_write_count) { + /* Ensure that read_count is flushed. */ + smp_mb(); + tx_queue->empty_read_count = + tx_queue->read_count | EFX_EMPTY_COUNT_VALID; + } + } +} + void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) { unsigned int fill_level, pkts_compl = 0, bytes_compl = 0; @@ -256,15 +267,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) netif_tx_wake_queue(tx_queue->core_txq); } - /* Check whether the hardware queue is now empty */ - if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { - tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); - if (tx_queue->read_count == tx_queue->old_write_count) { - smp_mb(); - tx_queue->empty_read_count = - tx_queue->read_count | EFX_EMPTY_COUNT_VALID; - } - } + efx_xmit_done_check_empty(tx_queue); } /* Remove buffers put into a tx_queue for the current packet. 
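One detail worth calling out in efx_xmit_done_check_empty() above: read_count and old_write_count are free-running unsigned counters, and casting their difference to a signed int keeps the "has the read pointer caught up?" test correct across wrap-around. A tiny standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int read_count = 0x00000002;       /* wrapped past zero */
	unsigned int old_write_count = 0xfffffffe;  /* recorded before the wrap */

	/* Numerically read_count < old_write_count, but the signed
	 * difference (4) shows the reader is at or ahead of the writer.
	 */
	printf("%d\n", (int)(read_count - old_write_count) >= 0); /* prints 1 */
	return 0;
}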
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h index f92f1fe3a87f..99cf7ce2f36c 100644 --- a/drivers/net/ethernet/sfc/tx_common.h +++ b/drivers/net/ethernet/sfc/tx_common.h @@ -21,6 +21,12 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, unsigned int *pkts_compl, unsigned int *bytes_compl); +static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) +{ + return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION); +} + +void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue); void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index dc50ba13a746..2d5573b3dee1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1411,7 +1411,7 @@ static int rk_gmac_probe(struct platform_device *pdev) ret = rk_gmac_clk_init(plat_dat); if (ret) - return ret; + goto err_remove_config_dt; ret = rk_gmac_powerup(plat_dat->bsp_priv); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index d0356fbd1e43..542784300620 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -24,6 +24,7 @@ static void dwmac1000_core_init(struct mac_device_info *hw, struct net_device *dev) { + struct stmmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + GMAC_CONTROL); int mtu = dev->mtu; @@ -35,7 +36,7 @@ static void dwmac1000_core_init(struct mac_device_info *hw, * Broadcom tags can look like invalid LLC/SNAP packets and cause the * hardware to truncate packets on reception. 
*/ - if (netdev_uses_dsa(dev)) + if (netdev_uses_dsa(dev) || !priv->plat->enh_desc) value &= ~GMAC_CONTROL_ACS; if (mtu > 1500) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 5836b21edd7e..7da18c9afa01 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -4405,6 +4405,8 @@ static void stmmac_init_fs(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + rtnl_lock(); + /* Create per netdev entries */ priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); @@ -4416,14 +4418,13 @@ static void stmmac_init_fs(struct net_device *dev) debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, &stmmac_dma_cap_fops); - register_netdevice_notifier(&stmmac_notifier); + rtnl_unlock(); } static void stmmac_exit_fs(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - unregister_netdevice_notifier(&stmmac_notifier); debugfs_remove_recursive(priv->dbgfs_dir); } #endif /* CONFIG_DEBUG_FS */ @@ -4940,14 +4941,14 @@ int stmmac_dvr_remove(struct device *dev) netdev_info(priv->dev, "%s: removing driver", __func__); -#ifdef CONFIG_DEBUG_FS - stmmac_exit_fs(ndev); -#endif stmmac_stop_all_dma(priv); stmmac_mac_set(priv, priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); +#ifdef CONFIG_DEBUG_FS + stmmac_exit_fs(ndev); +#endif phylink_destroy(priv->phylink); if (priv->plat->stmmac_rst) reset_control_assert(priv->plat->stmmac_rst); @@ -5166,6 +5167,7 @@ static int __init stmmac_init(void) /* Create debugfs main directory if it doesn't exist yet */ if (!stmmac_fs_dir) stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); + register_netdevice_notifier(&stmmac_notifier); #endif return 0; @@ -5174,6 +5176,7 @@ static int __init stmmac_init(void) static void __exit stmmac_exit(void) { #ifdef CONFIG_DEBUG_FS + unregister_netdevice_notifier(&stmmac_notifier); debugfs_remove_recursive(stmmac_fs_dir); #endif } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index d10ac54bf385..13fafd905db8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -663,16 +663,22 @@ int stmmac_get_platform_resources(struct platform_device *pdev, * In case the wake up interrupt is not passed from the platform * so the driver will continue to use the mac irq (ndev->irq) */ - stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + stmmac_res->wol_irq = + platform_get_irq_byname_optional(pdev, "eth_wake_irq"); if (stmmac_res->wol_irq < 0) { if (stmmac_res->wol_irq == -EPROBE_DEFER) return -EPROBE_DEFER; + dev_info(&pdev->dev, "IRQ eth_wake_irq not found\n"); stmmac_res->wol_irq = stmmac_res->irq; } - stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); - if (stmmac_res->lpi_irq == -EPROBE_DEFER) - return -EPROBE_DEFER; + stmmac_res->lpi_irq = + platform_get_irq_byname_optional(pdev, "eth_lpi"); + if (stmmac_res->lpi_irq < 0) { + if (stmmac_res->lpi_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(&pdev->dev, "IRQ eth_lpi not found\n"); + } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h index 276292bca334..53fb8141f1a6 100644 --- a/drivers/net/ethernet/xilinx/ll_temac.h +++ 
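The stmmac_platform.c change above switches to the _optional IRQ lookup so a genuinely absent line is not reported as an error by the core; the caller still has to propagate probe deferral itself. A hedged sketch of that pattern (helper name is made up, not the full driver function):

static int example_get_wake_irq(struct platform_device *pdev, int fallback_irq)
{
	int irq = platform_get_irq_byname_optional(pdev, "eth_wake_irq");

	if (irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (irq < 0) {
		dev_info(&pdev->dev, "IRQ eth_wake_irq not found\n");
		irq = fallback_irq;     /* fall back to the main MAC IRQ */
	}
	return irq;
}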
b/drivers/net/ethernet/xilinx/ll_temac.h @@ -375,10 +375,14 @@ struct temac_local { int tx_bd_next; int tx_bd_tail; int rx_bd_ci; + int rx_bd_tail; /* DMA channel control setup */ u32 tx_chnl_ctrl; u32 rx_chnl_ctrl; + u8 coalesce_count_rx; + + struct delayed_work restart_work; }; /* Wrappers for temac_ior()/temac_iow() function pointers above */ diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 6f11f52c9a9e..9461acec6f70 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -51,6 +51,7 @@ #include <linux/ip.h> #include <linux/slab.h> #include <linux/interrupt.h> +#include <linux/workqueue.h> #include <linux/dma-mapping.h> #include <linux/processor.h> #include <linux/platform_data/xilinx-ll-temac.h> @@ -367,6 +368,8 @@ static int temac_dma_bd_init(struct net_device *ndev) skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) + goto out; lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr); lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); @@ -387,12 +390,13 @@ static int temac_dma_bd_init(struct net_device *ndev) lp->tx_bd_next = 0; lp->tx_bd_tail = 0; lp->rx_bd_ci = 0; + lp->rx_bd_tail = RX_BD_NUM - 1; /* Enable RX DMA transfers */ wmb(); lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p); lp->dma_out(lp, RX_TAILDESC_PTR, - lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); + lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail)); /* Prepare for TX DMA transfer */ lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); @@ -788,6 +792,9 @@ static void temac_start_xmit_done(struct net_device *ndev) stat = be32_to_cpu(cur_p->app0); } + /* Matches barrier in temac_start_xmit */ + smp_mb(); + netif_wake_queue(ndev); } @@ -830,9 +837,19 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; if (temac_check_tx_bd_space(lp, num_frag + 1)) { - if (!netif_queue_stopped(ndev)) - netif_stop_queue(ndev); - return NETDEV_TX_BUSY; + if (netif_queue_stopped(ndev)) + return NETDEV_TX_BUSY; + + netif_stop_queue(ndev); + + /* Matches barrier in temac_start_xmit_done */ + smp_mb(); + + /* Space might have just been freed - check again */ + if (temac_check_tx_bd_space(lp, num_frag)) + return NETDEV_TX_BUSY; + + netif_wake_queue(ndev); } cur_p->app0 = 0; @@ -850,12 +867,16 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb_headlen(skb), DMA_TO_DEVICE); cur_p->len = cpu_to_be32(skb_headlen(skb)); + if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) { + dev_kfree_skb_any(skb); + ndev->stats.tx_dropped++; + return NETDEV_TX_OK; + } cur_p->phys = cpu_to_be32(skb_dma_addr); ptr_to_txbd((void *)skb, cur_p); for (ii = 0; ii < num_frag; ii++) { - lp->tx_bd_tail++; - if (lp->tx_bd_tail >= TX_BD_NUM) + if (++lp->tx_bd_tail >= TX_BD_NUM) lp->tx_bd_tail = 0; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; @@ -863,6 +884,27 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_frag_address(frag), skb_frag_size(frag), DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) { + if (--lp->tx_bd_tail < 0) + lp->tx_bd_tail = TX_BD_NUM - 1; + cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; + while (--ii >= 0) { + --frag; + dma_unmap_single(ndev->dev.parent, + be32_to_cpu(cur_p->phys), + 
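The paired smp_mb() calls added to ll_temac above implement the standard stop/re-check/wake handshake between the transmit and completion paths; without the barriers the completion side can reclaim descriptors, miss the freshly stopped queue, and leave it stopped forever. A condensed sketch with a hypothetical tx_ring_has_space() helper (not the driver code):

/* xmit path */
if (!tx_ring_has_space(lp, needed)) {
	netif_stop_queue(ndev);
	smp_mb();                          /* pairs with the completion side */
	if (!tx_ring_has_space(lp, needed))
		return NETDEV_TX_BUSY;     /* really full, try again later */
	netif_wake_queue(ndev);            /* space was freed meanwhile */
}

/* completion path, after descriptors are reclaimed */
smp_mb();                                  /* pairs with the xmit side */
netif_wake_queue(ndev);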
skb_frag_size(frag), + DMA_TO_DEVICE); + if (--lp->tx_bd_tail < 0) + lp->tx_bd_tail = TX_BD_NUM - 1; + cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; + } + dma_unmap_single(ndev->dev.parent, + be32_to_cpu(cur_p->phys), + skb_headlen(skb), DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + ndev->stats.tx_dropped++; + return NETDEV_TX_OK; + } cur_p->phys = cpu_to_be32(skb_dma_addr); cur_p->len = cpu_to_be32(skb_frag_size(frag)); cur_p->app0 = 0; @@ -884,31 +926,56 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } +static int ll_temac_recv_buffers_available(struct temac_local *lp) +{ + int available; + + if (!lp->rx_skb[lp->rx_bd_ci]) + return 0; + available = 1 + lp->rx_bd_tail - lp->rx_bd_ci; + if (available <= 0) + available += RX_BD_NUM; + return available; +} static void ll_temac_recv(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); - struct sk_buff *skb, *new_skb; - unsigned int bdstat; - struct cdmac_bd *cur_p; - dma_addr_t tail_p, skb_dma_addr; - int length; unsigned long flags; + int rx_bd; + bool update_tail = false; spin_lock_irqsave(&lp->rx_lock, flags); - tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; - cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; - - bdstat = be32_to_cpu(cur_p->app0); - while ((bdstat & STS_CTRL_APP0_CMPLT)) { + /* Process all received buffers, passing them on network + * stack. After this, the buffer descriptors will be in an + * un-allocated stage, where no skb is allocated for it, and + * they are therefore not available for TEMAC/DMA. + */ + do { + struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci]; + struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci]; + unsigned int bdstat = be32_to_cpu(bd->app0); + int length; + + /* While this should not normally happen, we can end + * here when GFP_ATOMIC allocations fail, and we + * therefore have un-allocated buffers. + */ + if (!skb) + break; - skb = lp->rx_skb[lp->rx_bd_ci]; - length = be32_to_cpu(cur_p->app4) & 0x3FFF; + /* Loop over all completed buffer descriptors */ + if (!(bdstat & STS_CTRL_APP0_CMPLT)) + break; - dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys), + dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys), XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); + /* The buffer is not valid for DMA anymore */ + bd->phys = 0; + bd->len = 0; + length = be32_to_cpu(bd->app4) & 0x3FFF; skb_put(skb, length); skb->protocol = eth_type_trans(skb, ndev); skb_checksum_none_assert(skb); @@ -923,43 +990,102 @@ static void ll_temac_recv(struct net_device *ndev) * (back) for proper IP checksum byte order * (be16). */ - skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF); + skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF); skb->ip_summed = CHECKSUM_COMPLETE; } if (!skb_defer_rx_timestamp(skb)) netif_rx(skb); + /* The skb buffer is now owned by network stack above */ + lp->rx_skb[lp->rx_bd_ci] = NULL; ndev->stats.rx_packets++; ndev->stats.rx_bytes += length; - new_skb = netdev_alloc_skb_ip_align(ndev, - XTE_MAX_JUMBO_FRAME_SIZE); - if (!new_skb) { - spin_unlock_irqrestore(&lp->rx_lock, flags); - return; + rx_bd = lp->rx_bd_ci; + if (++lp->rx_bd_ci >= RX_BD_NUM) + lp->rx_bd_ci = 0; + } while (rx_bd != lp->rx_bd_tail); + + /* DMA operations will halt when the last buffer descriptor is + * processed (ie. the one pointed to by RX_TAILDESC_PTR). + * When that happens, no more interrupt events will be + * generated. No IRQ_COAL or IRQ_DLY, and not even an + * IRQ_ERR. To avoid stalling, we schedule a delayed work + * when there is a potential risk of that happening. 
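The mapping-failure handling added above unwinds DMA mappings when a fragment fails to map: unmap everything mapped so far, drop the skb, count it as tx_dropped, and return NETDEV_TX_OK so the stack does not retry a packet that can never be mapped. A hedged sketch with made-up helpers (frag_addr()/frag_len() are placeholders):

for (i = 0; i < nfrags; i++) {
	dma_addr[i] = dma_map_single(dev, frag_addr(skb, i),
				     frag_len(skb, i), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr[i])) {
		while (--i >= 0)
			dma_unmap_single(dev, dma_addr[i],
					 frag_len(skb, i), DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;    /* drop, do not ask for a retry */
	}
}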
The work + * will call this function, and thus re-schedule itself until + * enough buffers are available again. + */ + if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx) + schedule_delayed_work(&lp->restart_work, HZ / 1000); + + /* Allocate new buffers for those buffer descriptors that were + * passed to network stack. Note that GFP_ATOMIC allocations + * can fail (e.g. when a larger burst of GFP_ATOMIC + * allocations occurs), so while we try to allocate all + * buffers in the same interrupt where they were processed, we + * continue with what we could get in case of allocation + * failure. Allocation of remaining buffers will be retried + * in following calls. + */ + while (1) { + struct sk_buff *skb; + struct cdmac_bd *bd; + dma_addr_t skb_dma_addr; + + rx_bd = lp->rx_bd_tail + 1; + if (rx_bd >= RX_BD_NUM) + rx_bd = 0; + bd = &lp->rx_bd_v[rx_bd]; + + if (bd->phys) + break; /* All skb's allocated */ + + skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE); + if (!skb) { + dev_warn(&ndev->dev, "skb alloc failed\n"); + break; } - cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); - skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data, + skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); - cur_p->phys = cpu_to_be32(skb_dma_addr); - cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); - lp->rx_skb[lp->rx_bd_ci] = new_skb; + if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, + skb_dma_addr))) { + dev_kfree_skb_any(skb); + break; + } - lp->rx_bd_ci++; - if (lp->rx_bd_ci >= RX_BD_NUM) - lp->rx_bd_ci = 0; + bd->phys = cpu_to_be32(skb_dma_addr); + bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); + bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); + lp->rx_skb[rx_bd] = skb; - cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; - bdstat = be32_to_cpu(cur_p->app0); + lp->rx_bd_tail = rx_bd; + update_tail = true; + } + + /* Move tail pointer when buffers have been allocated */ + if (update_tail) { + lp->dma_out(lp, RX_TAILDESC_PTR, + lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail); } - lp->dma_out(lp, RX_TAILDESC_PTR, tail_p); spin_unlock_irqrestore(&lp->rx_lock, flags); } +/* Function scheduled to ensure a restart in case of DMA halt + * condition caused by running out of buffer descriptors. 
+ */ +static void ll_temac_restart_work_func(struct work_struct *work) +{ + struct temac_local *lp = container_of(work, struct temac_local, + restart_work.work); + struct net_device *ndev = lp->ndev; + + ll_temac_recv(ndev); +} + static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev) { struct net_device *ndev = _ndev; @@ -1052,6 +1178,8 @@ static int temac_stop(struct net_device *ndev) dev_dbg(&ndev->dev, "temac_close()\n"); + cancel_delayed_work_sync(&lp->restart_work); + free_irq(lp->tx_irq, ndev); free_irq(lp->rx_irq, ndev); @@ -1173,6 +1301,7 @@ static int temac_probe(struct platform_device *pdev) lp->dev = &pdev->dev; lp->options = XTE_OPTION_DEFAULTS; spin_lock_init(&lp->rx_lock); + INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func); /* Setup mutex for synchronization of indirect register access */ if (pdata) { @@ -1279,6 +1408,7 @@ static int temac_probe(struct platform_device *pdev) */ lp->tx_chnl_ctrl = 0x10220000; lp->rx_chnl_ctrl = 0xff070000; + lp->coalesce_count_rx = 0x07; /* Finished with the DMA node; drop the reference */ of_node_put(dma_np); @@ -1310,11 +1440,14 @@ static int temac_probe(struct platform_device *pdev) (pdata->tx_irq_count << 16); else lp->tx_chnl_ctrl = 0x10220000; - if (pdata->rx_irq_timeout || pdata->rx_irq_count) + if (pdata->rx_irq_timeout || pdata->rx_irq_count) { lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) | (pdata->rx_irq_count << 16); - else + lp->coalesce_count_rx = pdata->rx_irq_count; + } else { lp->rx_chnl_ctrl = 0xff070000; + lp->coalesce_count_rx = 0x07; + } } /* Error handle returned DMA RX and TX interrupts */ diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 75757e9954ba..09f279c0182b 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1845,8 +1845,6 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head) if (!net_eq(dev_net(geneve->dev), net)) unregister_netdevice_queue(geneve->dev, head); } - - WARN_ON_ONCE(!list_empty(&gn->sock_list)); } static void __net_exit geneve_exit_batch_net(struct list_head *net_list) @@ -1861,6 +1859,12 @@ static void __net_exit geneve_exit_batch_net(struct list_head *net_list) /* unregister the devices gathered above */ unregister_netdevice_many(&list); rtnl_unlock(); + + list_for_each_entry(net, net_list, exit_list) { + const struct geneve_net *gn = net_generic(net, geneve_net_id); + + WARN_ON_ONCE(!list_empty(&gn->sock_list)); + } } static struct pernet_operations geneve_net_ops = { diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index ae3f3084c2ed..1b320bcf150a 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -99,7 +99,7 @@ static struct netvsc_device *alloc_net_device(void) init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; - net_device->tx_disable = false; + net_device->tx_disable = true; net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 65e12cb07f45..2c0a24c606fc 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1068,6 +1068,7 @@ static int netvsc_attach(struct net_device *ndev, } /* In any case device is now ready */ + nvdev->tx_disable = false; netif_device_attach(ndev); /* Note: enable and attach happen when sub-channels setup */ @@ -2476,6 +2477,8 @@ static int netvsc_probe(struct hv_device *dev, else net->max_mtu = ETH_DATA_LEN; + nvdev->tx_disable = false; + ret = register_netdevice(net); if 
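The restart work introduced above follows the usual delayed-work lifecycle: set it up once at probe time, re-arm it whenever the receive path is short on buffer descriptors, and cancel it synchronously before the device is torn down. A compressed view of those three call sites (names as in the patch, surrounding context omitted):

INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);   /* probe */

if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)    /* rx path */
	schedule_delayed_work(&lp->restart_work, HZ / 1000);

cancel_delayed_work_sync(&lp->restart_work);                        /* stop */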
(ret != 0) { pr_err("Unable to register netdev.\n"); diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 242b9b0943f8..7fe306e76281 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -75,7 +75,7 @@ static void ifb_ri_tasklet(unsigned long _txp) } while ((skb = __skb_dequeue(&txp->tq)) != NULL) { - skb->tc_redirected = 0; + skb->redirected = 0; skb->tc_skip_classify = 1; u64_stats_update_begin(&txp->tsync); @@ -96,7 +96,7 @@ static void ifb_ri_tasklet(unsigned long _txp) rcu_read_unlock(); skb->skb_iif = txp->dev->ifindex; - if (!skb->tc_from_ingress) { + if (!skb->from_ingress) { dev_queue_xmit(skb); } else { skb_pull_rcsum(skb, skb->mac_len); @@ -243,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) txp->rx_bytes += skb->len; u64_stats_update_end(&txp->rsync); - if (!skb->tc_redirected || !skb->skb_iif) { + if (!skb->redirected || !skb->skb_iif) { dev_kfree_skb(skb); dev->stats.rx_dropped++; return NETDEV_TX_OK; diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 30cd0c4f0be0..8801d093135c 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -293,6 +293,7 @@ void ipvlan_process_multicast(struct work_struct *work) } if (dev) dev_put(dev); + cond_resched(); } } @@ -498,19 +499,21 @@ static int ipvlan_process_outbound(struct sk_buff *skb) struct ethhdr *ethh = eth_hdr(skb); int ret = NET_XMIT_DROP; - /* In this mode we dont care about multicast and broadcast traffic */ - if (is_multicast_ether_addr(ethh->h_dest)) { - pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n", - ntohs(skb->protocol)); - kfree_skb(skb); - goto out; - } - /* The ipvlan is a pseudo-L2 device, so the packets that we receive * will have L2; which need to discarded and processed further * in the net-ns of the main-device. 
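The cond_resched() added to ipvlan_process_multicast() above (and to macvlan further down) addresses the same issue in both drivers: these work-queue handlers can drain an unbounded multicast backlog, and yielding once per iteration keeps a large burst from monopolising the CPU on non-preemptible kernels. Schematically, with a hypothetical per-packet helper:

while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
	process_one_multicast_skb(skb);   /* hypothetical helper */
	cond_resched();
}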
*/ if (skb_mac_header_was_set(skb)) { + /* In this mode we dont care about + * multicast and broadcast traffic */ + if (is_multicast_ether_addr(ethh->h_dest)) { + pr_debug_ratelimited( + "Dropped {multi|broad}cast of type=[%x]\n", + ntohs(skb->protocol)); + kfree_skb(skb); + goto out; + } + skb_pull(skb, sizeof(*ethh)); skb->mac_header = (typeof(skb->mac_header))~0U; skb_reset_network_header(skb); diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index a70662261a5a..f195f278a83a 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -164,7 +164,6 @@ static void ipvlan_uninit(struct net_device *dev) static int ipvlan_open(struct net_device *dev) { struct ipvl_dev *ipvlan = netdev_priv(dev); - struct net_device *phy_dev = ipvlan->phy_dev; struct ipvl_addr *addr; if (ipvlan->port->mode == IPVLAN_MODE_L3 || @@ -178,7 +177,7 @@ static int ipvlan_open(struct net_device *dev) ipvlan_ht_addr_add(ipvlan, addr); rcu_read_unlock(); - return dev_uc_add(phy_dev, phy_dev->dev_addr); + return 0; } static int ipvlan_stop(struct net_device *dev) @@ -190,8 +189,6 @@ static int ipvlan_stop(struct net_device *dev) dev_uc_unsync(phy_dev, dev); dev_mc_unsync(phy_dev, dev); - dev_uc_del(phy_dev, phy_dev->dev_addr); - rcu_read_lock(); list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) ipvlan_ht_addr_del(addr); diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 45bfd99f17fa..92bc2b2df660 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -19,6 +19,7 @@ #include <net/gro_cells.h> #include <net/macsec.h> #include <linux/phy.h> +#include <linux/if_arp.h> #include <uapi/linux/if_macsec.h> @@ -424,6 +425,11 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb) return (struct macsec_eth_header *)skb_mac_header(skb); } +static sci_t dev_to_sci(struct net_device *dev, __be16 port) +{ + return make_sci(dev->dev_addr, port); +} + static void __macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa) { @@ -3268,6 +3274,20 @@ static int macsec_set_mac_address(struct net_device *dev, void *p) out: ether_addr_copy(dev->dev_addr, addr->sa_data); + macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES); + + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(macsec, &ctx); + if (ops) { + ctx.secy = &macsec->secy; + macsec_offload(ops->mdo_upd_secy, &ctx); + } + } + return 0; } @@ -3342,6 +3362,7 @@ static const struct device_type macsec_type = { static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, + [IFLA_MACSEC_PORT] = { .type = NLA_U16 }, [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, @@ -3592,11 +3613,6 @@ static bool sci_exists(struct net_device *dev, sci_t sci) return false; } -static sci_t dev_to_sci(struct net_device *dev, __be16 port) -{ - return make_sci(dev->dev_addr, port); -} - static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) { struct macsec_dev *macsec = macsec_priv(dev); @@ -3650,6 +3666,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev, real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; + if (real_dev->type != ARPHRD_ETHER) + return -EINVAL; dev->priv_flags |= IFF_MACSEC; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c 
index 81aa7adf4801..e7289d67268f 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -334,6 +334,8 @@ static void macvlan_process_broadcast(struct work_struct *w) if (src) dev_put(src->dev); consume_skb(skb); + + cond_resched(); } } diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index e27fc1a4516d..3811f1bde84e 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -29,9 +29,9 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp, return -ENOMEM; p = buf; - p += snprintf(p, bufsize - (p - buf), - "SA count=%u tx=%u\n", - ipsec->count, ipsec->tx); + p += scnprintf(p, bufsize - (p - buf), + "SA count=%u tx=%u\n", + ipsec->count, ipsec->tx); for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) { struct nsim_sa *sap = &ipsec->sa[i]; @@ -39,18 +39,18 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp, if (!sap->used) continue; - p += snprintf(p, bufsize - (p - buf), - "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n", - i, (sap->rx ? 'r' : 't'), sap->ipaddr[0], - sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]); - p += snprintf(p, bufsize - (p - buf), - "sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n", - i, be32_to_cpu(sap->xs->id.spi), - sap->xs->id.proto, sap->salt, sap->crypt); - p += snprintf(p, bufsize - (p - buf), - "sa[%i] key=0x%08x %08x %08x %08x\n", - i, sap->key[0], sap->key[1], - sap->key[2], sap->key[3]); + p += scnprintf(p, bufsize - (p - buf), + "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n", + i, (sap->rx ? 'r' : 't'), sap->ipaddr[0], + sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]); + p += scnprintf(p, bufsize - (p - buf), + "sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n", + i, be32_to_cpu(sap->xs->id.spi), + sap->xs->id.proto, sap->salt, sap->crypt); + p += scnprintf(p, bufsize - (p - buf), + "sa[%i] key=0x%08x %08x %08x %08x\n", + i, sap->key[0], sap->key[1], + sap->key[2], sap->key[3]); } len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf); diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index 23f1958ba6ad..459fb2069c7e 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -73,6 +73,7 @@ static struct phy_driver bcm63xx_driver[] = { /* same phy as above, with just a different OUI */ .phy_id = 0x002bdc00, .phy_id_mask = 0xfffffc00, + .name = "Broadcom BCM63XX (2)", /* PHY_BASIC_FEATURES */ .flags = PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 7d68b28bb893..a62229a8b1a4 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -410,7 +410,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev) struct device_node *np = phydev->mdio.dev.of_node; int ret; - /* Aneg firsly. */ + /* Aneg firstly. */ ret = genphy_config_aneg(phydev); /* Then we can set up the delay. */ @@ -463,7 +463,7 @@ static int bcm54616s_config_aneg(struct phy_device *phydev) { int ret; - /* Aneg firsly. */ + /* Aneg firstly. 
*/ if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX) ret = genphy_c37_config_aneg(phydev); else diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 967f57ed0b65..9a07ad137c2e 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -28,7 +28,8 @@ #define DP83867_CTRL 0x1f /* Extended Registers */ -#define DP83867_CFG4 0x0031 +#define DP83867_FLD_THR_CFG 0x002e +#define DP83867_CFG4 0x0031 #define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6)) #define DP83867_CFG4_SGMII_ANEG_TIMER_11MS (3 << 5) #define DP83867_CFG4_SGMII_ANEG_TIMER_800US (2 << 5) @@ -91,6 +92,7 @@ #define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK GENMASK(2, 0) #define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT 0 #define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2) +#define DP83867_STRAP_STS2_STRAP_FLD BIT(10) /* PHY CTRL bits */ #define DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT 14 @@ -125,6 +127,9 @@ /* CFG4 bits */ #define DP83867_CFG4_PORT_MIRROR_EN BIT(0) +/* FLD_THR_CFG */ +#define DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK 0x7 + enum { DP83867_PORT_MIRROING_KEEP, DP83867_PORT_MIRROING_EN, @@ -476,6 +481,20 @@ static int dp83867_config_init(struct phy_device *phydev) phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, BIT(7)); + bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2); + if (bs & DP83867_STRAP_STS2_STRAP_FLD) { + /* When using strap to enable FLD, the ENERGY_LOST_FLD_THR will + * be set to 0x2. This may causes the PHY link to be unstable - + * the default value 0x1 need to be restored. + */ + ret = phy_modify_mmd(phydev, DP83867_DEVADDR, + DP83867_FLD_THR_CFG, + DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK, + 0x1); + if (ret) + return ret; + } + if (phy_interface_is_rgmii(phydev) || phydev->interface == PHY_INTERFACE_MODE_SGMII) { val = phy_read(phydev, MII_DP83867_PHYCTRL); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 28e33ece4ce1..9a8badafea8a 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1306,6 +1306,9 @@ static int marvell_read_status_page_an(struct phy_device *phydev, } } + if (!(status & MII_M1011_PHY_STATUS_RESOLVED)) + return 0; + if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) phydev->duplex = DUPLEX_FULL; else @@ -1365,6 +1368,8 @@ static int marvell_read_status_page(struct phy_device *phydev, int page) linkmode_zero(phydev->lp_advertising); phydev->pause = 0; phydev->asym_pause = 0; + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; if (phydev->autoneg == AUTONEG_ENABLE) err = marvell_read_status_page_an(phydev, fiber, status); diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c index 7e9975d25066..f1ded03f0229 100644 --- a/drivers/net/phy/mdio-bcm-iproc.c +++ b/drivers/net/phy/mdio-bcm-iproc.c @@ -178,6 +178,23 @@ static int iproc_mdio_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +int iproc_mdio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct iproc_mdio_priv *priv = platform_get_drvdata(pdev); + + /* restore the mii clock configuration */ + iproc_mdio_config_clk(priv->base); + + return 0; +} + +static const struct dev_pm_ops iproc_mdio_pm_ops = { + .resume = iproc_mdio_resume +}; +#endif /* CONFIG_PM_SLEEP */ + static const struct of_device_id iproc_mdio_of_match[] = { { .compatible = "brcm,iproc-mdio", }, { /* sentinel */ }, @@ -188,6 +205,9 @@ static struct platform_driver iproc_mdio_driver = { .driver = { .name = "iproc-mdio", .of_match_table = iproc_mdio_of_match, +#ifdef CONFIG_PM_SLEEP + .pm 
= &iproc_mdio_pm_ops, +#endif }, .probe = iproc_mdio_probe, .remove = iproc_mdio_remove, diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 4a28fb29adaa..fbd36891ee64 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -242,11 +242,9 @@ static int unimac_mdio_probe(struct platform_device *pdev) return -ENOMEM; } - priv->clk = devm_clk_get(&pdev->dev, NULL); - if (PTR_ERR(priv->clk) == -EPROBE_DEFER) + priv->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); - else - priv->clk = NULL; ret = clk_prepare_enable(priv->clk); if (ret) diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 88d409e48c1f..aad6809ebe39 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -288,8 +288,13 @@ static int mdio_mux_iproc_suspend(struct device *dev) static int mdio_mux_iproc_resume(struct device *dev) { struct iproc_mdiomux_desc *md = dev_get_drvdata(dev); + int rc; - clk_prepare_enable(md->core_clk); + rc = clk_prepare_enable(md->core_clk); + if (rc) { + dev_err(md->dev, "failed to enable core clk\n"); + return rc; + } mdio_mux_iproc_config(md); return 0; diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index 937ac7da2789..f686f40f6bdc 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -345,11 +345,11 @@ enum macsec_bank { BIT(VSC8531_FORCE_LED_OFF) | \ BIT(VSC8531_FORCE_LED_ON)) -#define MSCC_VSC8584_REVB_INT8051_FW "mscc_vsc8584_revb_int8051_fb48.bin" +#define MSCC_VSC8584_REVB_INT8051_FW "microchip/mscc_vsc8584_revb_int8051_fb48.bin" #define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800 #define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48 -#define MSCC_VSC8574_REVB_INT8051_FW "mscc_vsc8574_revb_int8051_29e8.bin" +#define MSCC_VSC8574_REVB_INT8051_FW "microchip/mscc_vsc8574_revb_int8051_29e8.bin" #define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000 #define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8 diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index a1caeee12236..dd2e23fb67c0 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -167,7 +167,7 @@ EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg); */ int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart) { - int ret = 0; + int ret; if (!restart) { /* Configure and restart aneg if it wasn't set before */ @@ -180,9 +180,9 @@ int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart) } if (restart) - ret = genphy_c45_restart_aneg(phydev); + return genphy_c45_restart_aneg(phydev); - return ret; + return 0; } EXPORT_SYMBOL_GPL(genphy_c45_check_and_restart_aneg); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index d76e038cf2cb..355bfdef48d2 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -727,7 +727,8 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) phy_trigger_machine(phydev); } - if (phy_clear_interrupt(phydev)) + /* did_interrupt() may have cleared the interrupt already */ + if (!phydev->drv->did_interrupt && phy_clear_interrupt(phydev)) goto phy_err; return IRQ_HANDLED; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 6a5056e0ae77..28e3c5c0e3c3 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -247,7 +247,7 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) * MDIO bus driver and clock gated at this point. 
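The mdio-bcm-unimac change above leans on the semantics of devm_clk_get_optional(): a missing clock yields NULL, which clk_prepare_enable() treats as a no-op, while real errors, including -EPROBE_DEFER, are still returned to the caller. The resulting idiom:

priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(priv->clk))
	return PTR_ERR(priv->clk);

ret = clk_prepare_enable(priv->clk);   /* fine even when priv->clk is NULL */
if (ret)
	return ret;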
*/ if (!netdev) - return !phydev->suspended; + goto out; if (netdev->wol_enabled) return false; @@ -267,7 +267,8 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) if (device_may_wakeup(&netdev->dev)) return false; - return true; +out: + return !phydev->suspended; } static int mdio_bus_phy_suspend(struct device *dev) @@ -285,6 +286,8 @@ static int mdio_bus_phy_suspend(struct device *dev) if (!mdio_bus_phy_may_suspend(phydev)) return 0; + phydev->suspended_by_mdio_bus = 1; + return phy_suspend(phydev); } @@ -293,9 +296,11 @@ static int mdio_bus_phy_resume(struct device *dev) struct phy_device *phydev = to_phy_device(dev); int ret; - if (!mdio_bus_phy_may_suspend(phydev)) + if (!phydev->suspended_by_mdio_bus) goto no_resume; + phydev->suspended_by_mdio_bus = 0; + ret = phy_resume(phydev); if (ret < 0) return ret; @@ -1792,7 +1797,7 @@ EXPORT_SYMBOL(genphy_restart_aneg); */ int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart) { - int ret = 0; + int ret; if (!restart) { /* Advertisement hasn't changed, but maybe aneg was never on to @@ -1807,9 +1812,9 @@ int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart) } if (restart) - ret = genphy_restart_aneg(phydev); + return genphy_restart_aneg(phydev); - return ret; + return 0; } EXPORT_SYMBOL(genphy_check_and_restart_aneg); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 70b9a143db84..6e66b8e77ec7 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -761,8 +761,14 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy, config.interface = interface; ret = phylink_validate(pl, supported, &config); - if (ret) + if (ret) { + phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %d\n", + phy_modes(config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, phy->supported, + __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising, + ret); return ret; + } phy->phylink = pl; phy->phy_link_change = phylink_phy_change; diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index d949ea7b4f8c..6900c68260e0 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -572,13 +572,15 @@ static void sfp_upstream_clear(struct sfp_bus *bus) * the sfp_bus structure, incrementing its reference count. This must * be put via sfp_bus_put() when done. * - * Returns: on success, a pointer to the sfp_bus structure, - * %NULL if no SFP is specified, - * on failure, an error pointer value: - * corresponding to the errors detailed for - * fwnode_property_get_reference_args(). - * %-ENOMEM if we failed to allocate the bus. - * an error from the upstream's connect_phy() method. + * Returns: + * - on success, a pointer to the sfp_bus structure, + * - %NULL if no SFP is specified, + * - on failure, an error pointer value: + * + * - corresponding to the errors detailed for + * fwnode_property_get_reference_args(). + * - %-ENOMEM if we failed to allocate the bus. + * - an error from the upstream's connect_phy() method. */ struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode) { @@ -612,13 +614,15 @@ EXPORT_SYMBOL_GPL(sfp_bus_find_fwnode); * the SFP bus using sfp_register_upstream(). This takes a reference on the * bus, so it is safe to put the bus after this call. * - * Returns: on success, a pointer to the sfp_bus structure, - * %NULL if no SFP is specified, - * on failure, an error pointer value: - * corresponding to the errors detailed for - * fwnode_property_get_reference_args(). 
- * %-ENOMEM if we failed to allocate the bus. - * an error from the upstream's connect_phy() method. + * Returns: + * - on success, a pointer to the sfp_bus structure, + * - %NULL if no SFP is specified, + * - on failure, an error pointer value: + * + * - corresponding to the errors detailed for + * fwnode_property_get_reference_args(). + * - %-ENOMEM if we failed to allocate the bus. + * - an error from the upstream's connect_phy() method. */ int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream, const struct sfp_upstream_ops *ops) diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c index 58a69f830d29..f78ceba42e57 100644 --- a/drivers/net/slip/slhc.c +++ b/drivers/net/slip/slhc.c @@ -232,7 +232,7 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize, struct cstate *cs = lcs->next; unsigned long deltaS, deltaA; short changes = 0; - int hlen; + int nlen, hlen; unsigned char new_seq[16]; unsigned char *cp = new_seq; struct iphdr *ip; @@ -248,6 +248,8 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize, return isize; ip = (struct iphdr *) icp; + if (ip->version != 4 || ip->ihl < 5) + return isize; /* Bail if this packet isn't TCP, or is an IP fragment */ if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) { @@ -258,10 +260,14 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize, comp->sls_o_tcp++; return isize; } - /* Extract TCP header */ + nlen = ip->ihl * 4; + if (isize < nlen + sizeof(*th)) + return isize; - th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4); - hlen = ip->ihl*4 + th->doff*4; + th = (struct tcphdr *)(icp + nlen); + if (th->doff < sizeof(struct tcphdr) / 4) + return isize; + hlen = nlen + th->doff * 4; /* Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or * some other control bit is set). 
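The slhc_compress() hunk above adds sanity checks before the TCP header is dereferenced: verify the IP version and header length, make sure the buffer actually contains the full IP header plus a minimal TCP header, and validate the TCP data offset. A standalone restatement of those checks (illustrative, not the slhc code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool ipv4_tcp_headers_fit(const uint8_t *pkt, size_t len)
{
	size_t nlen, doff;

	if (len < 20 || (pkt[0] >> 4) != 4)
		return false;                      /* not IPv4 or too short */
	nlen = (pkt[0] & 0x0f) * 4;                /* IP header length */
	if (nlen < 20 || len < nlen + 20)
		return false;                      /* bogus ihl or truncated */
	doff = (size_t)(pkt[nlen + 12] >> 4) * 4;  /* TCP data offset */
	return doff >= 20 && len >= nlen + doff;
}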
Also uncompressible if diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 6f4d7ba8b109..babb01888b78 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -863,7 +863,10 @@ err_free_chan: tty->disc_data = NULL; clear_bit(SLF_INUSE, &sl->flags); sl_free_netdev(sl->dev); + /* do not call free_netdev before rtnl_unlock */ + rtnl_unlock(); free_netdev(sl->dev); + return err; err_exit: rtnl_unlock(); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index ca70a1d840eb..4004f98e50d9 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2240,6 +2240,8 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = { [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG }, [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 }, [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY }, + [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32 }, + [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32 }, }; static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 3b7a3b8a5e06..6c738a271257 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -337,6 +337,9 @@ static void qmi_wwan_netdev_setup(struct net_device *net) netdev_dbg(net, "mode: raw IP\n"); } else if (!net->header_ops) { /* don't bother if already set */ ether_setup(net); + /* Restoring min/max mtu values set originally by usbnet */ + net->min_mtu = 0; + net->max_mtu = ETH_MAX_MTU; clear_bit(EVENT_NO_IP_ALIGN, &dev->flags); netdev_dbg(net, "mode: Ethernet\n"); } @@ -1207,6 +1210,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */ {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */ + {QMI_FIXED_INTF(0x1690, 0x7588, 4)}, /* ASKEY WWHC050 */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 78ddbaf6401b..95b19ce96513 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3221,6 +3221,8 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired) } msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + break; } return data; @@ -5402,7 +5404,10 @@ static void r8153_init(struct r8152 *tp) if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + break; } data = r8153_phy_status(tp, 0); @@ -5539,7 +5544,10 @@ static void r8153b_init(struct r8152 *tp) if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + break; } data = r8153_phy_status(tp, 0); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 8cdc4415fa70..d4cbb9e8c63f 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -328,7 +328,7 @@ static void veth_get_stats64(struct net_device *dev, rcu_read_lock(); peer = rcu_dereference(priv->peer); if (peer) { - tot->rx_dropped += veth_stats_tx(peer, &packets, &bytes); + veth_stats_tx(peer, &packets, &bytes); tot->rx_bytes += bytes; tot->rx_packets += packets; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index d3b08b76b1ec..45308b3350cf 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2779,10 +2779,19 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev 
*vxlan, /* Setup stats when device is created */ static int vxlan_init(struct net_device *dev) { + struct vxlan_dev *vxlan = netdev_priv(dev); + int err; + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; + err = gro_cells_init(&vxlan->gro_cells, dev); + if (err) { + free_percpu(dev->tstats); + return err; + } + return 0; } @@ -3043,8 +3052,6 @@ static void vxlan_setup(struct net_device *dev) vxlan->dev = dev; - gro_cells_init(&vxlan->gro_cells, dev); - for (h = 0; h < FDB_HASH_SIZE; ++h) { spin_lock_init(&vxlan->hash_lock[h]); INIT_HLIST_HEAD(&vxlan->fdb_head[h]); diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index 43db442b1373..3ac3f8570ca1 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -122,7 +122,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) u32 mtu; int ret; - if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) { + if (unlikely(!wg_check_packet_protocol(skb))) { ret = -EPROTONOSUPPORT; net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name); goto err; @@ -258,6 +258,8 @@ static void wg_setup(struct net_device *dev) enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA }; + const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) + + max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); dev->netdev_ops = &netdev_ops; dev->hard_header_len = 0; @@ -271,9 +273,8 @@ static void wg_setup(struct net_device *dev) dev->features |= WG_NETDEV_FEATURES; dev->hw_features |= WG_NETDEV_FEATURES; dev->hw_enc_features |= WG_NETDEV_FEATURES; - dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH - - sizeof(struct udphdr) - - max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); + dev->mtu = ETH_DATA_LEN - overhead; + dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead; SET_NETDEV_DEVTYPE(dev, &device_type); diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c index bda26405497c..802099c8828a 100644 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@ -411,11 +411,7 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs) peer = wg_peer_create(wg, public_key, preshared_key); if (IS_ERR(peer)) { - /* Similar to the above, if the key is invalid, we skip - * it without fanfare, so that services don't need to - * worry about doing key validation themselves. - */ - ret = PTR_ERR(peer) == -EKEYREJECTED ? 
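For reference on the wireguard device.c change above, assuming the in-tree constant MESSAGE_MINIMUM_LENGTH is 32 (4-byte type, 4-byte receiver index, 8-byte counter, 16-byte auth tag), the factored-out overhead works out to the familiar default:

/* overhead = 32 + sizeof(struct udphdr) + sizeof(struct ipv6hdr)
 *          = 32 + 8 + 40 = 80 bytes
 * dev->mtu = ETH_DATA_LEN - overhead = 1500 - 80 = 1420
 */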
0 : PTR_ERR(peer); + ret = PTR_ERR(peer); peer = NULL; goto out; } @@ -569,7 +565,7 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info) private_key); list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) { - BUG_ON(!wg_noise_precompute_static_static(peer)); + wg_noise_precompute_static_static(peer); wg_noise_expire_current_peer_keypairs(peer); } wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index 919d9d866446..708dc61c974f 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -44,32 +44,23 @@ void __init wg_noise_init(void) } /* Must hold peer->handshake.static_identity->lock */ -bool wg_noise_precompute_static_static(struct wg_peer *peer) +void wg_noise_precompute_static_static(struct wg_peer *peer) { - bool ret; - down_write(&peer->handshake.lock); - if (peer->handshake.static_identity->has_identity) { - ret = curve25519( - peer->handshake.precomputed_static_static, + if (!peer->handshake.static_identity->has_identity || + !curve25519(peer->handshake.precomputed_static_static, peer->handshake.static_identity->static_private, - peer->handshake.remote_static); - } else { - u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 }; - - ret = curve25519(empty, empty, peer->handshake.remote_static); + peer->handshake.remote_static)) memset(peer->handshake.precomputed_static_static, 0, NOISE_PUBLIC_KEY_LEN); - } up_write(&peer->handshake.lock); - return ret; } -bool wg_noise_handshake_init(struct noise_handshake *handshake, - struct noise_static_identity *static_identity, - const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], - const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], - struct wg_peer *peer) +void wg_noise_handshake_init(struct noise_handshake *handshake, + struct noise_static_identity *static_identity, + const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], + const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], + struct wg_peer *peer) { memset(handshake, 0, sizeof(*handshake)); init_rwsem(&handshake->lock); @@ -81,7 +72,7 @@ bool wg_noise_handshake_init(struct noise_handshake *handshake, NOISE_SYMMETRIC_KEY_LEN); handshake->static_identity = static_identity; handshake->state = HANDSHAKE_ZEROED; - return wg_noise_precompute_static_static(peer); + wg_noise_precompute_static_static(peer); } static void handshake_zero(struct noise_handshake *handshake) @@ -403,6 +394,19 @@ static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], return true; } +static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN], + u8 key[NOISE_SYMMETRIC_KEY_LEN], + const u8 precomputed[NOISE_PUBLIC_KEY_LEN]) +{ + static u8 zero_point[NOISE_PUBLIC_KEY_LEN]; + if (unlikely(!crypto_memneq(precomputed, zero_point, NOISE_PUBLIC_KEY_LEN))) + return false; + kdf(chaining_key, key, NULL, precomputed, NOISE_HASH_LEN, + NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, + chaining_key); + return true; +} + static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len) { struct blake2s_state blake; @@ -531,10 +535,9 @@ wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, NOISE_PUBLIC_KEY_LEN, key, handshake->hash); /* ss */ - kdf(handshake->chaining_key, key, NULL, - handshake->precomputed_static_static, NOISE_HASH_LEN, - NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, - handshake->chaining_key); + if (!mix_precomputed_dh(handshake->chaining_key, key, + handshake->precomputed_static_static)) + goto out; /* {t} */ tai64n_now(timestamp); 
@@ -595,9 +598,9 @@ wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, handshake = &peer->handshake; /* ss */ - kdf(chaining_key, key, NULL, handshake->precomputed_static_static, - NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, - chaining_key); + if (!mix_precomputed_dh(chaining_key, key, + handshake->precomputed_static_static)) + goto out; /* {t} */ if (!message_decrypt(t, src->encrypted_timestamp, diff --git a/drivers/net/wireguard/noise.h b/drivers/net/wireguard/noise.h index 138a07bb817c..f532d59d3f19 100644 --- a/drivers/net/wireguard/noise.h +++ b/drivers/net/wireguard/noise.h @@ -94,11 +94,11 @@ struct noise_handshake { struct wg_device; void wg_noise_init(void); -bool wg_noise_handshake_init(struct noise_handshake *handshake, - struct noise_static_identity *static_identity, - const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], - const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], - struct wg_peer *peer); +void wg_noise_handshake_init(struct noise_handshake *handshake, + struct noise_static_identity *static_identity, + const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], + const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], + struct wg_peer *peer); void wg_noise_handshake_clear(struct noise_handshake *handshake); static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns) { @@ -116,7 +116,7 @@ void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer); void wg_noise_set_static_identity_private_key( struct noise_static_identity *static_identity, const u8 private_key[NOISE_PUBLIC_KEY_LEN]); -bool wg_noise_precompute_static_static(struct wg_peer *peer); +void wg_noise_precompute_static_static(struct wg_peer *peer); bool wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c index 071eedf33f5a..1d634bd3038f 100644 --- a/drivers/net/wireguard/peer.c +++ b/drivers/net/wireguard/peer.c @@ -34,11 +34,8 @@ struct wg_peer *wg_peer_create(struct wg_device *wg, return ERR_PTR(ret); peer->device = wg; - if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity, - public_key, preshared_key, peer)) { - ret = -EKEYREJECTED; - goto err_1; - } + wg_noise_handshake_init(&peer->handshake, &wg->static_identity, + public_key, preshared_key, peer); if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) goto err_1; if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h index fecb559cbdb6..3432232afe06 100644 --- a/drivers/net/wireguard/queueing.h +++ b/drivers/net/wireguard/queueing.h @@ -66,7 +66,7 @@ struct packet_cb { #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) /* Returns either the correct skb->protocol value, or 0 if invalid. 
*/ -static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb) +static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb) { if (skb_network_header(skb) >= skb->head && (skb_network_header(skb) + sizeof(struct iphdr)) <= @@ -81,6 +81,12 @@ static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb) return 0; } +static inline bool wg_check_packet_protocol(struct sk_buff *skb) +{ + __be16 real_protocol = wg_examine_packet_protocol(skb); + return real_protocol && skb->protocol == real_protocol; +} + static inline void wg_reset_packet(struct sk_buff *skb) { skb_scrub_packet(skb, true); @@ -94,8 +100,8 @@ static inline void wg_reset_packet(struct sk_buff *skb) skb->dev = NULL; #ifdef CONFIG_NET_SCHED skb->tc_index = 0; - skb_reset_tc(skb); #endif + skb_reset_redirect(skb); skb->hdr_len = skb_headroom(skb); skb_reset_mac_header(skb); skb_reset_network_header(skb); diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 9c6bab9c981f..da3b782ab7d3 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -56,7 +56,7 @@ static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg) size_t data_offset, data_len, header_len; struct udphdr *udp; - if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol || + if (unlikely(!wg_check_packet_protocol(skb) || skb_transport_header(skb) < skb->head || (skb_transport_header(skb) + sizeof(struct udphdr)) > skb_tail_pointer(skb))) @@ -118,10 +118,13 @@ static void wg_receive_handshake_packet(struct wg_device *wg, under_load = skb_queue_len(&wg->incoming_handshakes) >= MAX_QUEUED_INCOMING_HANDSHAKES / 8; - if (under_load) + if (under_load) { last_under_load = ktime_get_coarse_boottime_ns(); - else if (last_under_load) + } else if (last_under_load) { under_load = !wg_birthdate_has_expired(last_under_load, 1); + if (!under_load) + last_under_load = 0; + } mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb, under_load); if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) || @@ -385,7 +388,7 @@ static void wg_packet_consume_data_done(struct wg_peer *peer, */ skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ~0; /* All levels */ - skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb); + skb->protocol = wg_examine_packet_protocol(skb); if (skb->protocol == htons(ETH_P_IP)) { len = ntohs(ip_hdr(skb)->tot_len); if (unlikely(len < sizeof(struct iphdr))) @@ -584,8 +587,7 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb) wg_packet_consume_data(wg, skb); break; default: - net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n", - wg->dev->name, skb); + WARN(1, "Non-exhaustive parsing of packet header lead to unknown packet type!\n"); goto err; } return; diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c index c13260563446..7348c10cbae3 100644 --- a/drivers/net/wireguard/send.c +++ b/drivers/net/wireguard/send.c @@ -143,16 +143,22 @@ static void keep_key_fresh(struct wg_peer *peer) static unsigned int calculate_skb_padding(struct sk_buff *skb) { + unsigned int padded_size, last_unit = skb->len; + + if (unlikely(!PACKET_CB(skb)->mtu)) + return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit; + /* We do this modulo business with the MTU, just in case the networking * layer gives us a packet that's bigger than the MTU. In that case, we * wouldn't want the final subtraction to overflow in the case of the - * padded_size being clamped. + * padded_size being clamped. 
Fortunately, that's very rarely the case, + * so we optimize for that not happening. */ - unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu; - unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE); + if (unlikely(last_unit > PACKET_CB(skb)->mtu)) + last_unit %= PACKET_CB(skb)->mtu; - if (padded_size > PACKET_CB(skb)->mtu) - padded_size = PACKET_CB(skb)->mtu; + padded_size = min(PACKET_CB(skb)->mtu, + ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE)); return padded_size - last_unit; } diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c index 262f3b5c819d..b0d6541582d3 100644 --- a/drivers/net/wireguard/socket.c +++ b/drivers/net/wireguard/socket.c @@ -432,7 +432,6 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4, wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); mutex_unlock(&wg->socket_update_lock); synchronize_rcu(); - synchronize_net(); sock_free(old4); sock_free(old6); } diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 5ec16ce19b69..a202a4eea76a 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1052,11 +1052,11 @@ static int ath10k_download_fw(struct ath10k *ar) } memset(&latency_qos, 0, sizeof(latency_qos)); - pm_qos_add_request(&latency_qos, PM_QOS_CPU_DMA_LATENCY, 0); + cpu_latency_qos_add_request(&latency_qos, 0); ret = ath10k_bmi_fast_download(ar, address, data, data_len); - pm_qos_remove_request(&latency_qos); + cpu_latency_qos_remove_request(&latency_qos); return ret; } diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 536cd729c086..5dfcce77d094 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -1730,7 +1730,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) /* the ipw2100 hardware really doesn't want power management delays * longer than 175usec */ - pm_qos_update_request(&ipw2100_pm_qos_req, 175); + cpu_latency_qos_update_request(&ipw2100_pm_qos_req, 175); /* If the interrupt is enabled, turn it off... 
*/ spin_lock_irqsave(&priv->low_lock, flags); @@ -1875,7 +1875,8 @@ static void ipw2100_down(struct ipw2100_priv *priv) ipw2100_disable_interrupts(priv); spin_unlock_irqrestore(&priv->low_lock, flags); - pm_qos_update_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_update_request(&ipw2100_pm_qos_req, + PM_QOS_DEFAULT_VALUE); /* We have to signal any supplicant if we are disassociating */ if (associated) @@ -6566,8 +6567,7 @@ static int __init ipw2100_init(void) printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT); - pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE); ret = pci_register_driver(&ipw2100_pci_driver); if (ret) @@ -6594,7 +6594,7 @@ static void __exit ipw2100_exit(void) &driver_attr_debug_level); #endif pci_unregister_driver(&ipw2100_pci_driver); - pm_qos_remove_request(&ipw2100_pm_qos_req); + cpu_latency_qos_remove_request(&ipw2100_pm_qos_req); } module_init(ipw2100_init); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index a22a830019c0..355af47c5f73 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -283,6 +283,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = { * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, + .tx_with_siso_diversity = true, .num_rbds = IWL_NUM_RBDS_22000_HE, }; @@ -309,6 +310,7 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = { * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, + .tx_with_siso_diversity = true, .num_rbds = IWL_NUM_RBDS_22000_HE, }; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 48d375a86d86..ba2aff3af0fe 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2019 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,7 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2019 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -491,13 +491,13 @@ int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt, } IWL_EXPORT_SYMBOL(iwl_validate_sar_geo_profile); -void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, - struct iwl_per_chain_offset_group *table) +int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, + struct iwl_per_chain_offset_group *table) { int ret, i, j; if (!iwl_sar_geo_support(fwrt)) - return; + return -EOPNOTSUPP; ret = iwl_sar_get_wgds_table(fwrt); if (ret < 0) { @@ -505,7 +505,7 @@ void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, "Geo SAR BIOS table invalid or unavailable. 
(%d)\n", ret); /* we don't fail if the table is not available */ - return; + return -ENOENT; } BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * @@ -530,5 +530,7 @@ void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, i, j, value[1], value[2], value[0]); } } + + return 0; } IWL_EXPORT_SYMBOL(iwl_sar_geo_init); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 4a6e8262974b..5590e5cc8fbb 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,7 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -171,8 +171,9 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt); int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt, struct iwl_host_cmd *cmd); -void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, - struct iwl_per_chain_offset_group *table); +int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, + struct iwl_per_chain_offset_group *table); + #else /* CONFIG_ACPI */ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method) @@ -243,9 +244,10 @@ static inline int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt, return -ENOENT; } -static inline void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, - struct iwl_per_chain_offset_group *table) +static inline int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, + struct iwl_per_chain_offset_group *table) { + return -ENOENT; } #endif /* CONFIG_ACPI */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 91df1ee25dd0..8796ab8f2a5f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -8,7 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -1409,11 +1409,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt, goto out; } - /* - * region register have absolute value so apply rxf offset after - * reading the registers - */ - offs += rxf_data.offset; + offs = rxf_data.offset; /* Lock fence */ iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1); @@ -2494,10 +2490,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) goto out; } - if (iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, true)) { - IWL_ERR(fwrt, "Failed to stop DBGC recording, aborting dump\n"); - goto out; - } + iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, true); IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n"); if (iwl_trans_dbg_ini_valid(fwrt->trans)) @@ -2662,14 +2655,14 @@ static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans, return 0; } -int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, - struct iwl_fw_dbg_params *params, - bool stop) +void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_params *params, + bool stop) { int ret = 0; if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) - return 0; + return; if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP)) @@ -2686,7 +2679,5 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, iwl_fw_set_dbg_rec_on(fwrt); } #endif - - return ret; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 179f2905d56b..9d3513213f5f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -239,9 +239,9 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \ iwl_fw_dbg_get_trigger((fwrt)->fw,\ (trig))) -int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, - struct iwl_fw_dbg_params *params, - bool stop); +void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_params *params, + bool stop); #ifdef CONFIG_IWLWIFI_DEBUGFS static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 2d1cb4647c3b..0481796f75bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1467,7 +1467,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) kmemdup(pieces->dbg_conf_tlv[i], pieces->dbg_conf_tlv_len[i], GFP_KERNEL); - if (!pieces->dbg_conf_tlv_len[i]) + if (!pieces->dbg_conf_tlv[i]) goto out_free_fw; } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 54c094e88474..98263cd37944 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -762,10 +762,17 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT); union geo_tx_power_profiles_cmd cmd; u16 len; + int ret; cmd.geo_cmd.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES); - iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table); + ret = iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table); + /* + * It is a valid scenario to not support SAR, or miss wgds table, + * but in that case there is no need to send the command. 
+ */ + if (ret) + return 0; cmd.geo_cmd.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 70b29bf16bb9..60296a754af2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -308,7 +308,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) } /* PHY_SKU section is mandatory in B0 */ - if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) { + if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT && + !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) { IWL_ERR(mvm, "Can't parse phy_sku in B0, empty sections\n"); return NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index e2cf9e015ef8..ca99a9c4f70e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,7 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -147,7 +147,11 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; - /* consider our LDPC support in case of HE */ + /* consider LDPC support in case of HE */ + if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] & + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) + flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; + if (sband->iftype_data && sband->iftype_data->he_cap.has_he && !(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) @@ -191,11 +195,13 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, { u16 supp; int i, highest_mcs; + u8 nss = sta->rx_nss; - for (i = 0; i < sta->rx_nss; i++) { - if (i == IWL_TLC_NSS_MAX) - break; + /* the station support only a single receive chain */ + if (sta->smps_mode == IEEE80211_SMPS_STATIC) + nss = 1; + for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) { highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1); if (!highest_mcs) continue; @@ -241,8 +247,13 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, u16 tx_mcs_160 = le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160); int i; + u8 nss = sta->rx_nss; + + /* the station support only a single receive chain */ + if (sta->smps_mode == IEEE80211_SMPS_STATIC) + nss = 1; - for (i = 0; i < sta->rx_nss && i < IWL_TLC_NSS_MAX; i++) { + for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) { u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3; u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3; u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3; @@ -303,8 +314,14 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, cmd->mode = IWL_TLC_MNG_MODE_HT; cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); - cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = - cpu_to_le16(ht_cap->mcs.rx_mask[1]); + + /* the station support only a single receive chain */ + if (sta->smps_mode == IEEE80211_SMPS_STATIC) + 
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = + 0; + else + cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = + cpu_to_le16(ht_cap->mcs.rx_mask[1]); } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index c0b420fe5e48..1babc4bb5194 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -785,7 +785,9 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, if (!le32_to_cpu(notif->status)) { iwl_mvm_te_check_disconnect(mvm, vif, "Session protection failure"); + spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, te_data); + spin_unlock_bh(&mvm->time_event_lock); } if (le32_to_cpu(notif->start)) { @@ -801,7 +803,9 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, */ iwl_mvm_te_check_disconnect(mvm, vif, "No beacon heard and the session protection is over already..."); + spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, te_data); + spin_unlock_bh(&mvm->time_event_lock); } goto out_unlock; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 97f227f3cbc3..f441b20e1642 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -981,6 +981,9 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x2526, 0x0014, iwl9260_2ac_160_cfg, iwl9260_160_name), IWL_DEV_INFO(0x2526, 0x0018, iwl9260_2ac_160_cfg, iwl9260_160_name), IWL_DEV_INFO(0x2526, 0x001C, iwl9260_2ac_160_cfg, iwl9260_160_name), + IWL_DEV_INFO(0x2526, 0x4010, iwl9260_2ac_160_cfg, iwl9260_160_name), + IWL_DEV_INFO(0x2526, 0x4018, iwl9260_2ac_160_cfg, iwl9260_160_name), + IWL_DEV_INFO(0x2526, 0x401C, iwl9260_2ac_160_cfg, iwl9260_160_name), IWL_DEV_INFO(0x2526, 0x6010, iwl9260_2ac_160_cfg, iwl9260_160_name), IWL_DEV_INFO(0x2526, 0x6014, iwl9260_2ac_160_cfg, iwl9260_160_name), IWL_DEV_INFO(0x2526, 0x8014, iwl9260_2ac_160_cfg, iwl9260_160_name), diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index e753f43e0162..0e42de291803 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -365,17 +365,6 @@ static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv, return ctx; } - -/* Hopefully the real complete_all will soon be exported, in the mean - * while this should work. */ -static inline void ezusb_complete_all(struct completion *comp) -{ - complete(comp); - complete(comp); - complete(comp); - complete(comp); -} - static void ezusb_ctx_complete(struct request_context *ctx) { struct ezusb_priv *upriv = ctx->upriv; @@ -409,7 +398,7 @@ static void ezusb_ctx_complete(struct request_context *ctx) netif_wake_queue(dev); } - ezusb_complete_all(&ctx->done); + complete_all(&ctx->done); ezusb_request_context_put(ctx); break; @@ -419,7 +408,7 @@ static void ezusb_ctx_complete(struct request_context *ctx) /* This is normal, as all request contexts get flushed * when the device is disconnected */ err("Called, CTX not terminating, but device gone"); - ezusb_complete_all(&ctx->done); + complete_all(&ctx->done); ezusb_request_context_put(ctx); break; } @@ -690,11 +679,11 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv, * get the chance to run themselves. 
So we make sure * that we don't sleep for ever */ int msecs = DEF_TIMEOUT * (1000 / HZ); - while (!ctx->done.done && msecs--) + + while (!try_wait_for_completion(&ctx->done) && msecs--) udelay(1000); } else { - wait_event_interruptible(ctx->done.wait, - ctx->done.done); + wait_for_completion(&ctx->done); } break; default: diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 6173c80189ba..1847f55e199b 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -447,10 +447,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, struct page *page = virt_to_head_page(data); int offset = data - page_address(page); struct sk_buff *skb = q->rx_head; + struct skb_shared_info *shinfo = skb_shinfo(skb); - offset += q->buf_offset; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len, - q->buf_size); + if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) { + offset += q->buf_offset; + skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len, + q->buf_size); + } if (more) return; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h index 917729807514..e17f70b4d199 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h @@ -561,6 +561,7 @@ static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size) rxmcs == DESC92C_RATE11M) struct phy_status_rpt { + u8 padding[2]; u8 ch_corr[2]; u8 cck_sig_qual_ofdm_pwdb_all; u8 cck_agc_rpt_ofdm_cfosho_a; diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index ed049c9f7e29..f140f7d7f553 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -6274,7 +6274,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_HAS_CHANNEL_SWITCH | -+ WIPHY_FLAG_IBSS_RSN; + WIPHY_FLAG_IBSS_RSN; wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN; diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c index 0cc9ac856fe2..ed2123129e0e 100644 --- a/drivers/nfc/fdp/fdp.c +++ b/drivers/nfc/fdp/fdp.c @@ -184,7 +184,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type) const struct firmware *fw; struct sk_buff *skb; unsigned long len; - u8 max_size, payload_size; + int max_size, payload_size; int rc = 0; if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) || @@ -207,8 +207,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type) while (len) { - payload_size = min_t(unsigned long, (unsigned long) max_size, - len); + payload_size = min_t(unsigned long, max_size, len); skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size), GFP_KERNEL); diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index 720c89d6066e..4ac8cb262559 100644 --- a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -225,6 +225,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy) out: gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity); + usleep_range(10000, 15000); } static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode) diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c index 2b83156efe3f..b788870473e8 100644 --- a/drivers/nfc/pn544/pn544.c +++ b/drivers/nfc/pn544/pn544.c @@ -682,7 +682,7 @@ static int pn544_hci_tm_send(struct nfc_hci_dev *hdev, 
struct sk_buff *skb) static int pn544_hci_check_presence(struct nfc_hci_dev *hdev, struct nfc_target *target) { - pr_debug("supported protocol %d\b", target->supported_protocols); + pr_debug("supported protocol %d\n", target->supported_protocols); if (target->supported_protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_ISO14443_B_MASK)) { return nfc_hci_send_cmd(hdev, target->hci_reader_gate, diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 677d6f45b5c4..43751fab9d36 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -249,13 +249,12 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk)); available_disk_size = internal_nlba * nsblk_sector_size(nsblk); - q = blk_alloc_queue(GFP_KERNEL); + q = blk_alloc_queue(nd_blk_make_request, NUMA_NO_NODE); if (!q) return -ENOMEM; if (devm_add_action_or_reset(dev, nd_blk_release_queue, q)) return -ENOMEM; - blk_queue_make_request(q, nd_blk_make_request); blk_queue_max_hw_sectors(q, UINT_MAX); blk_queue_logical_block_size(q, nsblk_sector_size(nsblk)); blk_queue_flag_set(QUEUE_FLAG_NONROT, q); diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 0d04ea3d9fd7..3b09419218d6 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1521,7 +1521,7 @@ static int btt_blk_init(struct btt *btt) struct nd_namespace_common *ndns = nd_btt->ndns; /* create a new disk and request queue for btt */ - btt->btt_queue = blk_alloc_queue(GFP_KERNEL); + btt->btt_queue = blk_alloc_queue(btt_make_request, NUMA_NO_NODE); if (!btt->btt_queue) return -ENOMEM; @@ -1540,7 +1540,6 @@ static int btt_blk_init(struct btt *btt) btt->btt_disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO; - blk_queue_make_request(btt->btt_queue, btt_make_request); blk_queue_logical_block_size(btt->btt_queue, btt->sector_size); blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX); blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 4eae441f86c9..4ffc6f7ca131 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -395,7 +395,7 @@ static int pmem_attach_disk(struct device *dev, return -EBUSY; } - q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev)); + q = blk_alloc_queue(pmem_make_request, dev_to_node(dev)); if (!q) return -ENOMEM; @@ -433,7 +433,6 @@ static int pmem_attach_disk(struct device *dev, pmem->virt_addr = addr; blk_queue_write_cache(q, true, fua); - blk_queue_make_request(q, pmem_make_request); blk_queue_physical_block_size(q, PAGE_SIZE); blk_queue_logical_block_size(q, pmem_sector_size(ndns)); blk_queue_max_hw_sectors(q, UINT_MAX); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index b9358db83e96..9c17ed32be64 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -32,8 +32,6 @@ config NVME_HWMON a hardware monitoring device will be created for each NVMe drive in the system. - If unsure, say N. 
- config NVME_FABRICS tristate diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ada59df642d2..4f907e3beda1 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -171,7 +171,6 @@ static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) nvme_remove_namespaces(ctrl); ctrl->ops->delete_ctrl(ctrl); nvme_uninit_ctrl(ctrl); - nvme_put_ctrl(ctrl); } static void nvme_delete_ctrl_work(struct work_struct *work) @@ -192,21 +191,16 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_delete_ctrl); -static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) +static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) { - int ret = 0; - /* * Keep a reference until nvme_do_delete_ctrl() complete, * since ->delete_ctrl can free the controller. */ nvme_get_ctrl(ctrl); - if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) - ret = -EBUSY; - if (!ret) + if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) nvme_do_delete_ctrl(ctrl); nvme_put_ctrl(ctrl); - return ret; } static inline bool nvme_ns_has_pi(struct nvme_ns *ns) @@ -291,11 +285,8 @@ void nvme_complete_rq(struct request *req) nvme_req(req)->ctrl->comp_seen = true; if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) { - if ((req->cmd_flags & REQ_NVME_MPATH) && - blk_path_error(status)) { - nvme_failover_req(req); + if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req)) return; - } if (!blk_queue_dying(req->q)) { nvme_retry_req(req); @@ -1055,6 +1046,43 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) return error; } +static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, + struct nvme_ns_id_desc *cur) +{ + const char *warn_str = "ctrl returned bogus length:"; + void *data = cur; + + switch (cur->nidt) { + case NVME_NIDT_EUI64: + if (cur->nidl != NVME_NIDT_EUI64_LEN) { + dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n", + warn_str, cur->nidl); + return -1; + } + memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN); + return NVME_NIDT_EUI64_LEN; + case NVME_NIDT_NGUID: + if (cur->nidl != NVME_NIDT_NGUID_LEN) { + dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n", + warn_str, cur->nidl); + return -1; + } + memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN); + return NVME_NIDT_NGUID_LEN; + case NVME_NIDT_UUID: + if (cur->nidl != NVME_NIDT_UUID_LEN) { + dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n", + warn_str, cur->nidl); + return -1; + } + uuid_copy(&ids->uuid, data + sizeof(*cur)); + return NVME_NIDT_UUID_LEN; + default: + /* Skip unknown types */ + return cur->nidl; + } +} + static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, struct nvme_ns_ids *ids) { @@ -1074,8 +1102,17 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, NVME_IDENTIFY_DATA_SIZE); - if (status) + if (status) { + dev_warn(ctrl->device, + "Identify Descriptors failed (%d)\n", status); + /* + * Don't treat an error as fatal, as we potentially already + * have a NGUID or EUI-64. 
+ */ + if (status > 0) + status = 0; goto free_data; + } for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { struct nvme_ns_id_desc *cur = data + pos; @@ -1083,42 +1120,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, if (cur->nidl == 0) break; - switch (cur->nidt) { - case NVME_NIDT_EUI64: - if (cur->nidl != NVME_NIDT_EUI64_LEN) { - dev_warn(ctrl->device, - "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n", - cur->nidl); - goto free_data; - } - len = NVME_NIDT_EUI64_LEN; - memcpy(ids->eui64, data + pos + sizeof(*cur), len); - break; - case NVME_NIDT_NGUID: - if (cur->nidl != NVME_NIDT_NGUID_LEN) { - dev_warn(ctrl->device, - "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n", - cur->nidl); - goto free_data; - } - len = NVME_NIDT_NGUID_LEN; - memcpy(ids->nguid, data + pos + sizeof(*cur), len); - break; - case NVME_NIDT_UUID: - if (cur->nidl != NVME_NIDT_UUID_LEN) { - dev_warn(ctrl->device, - "ctrl returned bogus length: %d for NVME_NIDT_UUID\n", - cur->nidl); - goto free_data; - } - len = NVME_NIDT_UUID_LEN; - uuid_copy(&ids->uuid, data + pos + sizeof(*cur)); - break; - default: - /* Skip unknown types */ - len = cur->nidl; - break; - } + len = nvme_process_ns_desc(ctrl, ids, cur); + if (len < 0) + goto free_data; len += sizeof(*cur); } @@ -1165,8 +1169,8 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, u32 *result) { + union nvme_result res = { 0 }; struct nvme_command c; - union nvme_result res; int ret; memset(&c, 0, sizeof(c)); @@ -1584,6 +1588,47 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, return ret; } +#ifdef CONFIG_COMPAT +struct nvme_user_io32 { + __u8 opcode; + __u8 flags; + __u16 control; + __u16 nblocks; + __u16 rsvd; + __u64 metadata; + __u64 addr; + __u64 slba; + __u32 dsmgmt; + __u32 reftag; + __u16 apptag; + __u16 appmask; +} __attribute__((__packed__)); + +#define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32) + +static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + /* + * Corresponds to the difference of NVME_IOCTL_SUBMIT_IO + * between 32 bit programs and 64 bit kernel. + * The cause is that the results of sizeof(struct nvme_user_io), + * which is used to define NVME_IOCTL_SUBMIT_IO, + * are not same between 32 bit compiler and 64 bit compiler. + * NVME_IOCTL_SUBMIT_IO32 is for 64 bit kernel handling + * NVME_IOCTL_SUBMIT_IO issued from 32 bit programs. + * Other IOCTL numbers are same between 32 bit and 64 bit. + * So there is nothing to do regarding to other IOCTL numbers. 
+ */ + if (cmd == NVME_IOCTL_SUBMIT_IO32) + return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg); + + return nvme_ioctl(bdev, mode, cmd, arg); +} +#else +#define nvme_compat_ioctl NULL +#endif /* CONFIG_COMPAT */ + static int nvme_open(struct block_device *bdev, fmode_t mode) { struct nvme_ns *ns = bdev->bd_disk->private_data; @@ -1721,26 +1766,15 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns) static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, struct nvme_id_ns *id, struct nvme_ns_ids *ids) { - int ret = 0; - memset(ids, 0, sizeof(*ids)); if (ctrl->vs >= NVME_VS(1, 1, 0)) memcpy(ids->eui64, id->eui64, sizeof(id->eui64)); if (ctrl->vs >= NVME_VS(1, 2, 0)) memcpy(ids->nguid, id->nguid, sizeof(id->nguid)); - if (ctrl->vs >= NVME_VS(1, 3, 0)) { - /* Don't treat error as fatal we potentially - * already have a NGUID or EUI-64 - */ - ret = nvme_identify_ns_descs(ctrl, nsid, ids); - if (ret) - dev_warn(ctrl->device, - "Identify Descriptors failed (%d)\n", ret); - if (ret > 0) - ret = 0; - } - return ret; + if (ctrl->vs >= NVME_VS(1, 3, 0)) + return nvme_identify_ns_descs(ctrl, nsid, ids); + return 0; } static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) @@ -1810,7 +1844,7 @@ static void nvme_update_disk_info(struct gendisk *disk, ns->lba_shift > PAGE_SHIFT) capacity = 0; - set_capacity(disk, capacity); + set_capacity_revalidate_and_notify(disk, capacity, false); nvme_config_discard(disk, ns); nvme_config_write_zeroes(disk, ns); @@ -2027,7 +2061,7 @@ EXPORT_SYMBOL_GPL(nvme_sec_submit); static const struct block_device_operations nvme_fops = { .owner = THIS_MODULE, .ioctl = nvme_ioctl, - .compat_ioctl = nvme_ioctl, + .compat_ioctl = nvme_compat_ioctl, .open = nvme_open, .release = nvme_release, .getgeo = nvme_getgeo, @@ -2055,7 +2089,7 @@ const struct block_device_operations nvme_ns_head_ops = { .open = nvme_ns_head_open, .release = nvme_ns_head_release, .ioctl = nvme_ioctl, - .compat_ioctl = nvme_ioctl, + .compat_ioctl = nvme_compat_ioctl, .getgeo = nvme_getgeo, .pr_ops = &nvme_pr_ops, }; @@ -2074,13 +2108,13 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled) if ((csts & NVME_CSTS_RDY) == bit) break; - msleep(100); + usleep_range(1000, 2000); if (fatal_signal_pending(current)) return -EINTR; if (time_after(jiffies, timeout)) { dev_err(ctrl->device, - "Device not ready; aborting %s\n", enabled ? - "initialisation" : "reset"); + "Device not ready; aborting %s, CSTS=0x%x\n", + enabled ? 
"initialisation" : "reset", csts); return -ENODEV; } } @@ -2591,8 +2625,7 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, lockdep_assert_held(&nvme_subsystems_lock); list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { - if (tmp->state == NVME_CTRL_DELETING || - tmp->state == NVME_CTRL_DEAD) + if (nvme_state_terminal(tmp)) continue; if (tmp->cntlid == ctrl->cntlid) { @@ -3193,6 +3226,10 @@ static ssize_t nvme_sysfs_delete(struct device *dev, { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + /* Can't delete non-created controllers */ + if (!ctrl->created) + return -EBUSY; + if (device_remove_file_self(dev, attr)) nvme_delete_ctrl_sync(ctrl); return count; @@ -3242,6 +3279,26 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, } static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); +static ssize_t nvme_sysfs_show_hostnqn(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn); +} +static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL); + +static ssize_t nvme_sysfs_show_hostid(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id); +} +static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL); + static ssize_t nvme_sysfs_show_address(struct device *dev, struct device_attribute *attr, char *buf) @@ -3267,6 +3324,8 @@ static struct attribute *nvme_dev_attrs[] = { &dev_attr_numa_node.attr, &dev_attr_queue_count.attr, &dev_attr_sqsize.attr, + &dev_attr_hostnqn.attr, + &dev_attr_hostid.attr, NULL }; @@ -3280,6 +3339,10 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, return 0; if (a == &dev_attr_address.attr && !ctrl->ops->get_address) return 0; + if (a == &dev_attr_hostnqn.attr && !ctrl->opts) + return 0; + if (a == &dev_attr_hostid.attr && !ctrl->opts) + return 0; return a->mode; } @@ -3294,7 +3357,7 @@ static const struct attribute_group *nvme_dev_attr_groups[] = { NULL, }; -static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys, +static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys, unsigned nsid) { struct nvme_ns_head *h; @@ -3327,7 +3390,8 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys, } static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, - unsigned nsid, struct nvme_id_ns *id) + unsigned nsid, struct nvme_id_ns *id, + struct nvme_ns_ids *ids) { struct nvme_ns_head *head; size_t size = sizeof(*head); @@ -3350,12 +3414,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, goto out_ida_remove; head->subsys = ctrl->subsys; head->ns_id = nsid; + head->ids = *ids; kref_init(&head->ref); - ret = nvme_report_ns_ids(ctrl, nsid, id, &head->ids); - if (ret) - goto out_cleanup_srcu; - ret = __nvme_check_ids(ctrl->subsys, head); if (ret) { dev_err(ctrl->device, @@ -3390,24 +3451,23 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, struct nvme_ctrl *ctrl = ns->ctrl; bool is_shared = id->nmic & (1 << 0); struct nvme_ns_head *head = NULL; + struct nvme_ns_ids ids; int ret = 0; + ret = nvme_report_ns_ids(ctrl, nsid, id, &ids); + if (ret) + goto out; + mutex_lock(&ctrl->subsys->lock); if (is_shared) - head = __nvme_find_ns_head(ctrl->subsys, nsid); + head = nvme_find_ns_head(ctrl->subsys, nsid); if (!head) { - head = nvme_alloc_ns_head(ctrl, 
nsid, id); + head = nvme_alloc_ns_head(ctrl, nsid, id, &ids); if (IS_ERR(head)) { ret = PTR_ERR(head); goto out_unlock; } } else { - struct nvme_ns_ids ids; - - ret = nvme_report_ns_ids(ctrl, nsid, id, &ids); - if (ret) - goto out_unlock; - if (!nvme_ns_ids_equal(&head->ids, &ids)) { dev_err(ctrl->device, "IDs don't match for shared namespace %d\n", @@ -3422,6 +3482,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, out_unlock: mutex_unlock(&ctrl->subsys->lock); +out: if (ret > 0) ret = blk_status_to_errno(nvme_error_status(ret)); return ret; @@ -3480,7 +3541,7 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns) return 0; } -static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) +static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns *ns; struct gendisk *disk; @@ -3490,13 +3551,11 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); if (!ns) - return -ENOMEM; + return; ns->queue = blk_mq_init_queue(ctrl->tagset); - if (IS_ERR(ns->queue)) { - ret = PTR_ERR(ns->queue); + if (IS_ERR(ns->queue)) goto out_free_ns; - } if (ctrl->opts && ctrl->opts->data_digest) ns->queue->backing_dev_info->capabilities @@ -3519,10 +3578,8 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) if (ret) goto out_free_queue; - if (id->ncap == 0) { - ret = -EINVAL; + if (id->ncap == 0) /* no namespace (legacy quirk) */ goto out_free_id; - } ret = nvme_init_ns_head(ns, nsid, id); if (ret) @@ -3531,10 +3588,8 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) nvme_set_disk_name(disk_name, ns, ctrl, &flags); disk = alloc_disk_node(0, node); - if (!disk) { - ret = -ENOMEM; + if (!disk) goto out_unlink_ns; - } disk->fops = &nvme_fops; disk->private_data = ns; @@ -3565,7 +3620,7 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); kfree(id); - return 0; + return; out_put_disk: put_disk(ns->disk); out_unlink_ns: @@ -3579,9 +3634,6 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) blk_cleanup_queue(ns->queue); out_free_ns: kfree(ns); - if (ret > 0) - ret = blk_status_to_errno(nvme_error_status(ret)); - return ret; } static void nvme_ns_remove(struct nvme_ns *ns) @@ -3987,6 +4039,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) nvme_queue_scan(ctrl); nvme_start_queues(ctrl); } + ctrl->created = true; } EXPORT_SYMBOL_GPL(nvme_start_ctrl); @@ -3995,6 +4048,7 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) nvme_fault_inject_fini(&ctrl->fault_inject); dev_pm_qos_hide_latency_tolerance(ctrl->device); cdev_device_del(&ctrl->cdev, ctrl->device); + nvme_put_ctrl(ctrl); } EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); @@ -4077,6 +4131,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, if (ret) goto out_release_instance; + nvme_get_ctrl(ctrl); cdev_init(&ctrl->cdev, &nvme_dev_fops); ctrl->cdev.owner = ops->module; ret = cdev_device_add(&ctrl->cdev, ctrl->device); @@ -4095,6 +4150,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, return 0; out_free_name: + nvme_put_ctrl(ctrl); kfree_const(ctrl->device->kobj.name); out_release_instance: ida_simple_remove(&nvme_instance_ida, ctrl->instance); @@ -4299,6 +4355,7 @@ static void __exit nvme_core_exit(void) destroy_workqueue(nvme_delete_wq); destroy_workqueue(nvme_reset_wq); destroy_workqueue(nvme_wq); + ida_destroy(&nvme_instance_ida); } MODULE_LICENSE("GPL"); diff --git a/drivers/nvme/host/fabrics.c 
b/drivers/nvme/host/fabrics.c index 74b8818ac9a1..2a6c8190eeb7 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -105,14 +105,14 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size) int len = 0; if (ctrl->opts->mask & NVMF_OPT_TRADDR) - len += snprintf(buf, size, "traddr=%s", ctrl->opts->traddr); + len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr); if (ctrl->opts->mask & NVMF_OPT_TRSVCID) - len += snprintf(buf + len, size - len, "%strsvcid=%s", + len += scnprintf(buf + len, size - len, "%strsvcid=%s", (len) ? "," : "", ctrl->opts->trsvcid); if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR) - len += snprintf(buf + len, size - len, "%shost_traddr=%s", + len += scnprintf(buf + len, size - len, "%shost_traddr=%s", (len) ? "," : "", ctrl->opts->host_traddr); - len += snprintf(buf + len, size - len, "\n"); + len += scnprintf(buf + len, size - len, "\n"); return len; } diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 5a70ac395d53..a8bf2fb1287b 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3181,10 +3181,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, goto fail_ctrl; } - nvme_get_ctrl(&ctrl->ctrl); - if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { - nvme_put_ctrl(&ctrl->ctrl); dev_err(ctrl->ctrl.device, "NVME-FC{%d}: failed to schedule initial connect\n", ctrl->cnum); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 797c18337d96..61bf87592570 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -64,17 +64,12 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, } } -void nvme_failover_req(struct request *req) +bool nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; u16 status = nvme_req(req)->status; unsigned long flags; - spin_lock_irqsave(&ns->head->requeue_lock, flags); - blk_steal_bios(&ns->head->requeue_list, req); - spin_unlock_irqrestore(&ns->head->requeue_lock, flags); - blk_mq_end_request(req, 0); - switch (status & 0x7ff) { case NVME_SC_ANA_TRANSITION: case NVME_SC_ANA_INACCESSIBLE: @@ -103,15 +98,17 @@ void nvme_failover_req(struct request *req) nvme_mpath_clear_current_path(ns); break; default: - /* - * Reset the controller for any non-ANA error as we don't know - * what caused the error. - */ - nvme_reset_ctrl(ns->ctrl); - break; + /* This was a non-ANA error so follow the normal error path. 
*/ + return false; } + spin_lock_irqsave(&ns->head->requeue_lock, flags); + blk_steal_bios(&ns->head->requeue_list, req); + spin_unlock_irqrestore(&ns->head->requeue_lock, flags); + blk_mq_end_request(req, 0); + kblockd_schedule_work(&ns->head->requeue_work); + return true; } void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) @@ -377,11 +374,10 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath) return 0; - q = blk_alloc_queue_node(GFP_KERNEL, ctrl->numa_node); + q = blk_alloc_queue(nvme_ns_head_make_request, ctrl->numa_node); if (!q) goto out; q->queuedata = head; - blk_queue_make_request(q, nvme_ns_head_make_request); blk_queue_flag_set(QUEUE_FLAG_NONROT, q); /* set to a default value for 512 until disk is validated */ blk_queue_logical_block_size(q, 512); @@ -715,6 +711,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) } INIT_WORK(&ctrl->ana_work, nvme_ana_work); + kfree(ctrl->ana_log_buf); ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL); if (!ctrl->ana_log_buf) { error = -ENOMEM; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 1024fec7914c..2e04a36296d9 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -259,6 +259,7 @@ struct nvme_ctrl { struct nvme_command ka_cmd; struct work_struct fw_act_work; unsigned long events; + bool created; #ifdef CONFIG_NVME_MULTIPATH /* asymmetric namespace access: */ @@ -550,7 +551,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys); void nvme_mpath_start_freeze(struct nvme_subsystem *subsys); void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, struct nvme_ctrl *ctrl, int *flags); -void nvme_failover_req(struct request *req); +bool nvme_failover_req(struct request *req); void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id); @@ -599,8 +600,9 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); } -static inline void nvme_failover_req(struct request *req) +static inline bool nvme_failover_req(struct request *req) { + return false; } static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) { diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 9c80f9f08149..4e79e412b276 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -971,39 +971,25 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) nvme_end_request(req, cqe->status, cqe->result); } -static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end) -{ - while (start != end) { - nvme_handle_cqe(nvmeq, start); - if (++start == nvmeq->q_depth) - start = 0; - } -} - static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) { - if (nvmeq->cq_head == nvmeq->q_depth - 1) { + if (++nvmeq->cq_head == nvmeq->q_depth) { nvmeq->cq_head = 0; - nvmeq->cq_phase = !nvmeq->cq_phase; - } else { - nvmeq->cq_head++; + nvmeq->cq_phase ^= 1; } } -static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start, - u16 *end, unsigned int tag) +static inline int nvme_process_cq(struct nvme_queue *nvmeq) { int found = 0; - *start = nvmeq->cq_head; while (nvme_cqe_pending(nvmeq)) { - if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag) - found++; + found++; + nvme_handle_cqe(nvmeq, nvmeq->cq_head); 
nvme_update_cq_head(nvmeq); } - *end = nvmeq->cq_head; - if (*start != *end) + if (found) nvme_ring_cq_doorbell(nvmeq); return found; } @@ -1012,21 +998,16 @@ static irqreturn_t nvme_irq(int irq, void *data) { struct nvme_queue *nvmeq = data; irqreturn_t ret = IRQ_NONE; - u16 start, end; /* * The rmb/wmb pair ensures we see all updates from a previous run of * the irq handler, even if that was on another CPU. */ rmb(); - nvme_process_cq(nvmeq, &start, &end, -1); + if (nvme_process_cq(nvmeq)) + ret = IRQ_HANDLED; wmb(); - if (start != end) { - nvme_complete_cqes(nvmeq, start, end); - return IRQ_HANDLED; - } - return ret; } @@ -1039,48 +1020,32 @@ static irqreturn_t nvme_irq_check(int irq, void *data) } /* - * Poll for completions any queue, including those not dedicated to polling. + * Poll for completions for any interrupt driven queue * Can be called from any context. */ -static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag) +static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) { struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); - u16 start, end; - int found; - /* - * For a poll queue we need to protect against the polling thread - * using the CQ lock. For normal interrupt driven threads we have - * to disable the interrupt to avoid racing with it. - */ - if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) { - spin_lock(&nvmeq->cq_poll_lock); - found = nvme_process_cq(nvmeq, &start, &end, tag); - spin_unlock(&nvmeq->cq_poll_lock); - } else { - disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); - found = nvme_process_cq(nvmeq, &start, &end, tag); - enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); - } + WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); - nvme_complete_cqes(nvmeq, start, end); - return found; + disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); + nvme_process_cq(nvmeq); + enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); } static int nvme_poll(struct blk_mq_hw_ctx *hctx) { struct nvme_queue *nvmeq = hctx->driver_data; - u16 start, end; bool found; if (!nvme_cqe_pending(nvmeq)) return 0; spin_lock(&nvmeq->cq_poll_lock); - found = nvme_process_cq(nvmeq, &start, &end, -1); + found = nvme_process_cq(nvmeq); spin_unlock(&nvmeq->cq_poll_lock); - nvme_complete_cqes(nvmeq, start, end); return found; } @@ -1255,7 +1220,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) /* * Did we miss an interrupt? 
*/ - if (nvme_poll_irqdisable(nvmeq, req->tag)) { + if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) + nvme_poll(req->mq_hctx); + else + nvme_poll_irqdisable(nvmeq); + + if (blk_mq_request_completed(req)) { dev_warn(dev->ctrl.device, "I/O %d QID %d timeout, completion polled\n", req->tag, nvmeq->qid); @@ -1398,7 +1368,7 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) else nvme_disable_ctrl(&dev->ctrl); - nvme_poll_irqdisable(nvmeq, -1); + nvme_poll_irqdisable(nvmeq); } /* @@ -1409,13 +1379,10 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) */ static void nvme_reap_pending_cqes(struct nvme_dev *dev) { - u16 start, end; int i; - for (i = dev->ctrl.queue_count - 1; i > 0; i--) { - nvme_process_cq(&dev->queues[i], &start, &end, -1); - nvme_complete_cqes(&dev->queues[i], start, end); - } + for (i = dev->ctrl.queue_count - 1; i > 0; i--) + nvme_process_cq(&dev->queues[i]); } static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, @@ -2503,13 +2470,13 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) struct nvme_dev *dev = to_nvme_dev(ctrl); nvme_dbbuf_dma_free(dev); - put_device(dev->dev); nvme_free_tagset(dev); if (dev->ctrl.admin_q) blk_put_queue(dev->ctrl.admin_q); - kfree(dev->queues); free_opal_dev(dev->ctrl.opal_dev); mempool_destroy(dev->iod_mempool); + put_device(dev->dev); + kfree(dev->queues); kfree(dev); } @@ -2689,7 +2656,7 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) { struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); - return snprintf(buf, size, "%s", dev_name(&pdev->dev)); + return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); } static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { @@ -2747,6 +2714,18 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) return NVME_QUIRK_NO_APST; + } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || + pdev->device == 0xa808 || pdev->device == 0xa809)) || + (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { + /* + * Forcing to use host managed nvme power settings for + * lowest idle power with quick resume latency on + * Samsung and Toshiba SSDs based on suspend behavior + * on Coffee Lake board for LENOVO C640 + */ + if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) && + dmi_match(DMI_BOARD_NAME, "LNVNB161216")) + return NVME_QUIRK_SIMPLE_SUSPEND; } return 0; @@ -2823,7 +2802,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); nvme_reset_ctrl(&dev->ctrl); - nvme_get_ctrl(&dev->ctrl); async_schedule(nvme_async_probe, dev); return 0; @@ -2895,10 +2873,9 @@ static void nvme_remove(struct pci_dev *pdev) nvme_free_host_mem(dev); nvme_dev_remove_admin(dev); nvme_free_queues(dev, 0); - nvme_uninit_ctrl(&dev->ctrl); nvme_release_prp_pools(dev); nvme_dev_unmap(dev); - nvme_put_ctrl(&dev->ctrl); + nvme_uninit_ctrl(&dev->ctrl); } #ifdef CONFIG_PM_SLEEP @@ -3109,7 +3086,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, - { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), + .driver_data = NVME_QUIRK_SINGLE_VECTOR }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), .driver_data = NVME_QUIRK_SINGLE_VECTOR | 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 3e85c5cacefd..86603d9b0cef 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -850,9 +850,11 @@ out_free_tagset: if (new) blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); out_free_async_qe: - nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, - sizeof(struct nvme_command), DMA_TO_DEVICE); - ctrl->async_event_sqe.data = NULL; + if (ctrl->async_event_sqe.data) { + nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, + sizeof(struct nvme_command), DMA_TO_DEVICE); + ctrl->async_event_sqe.data = NULL; + } out_free_queue: nvme_rdma_free_queue(&ctrl->queues[0]); return error; @@ -1022,8 +1024,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); if (!changed) { - /* state change failure is ok if we're in DELETING state */ + /* + * state change failure is ok if we're in DELETING state, + * unless we're during creation of a new controller to + * avoid races with teardown flow. + */ WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING); + WARN_ON_ONCE(new); ret = -EINVAL; goto destroy_io; } @@ -2043,8 +2050,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n", ctrl->ctrl.opts->subsysnqn, &ctrl->addr); - nvme_get_ctrl(&ctrl->ctrl); - mutex_lock(&nvme_rdma_ctrl_mutex); list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); mutex_unlock(&nvme_rdma_ctrl_mutex); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 49d4373b84eb..0ef14f0fad86 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -20,6 +20,16 @@ struct nvme_tcp_queue; +/* Define the socket priority to use for connections were it is desirable + * that the NIC consider performing optimized packet processing or filtering. + * A non-zero value being sufficient to indicate general consideration of any + * possible optimization. Making it a module param allows for alternative + * values that may be unique for some NIC implementations. + */ +static int so_priority; +module_param(so_priority, int, 0644); +MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority"); + enum nvme_tcp_send_state { NVME_TCP_SEND_CMD_PDU = 0, NVME_TCP_SEND_H2C_PDU, @@ -1017,8 +1027,15 @@ static int nvme_tcp_try_send(struct nvme_tcp_queue *queue) if (req->state == NVME_TCP_SEND_DDGST) ret = nvme_tcp_try_send_ddgst(req); done: - if (ret == -EAGAIN) + if (ret == -EAGAIN) { ret = 0; + } else if (ret < 0) { + dev_err(queue->ctrl->ctrl.device, + "failed to send request %d\n", ret); + if (ret != -EPIPE && ret != -ECONNRESET) + nvme_tcp_fail_request(queue->request); + nvme_tcp_done_send_req(queue); + } return ret; } @@ -1049,25 +1066,16 @@ static void nvme_tcp_io_work(struct work_struct *w) int result; result = nvme_tcp_try_send(queue); - if (result > 0) { + if (result > 0) pending = true; - } else if (unlikely(result < 0)) { - dev_err(queue->ctrl->ctrl.device, - "failed to send request %d\n", result); - - /* - * Fail the request unless peer closed the connection, - * in which case error recovery flow will complete all. 
- */ - if ((result != -EPIPE) && (result != -ECONNRESET)) - nvme_tcp_fail_request(queue->request); - nvme_tcp_done_send_req(queue); - return; - } + else if (unlikely(result < 0)) + break; result = nvme_tcp_try_recv(queue); if (result > 0) pending = true; + else if (unlikely(result < 0)) + break; if (!pending) return; @@ -1248,13 +1256,67 @@ free_icreq: return ret; } +static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue) +{ + return nvme_tcp_queue_id(queue) == 0; +} + +static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue) +{ + struct nvme_tcp_ctrl *ctrl = queue->ctrl; + int qid = nvme_tcp_queue_id(queue); + + return !nvme_tcp_admin_queue(queue) && + qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT]; +} + +static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue) +{ + struct nvme_tcp_ctrl *ctrl = queue->ctrl; + int qid = nvme_tcp_queue_id(queue); + + return !nvme_tcp_admin_queue(queue) && + !nvme_tcp_default_queue(queue) && + qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + + ctrl->io_queues[HCTX_TYPE_READ]; +} + +static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) +{ + struct nvme_tcp_ctrl *ctrl = queue->ctrl; + int qid = nvme_tcp_queue_id(queue); + + return !nvme_tcp_admin_queue(queue) && + !nvme_tcp_default_queue(queue) && + !nvme_tcp_read_queue(queue) && + qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + + ctrl->io_queues[HCTX_TYPE_READ] + + ctrl->io_queues[HCTX_TYPE_POLL]; +} + +static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) +{ + struct nvme_tcp_ctrl *ctrl = queue->ctrl; + int qid = nvme_tcp_queue_id(queue); + int n = 0; + + if (nvme_tcp_default_queue(queue)) + n = qid - 1; + else if (nvme_tcp_read_queue(queue)) + n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; + else if (nvme_tcp_poll_queue(queue)) + n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - + ctrl->io_queues[HCTX_TYPE_READ] - 1; + queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); +} + static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, size_t queue_size) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; struct linger sol = { .l_onoff = 1, .l_linger = 0 }; - int ret, opt, rcv_pdu_size, n; + int ret, opt, rcv_pdu_size; queue->ctrl = ctrl; INIT_LIST_HEAD(&queue->send_list); @@ -1309,6 +1371,17 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, goto err_sock; } + if (so_priority > 0) { + ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_PRIORITY, + (char *)&so_priority, sizeof(so_priority)); + if (ret) { + dev_err(ctrl->ctrl.device, + "failed to set SO_PRIORITY sock opt, ret %d\n", + ret); + goto err_sock; + } + } + /* Set socket type of service */ if (nctrl->opts->tos >= 0) { opt = nctrl->opts->tos; @@ -1322,11 +1395,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, } queue->sock->sk->sk_allocation = GFP_ATOMIC; - if (!qid) - n = 0; - else - n = (qid - 1) % num_online_cpus(); - queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); + nvme_tcp_set_queue_io_cpu(queue); queue->request = NULL; queue->data_remaining = 0; queue->ddgst_remaining = 0; @@ -1861,8 +1930,13 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) } if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) { - /* state change failure is ok if we're in DELETING state */ + /* + * state change failure is ok if we're in DELETING state, + * unless we're during creation of a new controller to + * avoid races with teardown flow. 
+ */ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING); + WARN_ON_ONCE(new); ret = -EINVAL; goto destroy_io; } @@ -2359,8 +2433,6 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n", ctrl->ctrl.opts->subsysnqn, &ctrl->addr); - nvme_get_ctrl(&ctrl->ctrl); - mutex_lock(&nvme_tcp_ctrl_mutex); list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); mutex_unlock(&nvme_tcp_ctrl_mutex); diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 72a7e41f3018..9d6f75cfa77c 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/rculist.h> +#include <linux/part_stat.h> #include <generated/utsrelease.h> #include <asm/unaligned.h> @@ -322,12 +323,25 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req) nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); } +static void nvmet_id_set_model_number(struct nvme_id_ctrl *id, + struct nvmet_subsys *subsys) +{ + const char *model = NVMET_DEFAULT_CTRL_MODEL; + struct nvmet_subsys_model *subsys_model; + + rcu_read_lock(); + subsys_model = rcu_dereference(subsys->model); + if (subsys_model) + model = subsys_model->number; + memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' '); + rcu_read_unlock(); +} + static void nvmet_execute_identify_ctrl(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvme_id_ctrl *id; u16 status = 0; - const char model[] = "Linux"; id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) { @@ -342,7 +356,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) memset(id->sn, ' ', sizeof(id->sn)); bin2hex(id->sn, &ctrl->subsys->serial, min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2)); - memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' '); + nvmet_id_set_model_number(id, ctrl->subsys); memcpy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE), ' '); @@ -356,8 +370,12 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) /* we support multiple ports, multiples hosts and ANA: */ id->cmic = (1 << 0) | (1 << 1) | (1 << 3); - /* no limit on data transfer sizes for now */ - id->mdts = 0; + /* Limit MDTS according to transport capability */ + if (ctrl->ops->get_mdts) + id->mdts = ctrl->ops->get_mdts(ctrl); + else + id->mdts = 0; + id->cntlid = cpu_to_le16(ctrl->cntlid); id->ver = cpu_to_le32(ctrl->subsys->ver); @@ -720,13 +738,22 @@ static void nvmet_execute_set_features(struct nvmet_req *req) { struct nvmet_subsys *subsys = req->sq->ctrl->subsys; u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); + u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); u16 status = 0; + u16 nsqr; + u16 ncqr; if (!nvmet_check_data_len(req, 0)) return; switch (cdw10 & 0xff) { case NVME_FEAT_NUM_QUEUES: + ncqr = (cdw11 >> 16) & 0xffff; + nsqr = cdw11 & 0xffff; + if (ncqr == 0xffff || nsqr == 0xffff) { + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + break; + } nvmet_set_result(req, (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16)); break; diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 98613a45bd3b..7aa10788b7c8 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -395,14 +395,12 @@ static ssize_t nvmet_ns_device_uuid_store(struct config_item *item, struct nvmet_subsys *subsys = ns->subsys; int ret = 0; - mutex_lock(&subsys->lock); if (ns->enabled) { ret = -EBUSY; goto 
out_unlock; } - if (uuid_parse(page, &ns->uuid)) ret = -EINVAL; @@ -815,10 +813,10 @@ static ssize_t nvmet_subsys_attr_version_show(struct config_item *item, (int)NVME_MAJOR(subsys->ver), (int)NVME_MINOR(subsys->ver), (int)NVME_TERTIARY(subsys->ver)); - else - return snprintf(page, PAGE_SIZE, "%d.%d\n", - (int)NVME_MAJOR(subsys->ver), - (int)NVME_MINOR(subsys->ver)); + + return snprintf(page, PAGE_SIZE, "%d.%d\n", + (int)NVME_MAJOR(subsys->ver), + (int)NVME_MINOR(subsys->ver)); } static ssize_t nvmet_subsys_attr_version_store(struct config_item *item, @@ -828,7 +826,6 @@ static ssize_t nvmet_subsys_attr_version_store(struct config_item *item, int major, minor, tertiary = 0; int ret; - ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary); if (ret != 2 && ret != 3) return -EINVAL; @@ -852,20 +849,151 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item, static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item, const char *page, size_t count) { - struct nvmet_subsys *subsys = to_subsys(item); + u64 serial; + + if (sscanf(page, "%llx\n", &serial) != 1) + return -EINVAL; down_write(&nvmet_config_sem); - sscanf(page, "%llx\n", &subsys->serial); + to_subsys(item)->serial = serial; up_write(&nvmet_config_sem); return count; } CONFIGFS_ATTR(nvmet_subsys_, attr_serial); +static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min); +} + +static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item, + const char *page, size_t cnt) +{ + u16 cntlid_min; + + if (sscanf(page, "%hu\n", &cntlid_min) != 1) + return -EINVAL; + + if (cntlid_min == 0) + return -EINVAL; + + down_write(&nvmet_config_sem); + if (cntlid_min >= to_subsys(item)->cntlid_max) + goto out_unlock; + to_subsys(item)->cntlid_min = cntlid_min; + up_write(&nvmet_config_sem); + return cnt; + +out_unlock: + up_write(&nvmet_config_sem); + return -EINVAL; +} +CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min); + +static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max); +} + +static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item, + const char *page, size_t cnt) +{ + u16 cntlid_max; + + if (sscanf(page, "%hu\n", &cntlid_max) != 1) + return -EINVAL; + + if (cntlid_max == 0) + return -EINVAL; + + down_write(&nvmet_config_sem); + if (cntlid_max <= to_subsys(item)->cntlid_min) + goto out_unlock; + to_subsys(item)->cntlid_max = cntlid_max; + up_write(&nvmet_config_sem); + return cnt; + +out_unlock: + up_write(&nvmet_config_sem); + return -EINVAL; +} +CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max); + +static ssize_t nvmet_subsys_attr_model_show(struct config_item *item, + char *page) +{ + struct nvmet_subsys *subsys = to_subsys(item); + struct nvmet_subsys_model *subsys_model; + char *model = NVMET_DEFAULT_CTRL_MODEL; + int ret; + + rcu_read_lock(); + subsys_model = rcu_dereference(subsys->model); + if (subsys_model) + model = subsys_model->number; + ret = snprintf(page, PAGE_SIZE, "%s\n", model); + rcu_read_unlock(); + + return ret; +} + +/* See Section 1.5 of NVMe 1.4 */ +static bool nvmet_is_ascii(const char c) +{ + return c >= 0x20 && c <= 0x7e; +} + +static ssize_t nvmet_subsys_attr_model_store(struct config_item *item, + const char *page, size_t count) +{ + struct nvmet_subsys *subsys = to_subsys(item); + struct nvmet_subsys_model *new_model; + char *new_model_number; + 
int pos = 0, len; + + len = strcspn(page, "\n"); + if (!len) + return -EINVAL; + + for (pos = 0; pos < len; pos++) { + if (!nvmet_is_ascii(page[pos])) + return -EINVAL; + } + + new_model_number = kstrndup(page, len, GFP_KERNEL); + if (!new_model_number) + return -ENOMEM; + + new_model = kzalloc(sizeof(*new_model) + len + 1, GFP_KERNEL); + if (!new_model) { + kfree(new_model_number); + return -ENOMEM; + } + memcpy(new_model->number, new_model_number, len); + + down_write(&nvmet_config_sem); + mutex_lock(&subsys->lock); + new_model = rcu_replace_pointer(subsys->model, new_model, + mutex_is_locked(&subsys->lock)); + mutex_unlock(&subsys->lock); + up_write(&nvmet_config_sem); + + kfree_rcu(new_model, rcuhead); + + return count; +} +CONFIGFS_ATTR(nvmet_subsys_, attr_model); + static struct configfs_attribute *nvmet_subsys_attrs[] = { &nvmet_subsys_attr_attr_allow_any_host, &nvmet_subsys_attr_attr_version, &nvmet_subsys_attr_attr_serial, + &nvmet_subsys_attr_attr_cntlid_min, + &nvmet_subsys_attr_attr_cntlid_max, + &nvmet_subsys_attr_attr_model, NULL, }; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 576de773b4db..b685f99d56a1 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -1289,8 +1289,11 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, if (!ctrl->sqs) goto out_free_cqs; + if (subsys->cntlid_min > subsys->cntlid_max) + goto out_free_cqs; + ret = ida_simple_get(&cntlid_ida, - NVME_CNTLID_MIN, NVME_CNTLID_MAX, + subsys->cntlid_min, subsys->cntlid_max, GFP_KERNEL); if (ret < 0) { status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; @@ -1438,7 +1441,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, kfree(subsys); return ERR_PTR(-ENOMEM); } - + subsys->cntlid_min = NVME_CNTLID_MIN; + subsys->cntlid_max = NVME_CNTLID_MAX; kref_init(&subsys->ref); mutex_init(&subsys->lock); @@ -1457,6 +1461,7 @@ static void nvmet_subsys_free(struct kref *ref) WARN_ON_ONCE(!list_empty(&subsys->namespaces)); kfree(subsys->subsysnqn); + kfree_rcu(subsys->model, rcuhead); kfree(subsys); } diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 4df4ebde208a..0d54e730cbf2 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -485,7 +485,6 @@ out_destroy_admin: out_disable: dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); nvme_uninit_ctrl(&ctrl->ctrl); - nvme_put_ctrl(&ctrl->ctrl); } static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = { @@ -618,8 +617,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, dev_info(ctrl->ctrl.device, "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn); - nvme_get_ctrl(&ctrl->ctrl); - changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); WARN_ON_ONCE(!changed); diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index eda28b22a2c8..421dff3ea143 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -23,6 +23,7 @@ #define NVMET_ASYNC_EVENTS 4 #define NVMET_ERROR_LOG_SLOTS 128 #define NVMET_NO_ERROR_LOC ((u16)-1) +#define NVMET_DEFAULT_CTRL_MODEL "Linux" /* * Supported optional AENs: @@ -202,6 +203,11 @@ struct nvmet_ctrl { struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS]; }; +struct nvmet_subsys_model { + struct rcu_head rcuhead; + char number[]; +}; + struct nvmet_subsys { enum nvme_subsys_type type; @@ -211,6 +217,8 @@ struct nvmet_subsys { struct list_head namespaces; unsigned int nr_namespaces; unsigned int max_nsid; + u16 cntlid_min; + u16 cntlid_max; struct list_head ctrls; 
@@ -227,6 +235,8 @@ struct nvmet_subsys { struct config_group namespaces_group; struct config_group allowed_hosts_group; + + struct nvmet_subsys_model __rcu *model; }; static inline struct nvmet_subsys *to_subsys(struct config_item *item) @@ -279,6 +289,7 @@ struct nvmet_fabrics_ops { struct nvmet_port *port, char *traddr); u16 (*install_queue)(struct nvmet_sq *nvme_sq); void (*discovery_chg)(struct nvmet_port *port); + u8 (*get_mdts)(const struct nvmet_ctrl *ctrl); }; #define NVMET_MAX_INLINE_BIOVEC 8 diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 37d262a65877..9e1b8c61f54e 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -31,6 +31,9 @@ #define NVMET_RDMA_MAX_INLINE_SGE 4 #define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE) +/* Assume mpsmin == device_page_size == 4KB */ +#define NVMET_RDMA_MAX_MDTS 8 + struct nvmet_rdma_cmd { struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1]; struct ib_cqe cqe; @@ -975,7 +978,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) { struct ib_qp_init_attr qp_attr; struct nvmet_rdma_device *ndev = queue->dev; - int comp_vector, nr_cqe, ret, i; + int comp_vector, nr_cqe, ret, i, factor; /* * Spread the io queues across completion vectors, @@ -1008,7 +1011,9 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) qp_attr.qp_type = IB_QPT_RC; /* +1 for drain */ qp_attr.cap.max_send_wr = queue->send_queue_size + 1; - qp_attr.cap.max_rdma_ctxs = queue->send_queue_size; + factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num, + 1 << NVMET_RDMA_MAX_MDTS); + qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor; qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd, ndev->device->attrs.max_send_sge); @@ -1602,6 +1607,11 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req, } } +static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl) +{ + return NVMET_RDMA_MAX_MDTS; +} + static const struct nvmet_fabrics_ops nvmet_rdma_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_RDMA, @@ -1612,6 +1622,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = { .queue_response = nvmet_rdma_queue_response, .delete_ctrl = nvmet_rdma_delete_ctrl, .disc_traddr = nvmet_rdma_disc_port_addr, + .get_mdts = nvmet_rdma_get_mdts, }; static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index af674fc0bb1e..f0da04e960f4 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -19,6 +19,16 @@ #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE) +/* Define the socket priority to use for connections were it is desirable + * that the NIC consider performing optimized packet processing or filtering. + * A non-zero value being sufficient to indicate general consideration of any + * possible optimization. Making it a module param allows for alternative + * values that may be unique for some NIC implementations. 
+ */ +static int so_priority; +module_param(so_priority, int, 0644); +MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority"); + #define NVMET_TCP_RECV_BUDGET 8 #define NVMET_TCP_SEND_BUDGET 8 #define NVMET_TCP_IO_WORK_BUDGET 64 @@ -515,7 +525,7 @@ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd) return 1; } -static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd) +static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch) { struct nvmet_tcp_queue *queue = cmd->queue; int ret; @@ -523,9 +533,15 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd) while (cmd->cur_sg) { struct page *page = sg_page(cmd->cur_sg); u32 left = cmd->cur_sg->length - cmd->offset; + int flags = MSG_DONTWAIT; + + if ((!last_in_batch && cmd->queue->send_list_len) || + cmd->wbytes_done + left < cmd->req.transfer_len || + queue->data_digest || !queue->nvme_sq.sqhd_disabled) + flags |= MSG_MORE; ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset, - left, MSG_DONTWAIT | MSG_MORE); + left, flags); if (ret <= 0) return ret; @@ -616,7 +632,7 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) return 1; } -static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd) +static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) { struct nvmet_tcp_queue *queue = cmd->queue; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; @@ -626,6 +642,9 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd) }; int ret; + if (!last_in_batch && cmd->queue->send_list_len) + msg.msg_flags |= MSG_MORE; + ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); if (unlikely(ret <= 0)) return ret; @@ -660,13 +679,13 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue, } if (cmd->state == NVMET_TCP_SEND_DATA) { - ret = nvmet_try_send_data(cmd); + ret = nvmet_try_send_data(cmd, last_in_batch); if (ret <= 0) goto done_send; } if (cmd->state == NVMET_TCP_SEND_DDGST) { - ret = nvmet_try_send_ddgst(cmd); + ret = nvmet_try_send_ddgst(cmd, last_in_batch); if (ret <= 0) goto done_send; } @@ -788,7 +807,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) icresp->hdr.pdo = 0; icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen); icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); - icresp->maxdata = cpu_to_le32(0xffff); /* FIXME: support r2t */ + icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */ icresp->cpda = 0; if (queue->hdr_digest) icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE; @@ -1433,6 +1452,13 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) if (ret) return ret; + if (so_priority > 0) { + ret = kernel_setsockopt(sock, SOL_SOCKET, SO_PRIORITY, + (char *)&so_priority, sizeof(so_priority)); + if (ret) + return ret; + } + /* Set socket type of service */ if (inet->rcv_tos > 0) { int tos = inet->rcv_tos; @@ -1622,6 +1648,15 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport) goto err_sock; } + if (so_priority > 0) { + ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_PRIORITY, + (char *)&so_priority, sizeof(so_priority)); + if (ret) { + pr_err("failed to set SO_PRIORITY sock opt %d\n", ret); + goto err_sock; + } + } + ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr, sizeof(port->addr)); if (ret) { diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 8270bbf505fb..9f982c0627a0 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -306,6 +306,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) rc = 
of_mdiobus_register_phy(mdio, child, addr); if (rc && rc != -ENODEV) goto unregister; + break; } } } diff --git a/drivers/of/property.c b/drivers/of/property.c index e851c57a15b0..f104f15b57fb 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -1262,7 +1262,7 @@ static int of_link_property(struct device *dev, struct device_node *con_np, u32 dl_flags; if (dev->of_node == con_np) - dl_flags = DL_FLAG_AUTOPROBE_CONSUMER; + dl_flags = fw_devlink_get_flags(); else dl_flags = DL_FLAG_SYNC_STATE_ONLY; @@ -1299,15 +1299,9 @@ static int of_link_to_suppliers(struct device *dev, return ret; } -static bool of_devlink; -core_param(of_devlink, of_devlink, bool, 0); - static int of_fwnode_add_links(const struct fwnode_handle *fwnode, struct device *dev) { - if (!of_devlink) - return 0; - if (unlikely(!is_of_node(fwnode))) return 0; diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index d20aabc26273..3a10e678c7f4 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -670,7 +670,7 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie, * outbound memory @ 3GB). So instead it will start at the 1x * multiple of its size */ - if (!*rc_bar2_size || *rc_bar2_offset % *rc_bar2_size || + if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) || (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) { dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n", *rc_bar2_size, *rc_bar2_offset); diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c index 30fbe2ea6eab..aafd58da3a89 100644 --- a/drivers/pci/pci-mid.c +++ b/drivers/pci/pci-mid.c @@ -55,15 +55,13 @@ static const struct pci_platform_pm_ops mid_pci_platform_pm = { .need_resume = mid_pci_need_resume, }; -#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } - /* * This table should be in sync with the one in * arch/x86/platform/intel-mid/pwr.c. */ static const struct x86_cpu_id lpss_cpu_ids[] = { - ICPU(INTEL_FAM6_ATOM_SALTWELL_MID), - ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, NULL), {} }; diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 6e3c04b46fb1..7876dc4b28f8 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -34,6 +34,7 @@ config PCIEAER config PCIEAER_INJECT tristate "PCI Express error injection support" depends on PCIEAER + select GENERIC_IRQ_INJECTION help This enables PCI Express Root Port Advanced Error Reporting (AER) software error injector. 
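The pcie-brcmstb hunk above replaces a 64-bit modulo with a mask test when validating rc_bar2_offset against rc_bar2_size. A minimal sketch of that idiom follows, assuming the size is a power of two; the helper name is illustrative only and not taken from the driver:

#include <stdbool.h>
#include <stdint.h>

/*
 * Power-of-two alignment check: for size == 2^n, (off & (size - 1))
 * gives the same result as (off % size), but compiles to a single AND
 * and avoids the 64-bit division helper that '%' on a u64 would pull
 * in on 32-bit targets.
 */
static bool offset_aligned_to_size(uint64_t off, uint64_t size)
{
	return (off & (size - 1)) == 0;
}

With a power-of-two window size, rejecting the configuration when this test fails is equivalent to the original '% size' condition, which is why the hunk can swap one for the other without changing behavior.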
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c index 6988fe7389b9..21cc3d3387f7 100644 --- a/drivers/pci/pcie/aer_inject.c +++ b/drivers/pci/pcie/aer_inject.c @@ -16,7 +16,7 @@ #include <linux/module.h> #include <linux/init.h> -#include <linux/irq.h> +#include <linux/interrupt.h> #include <linux/miscdevice.h> #include <linux/pci.h> #include <linux/slab.h> @@ -468,9 +468,7 @@ static int aer_inject(struct aer_error_inj *einj) } pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n", einj->cor_status, einj->uncor_status, pci_name(dev)); - local_irq_disable(); - generic_handle_irq(edev->irq); - local_irq_enable(); + ret = irq_inject_interrupt(edev->irq); } else { pci_err(rpdev, "AER device not found\n"); ret = -ENODEV; diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index a823b4b8ef8a..e69cac84b605 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -52,10 +52,11 @@ struct switchtec_user { enum mrpc_state state; - struct completion comp; + wait_queue_head_t cmd_comp; struct kref kref; struct list_head list; + bool cmd_done; u32 cmd; u32 status; u32 return_code; @@ -77,7 +78,7 @@ static struct switchtec_user *stuser_create(struct switchtec_dev *stdev) stuser->stdev = stdev; kref_init(&stuser->kref); INIT_LIST_HEAD(&stuser->list); - init_completion(&stuser->comp); + init_waitqueue_head(&stuser->cmd_comp); stuser->event_cnt = atomic_read(&stdev->event_cnt); dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); @@ -175,7 +176,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser) kref_get(&stuser->kref); stuser->read_len = sizeof(stuser->data); stuser_set_state(stuser, MRPC_QUEUED); - init_completion(&stuser->comp); + stuser->cmd_done = false; list_add_tail(&stuser->list, &stdev->mrpc_queue); mrpc_cmd_submit(stdev); @@ -222,7 +223,8 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev) memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data, stuser->read_len); out: - complete_all(&stuser->comp); + stuser->cmd_done = true; + wake_up_interruptible(&stuser->cmd_comp); list_del_init(&stuser->list); stuser_put(stuser); stdev->mrpc_busy = 0; @@ -529,10 +531,11 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data, mutex_unlock(&stdev->mrpc_mutex); if (filp->f_flags & O_NONBLOCK) { - if (!try_wait_for_completion(&stuser->comp)) + if (!stuser->cmd_done) return -EAGAIN; } else { - rc = wait_for_completion_interruptible(&stuser->comp); + rc = wait_event_interruptible(stuser->cmd_comp, + stuser->cmd_done); if (rc < 0) return rc; } @@ -580,7 +583,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait) struct switchtec_dev *stdev = stuser->stdev; __poll_t ret = 0; - poll_wait(filp, &stuser->comp.wait, wait); + poll_wait(filp, &stuser->cmd_comp, wait); poll_wait(filp, &stdev->event_wq, wait); if (lock_mutex_and_test_alive(stdev)) @@ -588,7 +591,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait) mutex_unlock(&stdev->mrpc_mutex); - if (try_wait_for_completion(&stuser->comp)) + if (stuser->cmd_done) ret |= EPOLLIN | EPOLLRDNORM; if (stuser->event_cnt != atomic_read(&stdev->event_cnt)) @@ -1272,7 +1275,8 @@ static void stdev_kill(struct switchtec_dev *stdev) /* Wake up and kill any users waiting on an MRPC request */ list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) { - complete_all(&stuser->comp); + stuser->cmd_done = true; + wake_up_interruptible(&stuser->cmd_comp); list_del_init(&stuser->list); stuser_put(stuser); 
} diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index acce8781c456..f5c7a845cd7b 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c @@ -24,8 +24,6 @@ static int arm_pmu_acpi_register_irq(int cpu) int gsi, trigger; gicc = acpi_cpu_get_madt_gicc(cpu); - if (WARN_ON(!gicc)) - return -EINVAL; gsi = gicc->performance_interrupt; @@ -64,11 +62,10 @@ static void arm_pmu_acpi_unregister_irq(int cpu) int gsi; gicc = acpi_cpu_get_madt_gicc(cpu); - if (!gicc) - return; gsi = gicc->performance_interrupt; - acpi_unregister_gsi(gsi); + if (gsi) + acpi_unregister_gsi(gsi); } #if IS_ENABLED(CONFIG_ARM_SPE_PMU) diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 95dca2cb5265..90884d14f95f 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -388,9 +388,10 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config, if (enable) { /* - * must disable first, then enable again - * otherwise, cycle counter will not work - * if previous state is enabled. + * cycle counter is special which should firstly write 0 then + * write 1 into CLEAR bit to clear it. Other counters only + * need write 0 into CLEAR bit and it turns out to be 1 by + * hardware. Below enable flow is harmless for all counters. */ writel(0, pmu->base + reg); val = CNTL_EN | CNTL_CLEAR; @@ -398,7 +399,8 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config, writel(val, pmu->base + reg); } else { /* Disable counter */ - writel(0, pmu->base + reg); + val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK; + writel(val, pmu->base + reg); } } diff --git a/drivers/phy/allwinner/phy-sun50i-usb3.c b/drivers/phy/allwinner/phy-sun50i-usb3.c index 1169f3e83a6f..b1c04f71a31d 100644 --- a/drivers/phy/allwinner/phy-sun50i-usb3.c +++ b/drivers/phy/allwinner/phy-sun50i-usb3.c @@ -49,7 +49,7 @@ #define SUNXI_LOS_BIAS(n) ((n) << 3) #define SUNXI_LOS_BIAS_MASK GENMASK(5, 3) #define SUNXI_TXVBOOSTLVL(n) ((n) << 0) -#define SUNXI_TXVBOOSTLVL_MASK GENMASK(0, 2) +#define SUNXI_TXVBOOSTLVL_MASK GENMASK(2, 0) struct sun50i_usb3_phy { struct phy *phy; diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb2.c b/drivers/phy/amlogic/phy-meson-g12a-usb2.c index 9065ffc85eb4..b26e30e1afaf 100644 --- a/drivers/phy/amlogic/phy-meson-g12a-usb2.c +++ b/drivers/phy/amlogic/phy-meson-g12a-usb2.c @@ -66,7 +66,7 @@ #define PHY_CTRL_R14 0x38 #define PHY_CTRL_R14_I_RDP_EN BIT(0) #define PHY_CTRL_R14_I_RPU_SW1_EN BIT(1) - #define PHY_CTRL_R14_I_RPU_SW2_EN GENMASK(2, 3) + #define PHY_CTRL_R14_I_RPU_SW2_EN GENMASK(3, 2) #define PHY_CTRL_R14_PG_RSTN BIT(4) #define PHY_CTRL_R14_I_C2L_DATA_16_8 BIT(5) #define PHY_CTRL_R14_I_C2L_ASSERT_SINGLE_EN_ZERO BIT(6) @@ -146,11 +146,17 @@ #define RESET_COMPLETE_TIME 1000 #define PLL_RESET_COMPLETE_TIME 100 +enum meson_soc_id { + MESON_SOC_G12A = 0, + MESON_SOC_A1, +}; + struct phy_meson_g12a_usb2_priv { struct device *dev; struct regmap *regmap; struct clk *clk; struct reset_control *reset; + int soc_id; }; static const struct regmap_config phy_meson_g12a_usb2_regmap_conf = { @@ -164,6 +170,7 @@ static int phy_meson_g12a_usb2_init(struct phy *phy) { struct phy_meson_g12a_usb2_priv *priv = phy_get_drvdata(phy); int ret; + unsigned int value; ret = reset_control_reset(priv->reset); if (ret) @@ -192,18 +199,22 @@ static int phy_meson_g12a_usb2_init(struct phy *phy) FIELD_PREP(PHY_CTRL_R17_MPLL_FILTER_PVT2, 2) | FIELD_PREP(PHY_CTRL_R17_MPLL_FILTER_PVT1, 9)); - regmap_write(priv->regmap, PHY_CTRL_R18, - 
FIELD_PREP(PHY_CTRL_R18_MPLL_LKW_SEL, 1) | - FIELD_PREP(PHY_CTRL_R18_MPLL_LK_W, 9) | - FIELD_PREP(PHY_CTRL_R18_MPLL_LK_S, 0x27) | - FIELD_PREP(PHY_CTRL_R18_MPLL_PFD_GAIN, 1) | - FIELD_PREP(PHY_CTRL_R18_MPLL_ROU, 7) | - FIELD_PREP(PHY_CTRL_R18_MPLL_DATA_SEL, 3) | - FIELD_PREP(PHY_CTRL_R18_MPLL_BIAS_ADJ, 1) | - FIELD_PREP(PHY_CTRL_R18_MPLL_BB_MODE, 0) | - FIELD_PREP(PHY_CTRL_R18_MPLL_ALPHA, 3) | - FIELD_PREP(PHY_CTRL_R18_MPLL_ADJ_LDO, 1) | - PHY_CTRL_R18_MPLL_ACG_RANGE); + value = FIELD_PREP(PHY_CTRL_R18_MPLL_LKW_SEL, 1) | + FIELD_PREP(PHY_CTRL_R18_MPLL_LK_W, 9) | + FIELD_PREP(PHY_CTRL_R18_MPLL_LK_S, 0x27) | + FIELD_PREP(PHY_CTRL_R18_MPLL_PFD_GAIN, 1) | + FIELD_PREP(PHY_CTRL_R18_MPLL_ROU, 7) | + FIELD_PREP(PHY_CTRL_R18_MPLL_DATA_SEL, 3) | + FIELD_PREP(PHY_CTRL_R18_MPLL_BIAS_ADJ, 1) | + FIELD_PREP(PHY_CTRL_R18_MPLL_BB_MODE, 0) | + FIELD_PREP(PHY_CTRL_R18_MPLL_ALPHA, 3) | + FIELD_PREP(PHY_CTRL_R18_MPLL_ADJ_LDO, 1) | + PHY_CTRL_R18_MPLL_ACG_RANGE; + + if (priv->soc_id == MESON_SOC_A1) + value |= PHY_CTRL_R18_MPLL_DCO_CLK_SEL; + + regmap_write(priv->regmap, PHY_CTRL_R18, value); udelay(PLL_RESET_COMPLETE_TIME); @@ -227,13 +238,24 @@ static int phy_meson_g12a_usb2_init(struct phy *phy) FIELD_PREP(PHY_CTRL_R20_USB2_BGR_VREF_4_0, 0) | FIELD_PREP(PHY_CTRL_R20_USB2_BGR_DBG_1_0, 0)); - regmap_write(priv->regmap, PHY_CTRL_R4, - FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_7_0, 0xf) | - FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_15_8, 0xf) | - FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_23_16, 0xf) | - PHY_CTRL_R4_TEST_BYPASS_MODE_EN | - FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_1_0, 0) | - FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_3_2, 0)); + if (priv->soc_id == MESON_SOC_G12A) + regmap_write(priv->regmap, PHY_CTRL_R4, + FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_7_0, 0xf) | + FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_15_8, 0xf) | + FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_23_16, 0xf) | + PHY_CTRL_R4_TEST_BYPASS_MODE_EN | + FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_1_0, 0) | + FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_3_2, 0)); + else if (priv->soc_id == MESON_SOC_A1) { + regmap_write(priv->regmap, PHY_CTRL_R21, + PHY_CTRL_R21_USB2_CAL_ACK_EN | + PHY_CTRL_R21_USB2_TX_STRG_PD | + FIELD_PREP(PHY_CTRL_R21_USB2_OTG_ACA_TRIM_1_0, 2)); + + /* Analog Settings */ + regmap_write(priv->regmap, PHY_CTRL_R13, + FIELD_PREP(PHY_CTRL_R13_MIN_COUNT_FOR_SYNC_DET, 7)); + } /* Tuning Disconnect Threshold */ regmap_write(priv->regmap, PHY_CTRL_R3, @@ -241,11 +263,13 @@ static int phy_meson_g12a_usb2_init(struct phy *phy) FIELD_PREP(PHY_CTRL_R3_HSDIC_REF, 1) | FIELD_PREP(PHY_CTRL_R3_DISC_THRESH, 3)); - /* Analog Settings */ - regmap_write(priv->regmap, PHY_CTRL_R14, 0); - regmap_write(priv->regmap, PHY_CTRL_R13, - PHY_CTRL_R13_UPDATE_PMA_SIGNALS | - FIELD_PREP(PHY_CTRL_R13_MIN_COUNT_FOR_SYNC_DET, 7)); + if (priv->soc_id == MESON_SOC_G12A) { + /* Analog Settings */ + regmap_write(priv->regmap, PHY_CTRL_R14, 0); + regmap_write(priv->regmap, PHY_CTRL_R13, + PHY_CTRL_R13_UPDATE_PMA_SIGNALS | + FIELD_PREP(PHY_CTRL_R13_MIN_COUNT_FOR_SYNC_DET, 7)); + } return 0; } @@ -286,6 +310,8 @@ static int phy_meson_g12a_usb2_probe(struct platform_device *pdev) if (IS_ERR(base)) return PTR_ERR(base); + priv->soc_id = (enum meson_soc_id)of_device_get_match_data(&pdev->dev); + priv->regmap = devm_regmap_init_mmio(dev, base, &phy_meson_g12a_usb2_regmap_conf); if (IS_ERR(priv->regmap)) @@ -321,8 +347,15 @@ static int phy_meson_g12a_usb2_probe(struct platform_device *pdev) } static const struct of_device_id phy_meson_g12a_usb2_of_match[] = { - { .compatible = "amlogic,g12a-usb2-phy", }, - { }, + { + .compatible 
= "amlogic,g12a-usb2-phy", + .data = (void *)MESON_SOC_G12A, + }, + { + .compatible = "amlogic,a1-usb2-phy", + .data = (void *)MESON_SOC_A1, + }, + { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, phy_meson_g12a_usb2_of_match); diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c index 4710cfcc3037..18251f232172 100644 --- a/drivers/phy/broadcom/phy-brcm-sata.c +++ b/drivers/phy/broadcom/phy-brcm-sata.c @@ -186,29 +186,6 @@ enum sata_phy_ctrl_regs { PHY_CTRL_1_RESET = BIT(0), }; -static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port) -{ - struct brcm_sata_phy *priv = port->phy_priv; - u32 size = 0; - - switch (priv->version) { - case BRCM_SATA_PHY_STB_16NM: - case BRCM_SATA_PHY_STB_28NM: - case BRCM_SATA_PHY_IPROC_NS2: - case BRCM_SATA_PHY_DSL_28NM: - size = SATA_PCB_REG_28NM_SPACE_SIZE; - break; - case BRCM_SATA_PHY_STB_40NM: - size = SATA_PCB_REG_40NM_SPACE_SIZE; - break; - default: - dev_err(priv->dev, "invalid phy version\n"); - break; - } - - return priv->phy_base + (port->portnum * size); -} - static inline void __iomem *brcm_sata_ctrl_base(struct brcm_sata_port *port) { struct brcm_sata_phy *priv = port->phy_priv; @@ -226,19 +203,34 @@ static inline void __iomem *brcm_sata_ctrl_base(struct brcm_sata_port *port) return priv->ctrl_base + (port->portnum * size); } -static void brcm_sata_phy_wr(void __iomem *pcb_base, u32 bank, +static void brcm_sata_phy_wr(struct brcm_sata_port *port, u32 bank, u32 ofs, u32 msk, u32 value) { + struct brcm_sata_phy *priv = port->phy_priv; + void __iomem *pcb_base = priv->phy_base; u32 tmp; + if (priv->version == BRCM_SATA_PHY_STB_40NM) + bank += (port->portnum * SATA_PCB_REG_40NM_SPACE_SIZE); + else + pcb_base += (port->portnum * SATA_PCB_REG_28NM_SPACE_SIZE); + writel(bank, pcb_base + SATA_PCB_BANK_OFFSET); tmp = readl(pcb_base + SATA_PCB_REG_OFFSET(ofs)); tmp = (tmp & msk) | value; writel(tmp, pcb_base + SATA_PCB_REG_OFFSET(ofs)); } -static u32 brcm_sata_phy_rd(void __iomem *pcb_base, u32 bank, u32 ofs) +static u32 brcm_sata_phy_rd(struct brcm_sata_port *port, u32 bank, u32 ofs) { + struct brcm_sata_phy *priv = port->phy_priv; + void __iomem *pcb_base = priv->phy_base; + + if (priv->version == BRCM_SATA_PHY_STB_40NM) + bank += (port->portnum * SATA_PCB_REG_40NM_SPACE_SIZE); + else + pcb_base += (port->portnum * SATA_PCB_REG_28NM_SPACE_SIZE); + writel(bank, pcb_base + SATA_PCB_BANK_OFFSET); return readl(pcb_base + SATA_PCB_REG_OFFSET(ofs)); } @@ -250,16 +242,15 @@ static u32 brcm_sata_phy_rd(void __iomem *pcb_base, u32 bank, u32 ofs) static void brcm_stb_sata_ssc_init(struct brcm_sata_port *port) { - void __iomem *base = brcm_sata_pcb_base(port); struct brcm_sata_phy *priv = port->phy_priv; u32 tmp; /* override the TX spread spectrum setting */ tmp = TXPMD_CONTROL1_TX_SSC_EN_FRC_VAL | TXPMD_CONTROL1_TX_SSC_EN_FRC; - brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_CONTROL1, ~tmp, tmp); + brcm_sata_phy_wr(port, TXPMD_REG_BANK, TXPMD_CONTROL1, ~tmp, tmp); /* set fixed min freq */ - brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL2, + brcm_sata_phy_wr(port, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL2, ~TXPMD_TX_FREQ_CTRL_CONTROL2_FMIN_MASK, STB_FMIN_VAL_DEFAULT); @@ -271,7 +262,7 @@ static void brcm_stb_sata_ssc_init(struct brcm_sata_port *port) tmp = STB_FMAX_VAL_DEFAULT; } - brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL3, + brcm_sata_phy_wr(port, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL3, ~TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK, tmp); } @@ -280,7 +271,6 @@ static 
void brcm_stb_sata_ssc_init(struct brcm_sata_port *port) static int brcm_stb_sata_rxaeq_init(struct brcm_sata_port *port) { - void __iomem *base = brcm_sata_pcb_base(port); u32 tmp = 0, reg = 0; switch (port->rxaeq_mode) { @@ -301,8 +291,8 @@ static int brcm_stb_sata_rxaeq_init(struct brcm_sata_port *port) break; } - brcm_sata_phy_wr(base, AEQRX_REG_BANK_0, reg, ~tmp, tmp); - brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, reg, ~tmp, tmp); + brcm_sata_phy_wr(port, AEQRX_REG_BANK_0, reg, ~tmp, tmp); + brcm_sata_phy_wr(port, AEQRX_REG_BANK_1, reg, ~tmp, tmp); return 0; } @@ -316,18 +306,17 @@ static int brcm_stb_sata_init(struct brcm_sata_port *port) static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port) { - void __iomem *base = brcm_sata_pcb_base(port); u32 tmp, value; /* Reduce CP tail current to 1/16th of its default value */ - brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0x141); + brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0x141); /* Turn off CP tail current boost */ - brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL8, 0, 0xc006); + brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL8, 0, 0xc006); /* Set a specific AEQ equalizer value */ tmp = AEQ_FRC_EQ_FORCE_VAL | AEQ_FRC_EQ_FORCE; - brcm_sata_phy_wr(base, AEQRX_REG_BANK_0, AEQ_FRC_EQ, + brcm_sata_phy_wr(port, AEQRX_REG_BANK_0, AEQ_FRC_EQ, ~(tmp | AEQ_RFZ_FRC_VAL | AEQ_FRC_EQ_VAL_MASK << AEQ_FRC_EQ_VAL_SHIFT), tmp | 32 << AEQ_FRC_EQ_VAL_SHIFT); @@ -337,7 +326,7 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port) value = 0x52; else value = 0; - brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CONTROL1, + brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_CDR_CONTROL1, ~RXPMD_RX_PPM_VAL_MASK, value); /* Set proportional loop bandwith Gen1/2/3 */ @@ -352,7 +341,7 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port) value = 1 << RXPMD_G1_CDR_PROP_BW_SHIFT | 1 << RXPMD_G2_CDR_PROP_BW_SHIFT | 1 << RXPMD_G3_CDR_PROB_BW_SHIFT; - brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_PROP_BW, ~tmp, + brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_PROP_BW, ~tmp, value); /* Set CDR integral loop acquisition bandwidth for Gen1/2/3 */ @@ -365,7 +354,7 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port) 1 << RXPMD_G3_CDR_ACQ_INT_BW_SHIFT; else value = 0; - brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_ACQ_INTEG_BW, + brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_ACQ_INTEG_BW, ~tmp, value); /* Set CDR integral loop locking bandwidth to 1 for Gen 1/2/3 */ @@ -378,7 +367,7 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port) 1 << RXPMD_G3_CDR_LOCK_INT_BW_SHIFT; else value = 0; - brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_LOCK_INTEG_BW, + brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_LOCK_INTEG_BW, ~tmp, value); /* Set no guard band and clamp CDR */ @@ -387,11 +376,11 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port) value = 0x51; else value = 0; - brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1, + brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1, ~tmp, RXPMD_MON_CORRECT_EN | value); /* Turn on/off SSC */ - brcm_sata_phy_wr(base, TX_REG_BANK, TX_ACTRL5, ~TX_ACTRL5_SSC_EN, + brcm_sata_phy_wr(port, TX_REG_BANK, TX_ACTRL5, ~TX_ACTRL5_SSC_EN, port->ssc_en ? 
TX_ACTRL5_SSC_EN : 0); return 0; @@ -411,7 +400,6 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port) { int try; unsigned int val; - void __iomem *base = brcm_sata_pcb_base(port); void __iomem *ctrl_base = brcm_sata_ctrl_base(port); struct device *dev = port->phy_priv->dev; @@ -421,24 +409,24 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port) val |= (0x4 << OOB_CTRL1_BURST_MIN_SHIFT); val |= (0x9 << OOB_CTRL1_WAKE_IDLE_MAX_SHIFT); val |= (0x3 << OOB_CTRL1_WAKE_IDLE_MIN_SHIFT); - brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL1, 0x0, val); + brcm_sata_phy_wr(port, OOB_REG_BANK, OOB_CTRL1, 0x0, val); val = 0x0; val |= (0x1b << OOB_CTRL2_RESET_IDLE_MAX_SHIFT); val |= (0x2 << OOB_CTRL2_BURST_CNT_SHIFT); val |= (0x9 << OOB_CTRL2_RESET_IDLE_MIN_SHIFT); - brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL2, 0x0, val); + brcm_sata_phy_wr(port, OOB_REG_BANK, OOB_CTRL2, 0x0, val); /* Configure PHY PLL register bank 1 */ val = NS2_PLL1_ACTRL2_MAGIC; - brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val); + brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val); val = NS2_PLL1_ACTRL3_MAGIC; - brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val); + brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val); val = NS2_PLL1_ACTRL4_MAGIC; - brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val); + brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val); /* Configure PHY BLOCK0 register bank */ /* Set oob_clk_sel to refclk/2 */ - brcm_sata_phy_wr(base, BLOCK0_REG_BANK, BLOCK0_SPARE, + brcm_sata_phy_wr(port, BLOCK0_REG_BANK, BLOCK0_SPARE, ~BLOCK0_SPARE_OOB_CLK_SEL_MASK, BLOCK0_SPARE_OOB_CLK_SEL_REFBY2); @@ -451,7 +439,7 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port) /* Wait for PHY PLL lock by polling pll_lock bit */ try = 50; while (try) { - val = brcm_sata_phy_rd(base, BLOCK0_REG_BANK, + val = brcm_sata_phy_rd(port, BLOCK0_REG_BANK, BLOCK0_XGXSSTATUS); if (val & BLOCK0_XGXSSTATUS_PLL_LOCK) break; @@ -471,9 +459,7 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port) static int brcm_nsp_sata_init(struct brcm_sata_port *port) { - struct brcm_sata_phy *priv = port->phy_priv; struct device *dev = port->phy_priv->dev; - void __iomem *base = priv->phy_base; unsigned int oob_bank; unsigned int val, try; @@ -490,36 +476,36 @@ static int brcm_nsp_sata_init(struct brcm_sata_port *port) val |= (0x06 << OOB_CTRL1_BURST_MIN_SHIFT); val |= (0x0f << OOB_CTRL1_WAKE_IDLE_MAX_SHIFT); val |= (0x06 << OOB_CTRL1_WAKE_IDLE_MIN_SHIFT); - brcm_sata_phy_wr(base, oob_bank, OOB_CTRL1, 0x0, val); + brcm_sata_phy_wr(port, oob_bank, OOB_CTRL1, 0x0, val); val = 0x0; val |= (0x2e << OOB_CTRL2_RESET_IDLE_MAX_SHIFT); val |= (0x02 << OOB_CTRL2_BURST_CNT_SHIFT); val |= (0x16 << OOB_CTRL2_RESET_IDLE_MIN_SHIFT); - brcm_sata_phy_wr(base, oob_bank, OOB_CTRL2, 0x0, val); + brcm_sata_phy_wr(port, oob_bank, OOB_CTRL2, 0x0, val); - brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_ACTRL2, + brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_ACTRL2, ~(PLL_ACTRL2_SELDIV_MASK << PLL_ACTRL2_SELDIV_SHIFT), 0x0c << PLL_ACTRL2_SELDIV_SHIFT); - brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_CAP_CONTROL, + brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_CAP_CONTROL, 0xff0, 0x4f0); val = PLLCONTROL_0_FREQ_DET_RESTART | PLLCONTROL_0_FREQ_MONITOR; - brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0, + brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0, ~val, val); val = PLLCONTROL_0_SEQ_START; - brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0, + 
brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0, ~val, 0); mdelay(10); - brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0, + brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0, ~val, val); /* Wait for pll_seq_done bit */ try = 50; while (--try) { - val = brcm_sata_phy_rd(base, BLOCK0_REG_BANK, + val = brcm_sata_phy_rd(port, BLOCK0_REG_BANK, BLOCK0_XGXSSTATUS); if (val & BLOCK0_XGXSSTATUS_PLL_LOCK) break; @@ -546,27 +532,25 @@ static int brcm_nsp_sata_init(struct brcm_sata_port *port) static int brcm_sr_sata_init(struct brcm_sata_port *port) { - struct brcm_sata_phy *priv = port->phy_priv; struct device *dev = port->phy_priv->dev; - void __iomem *base = priv->phy_base; unsigned int val, try; /* Configure PHY PLL register bank 1 */ val = SR_PLL1_ACTRL2_MAGIC; - brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val); + brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val); val = SR_PLL1_ACTRL3_MAGIC; - brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val); + brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val); val = SR_PLL1_ACTRL4_MAGIC; - brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val); + brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val); /* Configure PHY PLL register bank 0 */ val = SR_PLL0_ACTRL6_MAGIC; - brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_ACTRL6, 0x0, val); + brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_ACTRL6, 0x0, val); /* Wait for PHY PLL lock by polling pll_lock bit */ try = 50; do { - val = brcm_sata_phy_rd(base, BLOCK0_REG_BANK, + val = brcm_sata_phy_rd(port, BLOCK0_REG_BANK, BLOCK0_XGXSSTATUS); if (val & BLOCK0_XGXSSTATUS_PLL_LOCK) break; @@ -581,7 +565,7 @@ static int brcm_sr_sata_init(struct brcm_sata_port *port) } /* Invert Tx polarity */ - brcm_sata_phy_wr(base, TX_REG_BANK, TX_ACTRL0, + brcm_sata_phy_wr(port, TX_REG_BANK, TX_ACTRL0, ~TX_ACTRL0_TXPOL_FLIP, TX_ACTRL0_TXPOL_FLIP); /* Configure OOB control to handle 100MHz reference clock */ @@ -589,52 +573,51 @@ static int brcm_sr_sata_init(struct brcm_sata_port *port) (0x4 << OOB_CTRL1_BURST_MIN_SHIFT) | (0x8 << OOB_CTRL1_WAKE_IDLE_MAX_SHIFT) | (0x3 << OOB_CTRL1_WAKE_IDLE_MIN_SHIFT)); - brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL1, 0x0, val); + brcm_sata_phy_wr(port, OOB_REG_BANK, OOB_CTRL1, 0x0, val); val = ((0x1b << OOB_CTRL2_RESET_IDLE_MAX_SHIFT) | (0x2 << OOB_CTRL2_BURST_CNT_SHIFT) | (0x9 << OOB_CTRL2_RESET_IDLE_MIN_SHIFT)); - brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL2, 0x0, val); + brcm_sata_phy_wr(port, OOB_REG_BANK, OOB_CTRL2, 0x0, val); return 0; } static int brcm_dsl_sata_init(struct brcm_sata_port *port) { - void __iomem *base = brcm_sata_pcb_base(port); struct device *dev = port->phy_priv->dev; unsigned int try; u32 tmp; - brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL7, 0, 0x873); + brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL7, 0, 0x873); - brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0xc000); + brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0xc000); - brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0, + brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0, 0, 0x3089); usleep_range(1000, 2000); - brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0, + brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0, 0, 0x3088); usleep_range(1000, 2000); - brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, AEQRX_SLCAL0_CTRL0, + brcm_sata_phy_wr(port, AEQRX_REG_BANK_1, AEQRX_SLCAL0_CTRL0, 0, 0x3000); - brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, 
AEQRX_SLCAL1_CTRL0, + brcm_sata_phy_wr(port, AEQRX_REG_BANK_1, AEQRX_SLCAL1_CTRL0, 0, 0x3000); usleep_range(1000, 2000); - brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_CAP_CHARGE_TIME, 0, 0x32); + brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_CAP_CHARGE_TIME, 0, 0x32); - brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_VCO_CAL_THRESH, 0, 0xa); + brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_VCO_CAL_THRESH, 0, 0xa); - brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_FREQ_DET_TIME, 0, 0x64); + brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_FREQ_DET_TIME, 0, 0x64); usleep_range(1000, 2000); /* Acquire PLL lock */ try = 50; while (try) { - tmp = brcm_sata_phy_rd(base, BLOCK0_REG_BANK, + tmp = brcm_sata_phy_rd(port, BLOCK0_REG_BANK, BLOCK0_XGXSSTATUS); if (tmp & BLOCK0_XGXSSTATUS_PLL_LOCK) break; @@ -687,10 +670,9 @@ static int brcm_sata_phy_init(struct phy *phy) static void brcm_stb_sata_calibrate(struct brcm_sata_port *port) { - void __iomem *base = brcm_sata_pcb_base(port); u32 tmp = BIT(8); - brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1, + brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1, ~tmp, tmp); } diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig index b2db916db64b..459545871608 100644 --- a/drivers/phy/cadence/Kconfig +++ b/drivers/phy/cadence/Kconfig @@ -3,13 +3,13 @@ # Phy drivers for Cadence PHYs # -config PHY_CADENCE_DP - tristate "Cadence MHDP DisplayPort PHY driver" +config PHY_CADENCE_TORRENT + tristate "Cadence Torrent PHY driver" depends on OF depends on HAS_IOMEM select GENERIC_PHY help - Support for Cadence MHDP DisplayPort PHY. + Support for Cadence Torrent PHY. config PHY_CADENCE_DPHY tristate "Cadence D-PHY Support" diff --git a/drivers/phy/cadence/Makefile b/drivers/phy/cadence/Makefile index 8f89560f1711..6a7ffc6ea599 100644 --- a/drivers/phy/cadence/Makefile +++ b/drivers/phy/cadence/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_PHY_CADENCE_DP) += phy-cadence-dp.o +obj-$(CONFIG_PHY_CADENCE_TORRENT) += phy-cadence-torrent.o obj-$(CONFIG_PHY_CADENCE_DPHY) += cdns-dphy.o obj-$(CONFIG_PHY_CADENCE_SIERRA) += phy-cadence-sierra.o diff --git a/drivers/phy/cadence/phy-cadence-dp.c b/drivers/phy/cadence/phy-cadence-dp.c deleted file mode 100644 index bc10cb264b7a..000000000000 --- a/drivers/phy/cadence/phy-cadence-dp.c +++ /dev/null @@ -1,541 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Cadence MHDP DisplayPort SD0801 PHY driver. - * - * Copyright 2018 Cadence Design Systems, Inc. 
- * - */ - -#include <linux/delay.h> -#include <linux/err.h> -#include <linux/io.h> -#include <linux/iopoll.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/phy/phy.h> -#include <linux/platform_device.h> - -#define DEFAULT_NUM_LANES 2 -#define MAX_NUM_LANES 4 -#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */ - -#define POLL_TIMEOUT_US 2000 -#define LANE_MASK 0x7 - -/* - * register offsets from DPTX PHY register block base (i.e MHDP - * register base + 0x30a00) - */ -#define PHY_AUX_CONFIG 0x00 -#define PHY_AUX_CTRL 0x04 -#define PHY_RESET 0x20 -#define PHY_PMA_XCVR_PLLCLK_EN 0x24 -#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28 -#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c -#define PHY_POWER_STATE_LN_0 0x0000 -#define PHY_POWER_STATE_LN_1 0x0008 -#define PHY_POWER_STATE_LN_2 0x0010 -#define PHY_POWER_STATE_LN_3 0x0018 -#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30 -#define PHY_PMA_CMN_READY 0x34 -#define PHY_PMA_XCVR_TX_VMARGIN 0x38 -#define PHY_PMA_XCVR_TX_DEEMPH 0x3c - -/* - * register offsets from SD0801 PHY register block base (i.e MHDP - * register base + 0x500000) - */ -#define CMN_SSM_BANDGAP_TMR 0x00084 -#define CMN_SSM_BIAS_TMR 0x00088 -#define CMN_PLLSM0_PLLPRE_TMR 0x000a8 -#define CMN_PLLSM0_PLLLOCK_TMR 0x000b0 -#define CMN_PLLSM1_PLLPRE_TMR 0x000c8 -#define CMN_PLLSM1_PLLLOCK_TMR 0x000d0 -#define CMN_BGCAL_INIT_TMR 0x00190 -#define CMN_BGCAL_ITER_TMR 0x00194 -#define CMN_IBCAL_INIT_TMR 0x001d0 -#define CMN_PLL0_VCOCAL_INIT_TMR 0x00210 -#define CMN_PLL0_VCOCAL_ITER_TMR 0x00214 -#define CMN_PLL0_VCOCAL_REFTIM_START 0x00218 -#define CMN_PLL0_VCOCAL_PLLCNT_START 0x00220 -#define CMN_PLL0_INTDIV_M0 0x00240 -#define CMN_PLL0_FRACDIVL_M0 0x00244 -#define CMN_PLL0_FRACDIVH_M0 0x00248 -#define CMN_PLL0_HIGH_THR_M0 0x0024c -#define CMN_PLL0_DSM_DIAG_M0 0x00250 -#define CMN_PLL0_LOCK_PLLCNT_START 0x00278 -#define CMN_PLL1_VCOCAL_INIT_TMR 0x00310 -#define CMN_PLL1_VCOCAL_ITER_TMR 0x00314 -#define CMN_PLL1_DSM_DIAG_M0 0x00350 -#define CMN_TXPUCAL_INIT_TMR 0x00410 -#define CMN_TXPUCAL_ITER_TMR 0x00414 -#define CMN_TXPDCAL_INIT_TMR 0x00430 -#define CMN_TXPDCAL_ITER_TMR 0x00434 -#define CMN_RXCAL_INIT_TMR 0x00450 -#define CMN_RXCAL_ITER_TMR 0x00454 -#define CMN_SD_CAL_INIT_TMR 0x00490 -#define CMN_SD_CAL_ITER_TMR 0x00494 -#define CMN_SD_CAL_REFTIM_START 0x00498 -#define CMN_SD_CAL_PLLCNT_START 0x004a0 -#define CMN_PDIAG_PLL0_CTRL_M0 0x00680 -#define CMN_PDIAG_PLL0_CLK_SEL_M0 0x00684 -#define CMN_PDIAG_PLL0_CP_PADJ_M0 0x00690 -#define CMN_PDIAG_PLL0_CP_IADJ_M0 0x00694 -#define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x00698 -#define CMN_PDIAG_PLL0_CP_PADJ_M1 0x006d0 -#define CMN_PDIAG_PLL0_CP_IADJ_M1 0x006d4 -#define CMN_PDIAG_PLL1_CLK_SEL_M0 0x00704 -#define XCVR_DIAG_PLLDRC_CTRL 0x10394 -#define XCVR_DIAG_HSCLK_SEL 0x10398 -#define XCVR_DIAG_HSCLK_DIV 0x1039c -#define TX_PSC_A0 0x10400 -#define TX_PSC_A1 0x10404 -#define TX_PSC_A2 0x10408 -#define TX_PSC_A3 0x1040c -#define RX_PSC_A0 0x20000 -#define RX_PSC_A1 0x20004 -#define RX_PSC_A2 0x20008 -#define RX_PSC_A3 0x2000c -#define PHY_PLL_CFG 0x30038 - -struct cdns_dp_phy { - void __iomem *base; /* DPTX registers base */ - void __iomem *sd_base; /* SD0801 registers base */ - u32 num_lanes; /* Number of lanes to use */ - u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */ - struct device *dev; -}; - -static int cdns_dp_phy_init(struct phy *phy); -static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy); -static void cdns_dp_phy_wait_pma_cmn_ready(struct 
cdns_dp_phy *cdns_phy); -static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy); -static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy); -static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy, - unsigned int lane); -static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy); -static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy); -static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy, - unsigned int offset, - unsigned char start_bit, - unsigned char num_bits, - unsigned int val); - -static const struct phy_ops cdns_dp_phy_ops = { - .init = cdns_dp_phy_init, - .owner = THIS_MODULE, -}; - -static int cdns_dp_phy_init(struct phy *phy) -{ - unsigned char lane_bits; - - struct cdns_dp_phy *cdns_phy = phy_get_drvdata(phy); - - writel(0x0003, cdns_phy->base + PHY_AUX_CTRL); /* enable AUX */ - - /* PHY PMA registers configuration function */ - cdns_dp_phy_pma_cfg(cdns_phy); - - /* - * Set lines power state to A0 - * Set lines pll clk enable to 0 - */ - - cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ, - PHY_POWER_STATE_LN_0, 6, 0x0000); - - if (cdns_phy->num_lanes >= 2) { - cdns_dp_phy_write_field(cdns_phy, - PHY_PMA_XCVR_POWER_STATE_REQ, - PHY_POWER_STATE_LN_1, 6, 0x0000); - - if (cdns_phy->num_lanes == 4) { - cdns_dp_phy_write_field(cdns_phy, - PHY_PMA_XCVR_POWER_STATE_REQ, - PHY_POWER_STATE_LN_2, 6, 0); - cdns_dp_phy_write_field(cdns_phy, - PHY_PMA_XCVR_POWER_STATE_REQ, - PHY_POWER_STATE_LN_3, 6, 0); - } - } - - cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN, - 0, 1, 0x0000); - - if (cdns_phy->num_lanes >= 2) { - cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN, - 1, 1, 0x0000); - if (cdns_phy->num_lanes == 4) { - cdns_dp_phy_write_field(cdns_phy, - PHY_PMA_XCVR_PLLCLK_EN, - 2, 1, 0x0000); - cdns_dp_phy_write_field(cdns_phy, - PHY_PMA_XCVR_PLLCLK_EN, - 3, 1, 0x0000); - } - } - - /* - * release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on - * used lanes - */ - lane_bits = (1 << cdns_phy->num_lanes) - 1; - writel(((0xF & ~lane_bits) << 4) | (0xF & lane_bits), - cdns_phy->base + PHY_RESET); - - /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */ - writel(0x0001, cdns_phy->base + PHY_PMA_XCVR_PLLCLK_EN); - - /* PHY PMA registers configuration functions */ - cdns_dp_phy_pma_cmn_vco_cfg_25mhz(cdns_phy); - cdns_dp_phy_pma_cmn_rate(cdns_phy); - - /* take out of reset */ - cdns_dp_phy_write_field(cdns_phy, PHY_RESET, 8, 1, 1); - cdns_dp_phy_wait_pma_cmn_ready(cdns_phy); - cdns_dp_phy_run(cdns_phy); - - return 0; -} - -static void cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy) -{ - unsigned int reg; - int ret; - - ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_CMN_READY, reg, - reg & 1, 0, 500); - if (ret == -ETIMEDOUT) - dev_err(cdns_phy->dev, - "timeout waiting for PMA common ready\n"); -} - -static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy) -{ - unsigned int i; - - /* PMA common configuration */ - cdns_dp_phy_pma_cmn_cfg_25mhz(cdns_phy); - - /* PMA lane configuration to deal with multi-link operation */ - for (i = 0; i < cdns_phy->num_lanes; i++) - cdns_dp_phy_pma_lane_cfg(cdns_phy, i); -} - -static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy) -{ - /* refclock registers - assumes 25 MHz refclock */ - writel(0x0019, cdns_phy->sd_base + CMN_SSM_BIAS_TMR); - writel(0x0032, cdns_phy->sd_base + CMN_PLLSM0_PLLPRE_TMR); - writel(0x00D1, cdns_phy->sd_base + CMN_PLLSM0_PLLLOCK_TMR); - writel(0x0032, cdns_phy->sd_base + 
CMN_PLLSM1_PLLPRE_TMR); - writel(0x00D1, cdns_phy->sd_base + CMN_PLLSM1_PLLLOCK_TMR); - writel(0x007D, cdns_phy->sd_base + CMN_BGCAL_INIT_TMR); - writel(0x007D, cdns_phy->sd_base + CMN_BGCAL_ITER_TMR); - writel(0x0019, cdns_phy->sd_base + CMN_IBCAL_INIT_TMR); - writel(0x001E, cdns_phy->sd_base + CMN_TXPUCAL_INIT_TMR); - writel(0x0006, cdns_phy->sd_base + CMN_TXPUCAL_ITER_TMR); - writel(0x001E, cdns_phy->sd_base + CMN_TXPDCAL_INIT_TMR); - writel(0x0006, cdns_phy->sd_base + CMN_TXPDCAL_ITER_TMR); - writel(0x02EE, cdns_phy->sd_base + CMN_RXCAL_INIT_TMR); - writel(0x0006, cdns_phy->sd_base + CMN_RXCAL_ITER_TMR); - writel(0x0002, cdns_phy->sd_base + CMN_SD_CAL_INIT_TMR); - writel(0x0002, cdns_phy->sd_base + CMN_SD_CAL_ITER_TMR); - writel(0x000E, cdns_phy->sd_base + CMN_SD_CAL_REFTIM_START); - writel(0x012B, cdns_phy->sd_base + CMN_SD_CAL_PLLCNT_START); - /* PLL registers */ - writel(0x0409, cdns_phy->sd_base + CMN_PDIAG_PLL0_CP_PADJ_M0); - writel(0x1001, cdns_phy->sd_base + CMN_PDIAG_PLL0_CP_IADJ_M0); - writel(0x0F08, cdns_phy->sd_base + CMN_PDIAG_PLL0_FILT_PADJ_M0); - writel(0x0004, cdns_phy->sd_base + CMN_PLL0_DSM_DIAG_M0); - writel(0x00FA, cdns_phy->sd_base + CMN_PLL0_VCOCAL_INIT_TMR); - writel(0x0004, cdns_phy->sd_base + CMN_PLL0_VCOCAL_ITER_TMR); - writel(0x00FA, cdns_phy->sd_base + CMN_PLL1_VCOCAL_INIT_TMR); - writel(0x0004, cdns_phy->sd_base + CMN_PLL1_VCOCAL_ITER_TMR); - writel(0x0318, cdns_phy->sd_base + CMN_PLL0_VCOCAL_REFTIM_START); -} - -static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy) -{ - /* Assumes 25 MHz refclock */ - switch (cdns_phy->max_bit_rate) { - /* Setting VCO for 10.8GHz */ - case 2700: - case 5400: - writel(0x01B0, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0); - writel(0x0000, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0); - writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0); - writel(0x0120, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0); - break; - /* Setting VCO for 9.72GHz */ - case 2430: - case 3240: - writel(0x0184, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0); - writel(0xCCCD, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0); - writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0); - writel(0x0104, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0); - break; - /* Setting VCO for 8.64GHz */ - case 2160: - case 4320: - writel(0x0159, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0); - writel(0x999A, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0); - writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0); - writel(0x00E7, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0); - break; - /* Setting VCO for 8.1GHz */ - case 8100: - writel(0x0144, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0); - writel(0x0000, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0); - writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0); - writel(0x00D8, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0); - break; - } - - writel(0x0002, cdns_phy->sd_base + CMN_PDIAG_PLL0_CTRL_M0); - writel(0x0318, cdns_phy->sd_base + CMN_PLL0_VCOCAL_PLLCNT_START); -} - -static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy) -{ - unsigned int clk_sel_val = 0; - unsigned int hsclk_div_val = 0; - unsigned int i; - - /* 16'h0000 for single DP link configuration */ - writel(0x0000, cdns_phy->sd_base + PHY_PLL_CFG); - - switch (cdns_phy->max_bit_rate) { - case 1620: - clk_sel_val = 0x0f01; - hsclk_div_val = 2; - break; - case 2160: - case 2430: - case 2700: - clk_sel_val = 0x0701; - hsclk_div_val = 1; - break; - case 3240: - clk_sel_val = 0x0b00; - hsclk_div_val = 2; - break; - case 4320: - case 5400: - clk_sel_val = 0x0301; - hsclk_div_val = 
0; - break; - case 8100: - clk_sel_val = 0x0200; - hsclk_div_val = 0; - break; - } - - writel(clk_sel_val, cdns_phy->sd_base + CMN_PDIAG_PLL0_CLK_SEL_M0); - - /* PMA lane configuration to deal with multi-link operation */ - for (i = 0; i < cdns_phy->num_lanes; i++) { - writel(hsclk_div_val, - cdns_phy->sd_base + (XCVR_DIAG_HSCLK_DIV | (i<<11))); - } -} - -static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy, - unsigned int lane) -{ - unsigned int lane_bits = (lane & LANE_MASK) << 11; - - /* Writing Tx/Rx Power State Controllers registers */ - writel(0x00FB, cdns_phy->sd_base + (TX_PSC_A0 | lane_bits)); - writel(0x04AA, cdns_phy->sd_base + (TX_PSC_A2 | lane_bits)); - writel(0x04AA, cdns_phy->sd_base + (TX_PSC_A3 | lane_bits)); - writel(0x0000, cdns_phy->sd_base + (RX_PSC_A0 | lane_bits)); - writel(0x0000, cdns_phy->sd_base + (RX_PSC_A2 | lane_bits)); - writel(0x0000, cdns_phy->sd_base + (RX_PSC_A3 | lane_bits)); - - writel(0x0001, cdns_phy->sd_base + (XCVR_DIAG_PLLDRC_CTRL | lane_bits)); - writel(0x0000, cdns_phy->sd_base + (XCVR_DIAG_HSCLK_SEL | lane_bits)); -} - -static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy) -{ - unsigned int read_val; - u32 write_val1 = 0; - u32 write_val2 = 0; - u32 mask = 0; - int ret; - - /* - * waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the - * master lane - */ - ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_PLLCLK_EN_ACK, - read_val, read_val & 1, 0, POLL_TIMEOUT_US); - if (ret == -ETIMEDOUT) - dev_err(cdns_phy->dev, - "timeout waiting for link PLL clock enable ack\n"); - - ndelay(100); - - switch (cdns_phy->num_lanes) { - - case 1: /* lane 0 */ - write_val1 = 0x00000004; - write_val2 = 0x00000001; - mask = 0x0000003f; - break; - case 2: /* lane 0-1 */ - write_val1 = 0x00000404; - write_val2 = 0x00000101; - mask = 0x00003f3f; - break; - case 4: /* lane 0-3 */ - write_val1 = 0x04040404; - write_val2 = 0x01010101; - mask = 0x3f3f3f3f; - break; - } - - writel(write_val1, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ); - - ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_ACK, - read_val, (read_val & mask) == write_val1, 0, - POLL_TIMEOUT_US); - if (ret == -ETIMEDOUT) - dev_err(cdns_phy->dev, - "timeout waiting for link power state ack\n"); - - writel(0, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ); - ndelay(100); - - writel(write_val2, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ); - - ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_ACK, - read_val, (read_val & mask) == write_val2, 0, - POLL_TIMEOUT_US); - if (ret == -ETIMEDOUT) - dev_err(cdns_phy->dev, - "timeout waiting for link power state ack\n"); - - writel(0, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ); - ndelay(100); -} - -static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy, - unsigned int offset, - unsigned char start_bit, - unsigned char num_bits, - unsigned int val) -{ - unsigned int read_val; - - read_val = readl(cdns_phy->base + offset); - writel(((val << start_bit) | (read_val & ~(((1 << num_bits) - 1) << - start_bit))), cdns_phy->base + offset); -} - -static int cdns_dp_phy_probe(struct platform_device *pdev) -{ - struct resource *regs; - struct cdns_dp_phy *cdns_phy; - struct device *dev = &pdev->dev; - struct phy_provider *phy_provider; - struct phy *phy; - int err; - - cdns_phy = devm_kzalloc(dev, sizeof(*cdns_phy), GFP_KERNEL); - if (!cdns_phy) - return -ENOMEM; - - cdns_phy->dev = &pdev->dev; - - phy = devm_phy_create(dev, NULL, &cdns_dp_phy_ops); - if (IS_ERR(phy)) { - dev_err(dev, "failed to create 
DisplayPort PHY\n"); - return PTR_ERR(phy); - } - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - cdns_phy->base = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(cdns_phy->base)) - return PTR_ERR(cdns_phy->base); - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); - cdns_phy->sd_base = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(cdns_phy->sd_base)) - return PTR_ERR(cdns_phy->sd_base); - - err = device_property_read_u32(dev, "num_lanes", - &(cdns_phy->num_lanes)); - if (err) - cdns_phy->num_lanes = DEFAULT_NUM_LANES; - - switch (cdns_phy->num_lanes) { - case 1: - case 2: - case 4: - /* valid number of lanes */ - break; - default: - dev_err(dev, "unsupported number of lanes: %d\n", - cdns_phy->num_lanes); - return -EINVAL; - } - - err = device_property_read_u32(dev, "max_bit_rate", - &(cdns_phy->max_bit_rate)); - if (err) - cdns_phy->max_bit_rate = DEFAULT_MAX_BIT_RATE; - - switch (cdns_phy->max_bit_rate) { - case 2160: - case 2430: - case 2700: - case 3240: - case 4320: - case 5400: - case 8100: - /* valid bit rate */ - break; - default: - dev_err(dev, "unsupported max bit rate: %dMbps\n", - cdns_phy->max_bit_rate); - return -EINVAL; - } - - phy_set_drvdata(phy, cdns_phy); - - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - - dev_info(dev, "%d lanes, max bit rate %d.%03d Gbps\n", - cdns_phy->num_lanes, - cdns_phy->max_bit_rate / 1000, - cdns_phy->max_bit_rate % 1000); - - return PTR_ERR_OR_ZERO(phy_provider); -} - -static const struct of_device_id cdns_dp_phy_of_match[] = { - { - .compatible = "cdns,dp-phy" - }, - {} -}; -MODULE_DEVICE_TABLE(of, cdns_dp_phy_of_match); - -static struct platform_driver cdns_dp_phy_driver = { - .probe = cdns_dp_phy_probe, - .driver = { - .name = "cdns-dp-phy", - .of_match_table = cdns_dp_phy_of_match, - } -}; -module_platform_driver(cdns_dp_phy_driver); - -MODULE_AUTHOR("Cadence Design Systems, Inc."); -MODULE_DESCRIPTION("Cadence MHDP PHY driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c new file mode 100644 index 000000000000..7116127358ee --- /dev/null +++ b/drivers/phy/cadence/phy-cadence-torrent.c @@ -0,0 +1,1944 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Cadence Torrent SD0801 PHY driver. + * + * Copyright 2018 Cadence Design Systems, Inc. 
+ * + */ + +#include <dt-bindings/phy/phy.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/regmap.h> + +#define REF_CLK_19_2MHz 19200000 +#define REF_CLK_25MHz 25000000 + +#define DEFAULT_NUM_LANES 4 +#define MAX_NUM_LANES 4 +#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */ + +#define POLL_TIMEOUT_US 5000 + +#define TORRENT_COMMON_CDB_OFFSET 0x0 + +#define TORRENT_TX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \ + ((0x4000 << (block_offset)) + \ + (((ln) << 9) << (reg_offset))) + +#define TORRENT_RX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \ + ((0x8000 << (block_offset)) + \ + (((ln) << 9) << (reg_offset))) + +#define TORRENT_PHY_PCS_COMMON_OFFSET(block_offset) \ + (0xC000 << (block_offset)) + +#define TORRENT_PHY_PMA_COMMON_OFFSET(block_offset) \ + (0xE000 << (block_offset)) + +#define TORRENT_DPTX_PHY_OFFSET 0x0 + +/* + * register offsets from DPTX PHY register block base (i.e MHDP + * register base + 0x30a00) + */ +#define PHY_AUX_CTRL 0x04 +#define PHY_RESET 0x20 +#define PMA_TX_ELEC_IDLE_MASK 0xF0U +#define PMA_TX_ELEC_IDLE_SHIFT 4 +#define PHY_L00_RESET_N_MASK 0x01U +#define PHY_PMA_XCVR_PLLCLK_EN 0x24 +#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28 +#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c +#define PHY_POWER_STATE_LN_0 0x0000 +#define PHY_POWER_STATE_LN_1 0x0008 +#define PHY_POWER_STATE_LN_2 0x0010 +#define PHY_POWER_STATE_LN_3 0x0018 +#define PMA_XCVR_POWER_STATE_REQ_LN_MASK 0x3FU +#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30 +#define PHY_PMA_CMN_READY 0x34 + +/* + * register offsets from SD0801 PHY register block base (i.e MHDP + * register base + 0x500000) + */ +#define CMN_SSM_BANDGAP_TMR 0x0021U +#define CMN_SSM_BIAS_TMR 0x0022U +#define CMN_PLLSM0_PLLPRE_TMR 0x002AU +#define CMN_PLLSM0_PLLLOCK_TMR 0x002CU +#define CMN_PLLSM1_PLLPRE_TMR 0x0032U +#define CMN_PLLSM1_PLLLOCK_TMR 0x0034U +#define CMN_BGCAL_INIT_TMR 0x0064U +#define CMN_BGCAL_ITER_TMR 0x0065U +#define CMN_IBCAL_INIT_TMR 0x0074U +#define CMN_PLL0_VCOCAL_TCTRL 0x0082U +#define CMN_PLL0_VCOCAL_INIT_TMR 0x0084U +#define CMN_PLL0_VCOCAL_ITER_TMR 0x0085U +#define CMN_PLL0_VCOCAL_REFTIM_START 0x0086U +#define CMN_PLL0_VCOCAL_PLLCNT_START 0x0088U +#define CMN_PLL0_INTDIV_M0 0x0090U +#define CMN_PLL0_FRACDIVL_M0 0x0091U +#define CMN_PLL0_FRACDIVH_M0 0x0092U +#define CMN_PLL0_HIGH_THR_M0 0x0093U +#define CMN_PLL0_DSM_DIAG_M0 0x0094U +#define CMN_PLL0_SS_CTRL1_M0 0x0098U +#define CMN_PLL0_SS_CTRL2_M0 0x0099U +#define CMN_PLL0_SS_CTRL3_M0 0x009AU +#define CMN_PLL0_SS_CTRL4_M0 0x009BU +#define CMN_PLL0_LOCK_REFCNT_START 0x009CU +#define CMN_PLL0_LOCK_PLLCNT_START 0x009EU +#define CMN_PLL0_LOCK_PLLCNT_THR 0x009FU +#define CMN_PLL1_VCOCAL_TCTRL 0x00C2U +#define CMN_PLL1_VCOCAL_INIT_TMR 0x00C4U +#define CMN_PLL1_VCOCAL_ITER_TMR 0x00C5U +#define CMN_PLL1_VCOCAL_REFTIM_START 0x00C6U +#define CMN_PLL1_VCOCAL_PLLCNT_START 0x00C8U +#define CMN_PLL1_INTDIV_M0 0x00D0U +#define CMN_PLL1_FRACDIVL_M0 0x00D1U +#define CMN_PLL1_FRACDIVH_M0 0x00D2U +#define CMN_PLL1_HIGH_THR_M0 0x00D3U +#define CMN_PLL1_DSM_DIAG_M0 0x00D4U +#define CMN_PLL1_SS_CTRL1_M0 0x00D8U +#define CMN_PLL1_SS_CTRL2_M0 0x00D9U +#define CMN_PLL1_SS_CTRL3_M0 0x00DAU +#define CMN_PLL1_SS_CTRL4_M0 0x00DBU +#define CMN_PLL1_LOCK_REFCNT_START 0x00DCU +#define 
CMN_PLL1_LOCK_PLLCNT_START 0x00DEU +#define CMN_PLL1_LOCK_PLLCNT_THR 0x00DFU +#define CMN_TXPUCAL_INIT_TMR 0x0104U +#define CMN_TXPUCAL_ITER_TMR 0x0105U +#define CMN_TXPDCAL_INIT_TMR 0x010CU +#define CMN_TXPDCAL_ITER_TMR 0x010DU +#define CMN_RXCAL_INIT_TMR 0x0114U +#define CMN_RXCAL_ITER_TMR 0x0115U +#define CMN_SD_CAL_INIT_TMR 0x0124U +#define CMN_SD_CAL_ITER_TMR 0x0125U +#define CMN_SD_CAL_REFTIM_START 0x0126U +#define CMN_SD_CAL_PLLCNT_START 0x0128U +#define CMN_PDIAG_PLL0_CTRL_M0 0x01A0U +#define CMN_PDIAG_PLL0_CLK_SEL_M0 0x01A1U +#define CMN_PDIAG_PLL0_CP_PADJ_M0 0x01A4U +#define CMN_PDIAG_PLL0_CP_IADJ_M0 0x01A5U +#define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x01A6U +#define CMN_PDIAG_PLL0_CP_PADJ_M1 0x01B4U +#define CMN_PDIAG_PLL0_CP_IADJ_M1 0x01B5U +#define CMN_PDIAG_PLL1_CTRL_M0 0x01C0U +#define CMN_PDIAG_PLL1_CLK_SEL_M0 0x01C1U +#define CMN_PDIAG_PLL1_CP_PADJ_M0 0x01C4U +#define CMN_PDIAG_PLL1_CP_IADJ_M0 0x01C5U +#define CMN_PDIAG_PLL1_FILT_PADJ_M0 0x01C6U + +/* PMA TX Lane registers */ +#define TX_TXCC_CTRL 0x0040U +#define TX_TXCC_CPOST_MULT_00 0x004CU +#define TX_TXCC_MGNFS_MULT_000 0x0050U +#define DRV_DIAG_TX_DRV 0x00C6U +#define XCVR_DIAG_PLLDRC_CTRL 0x00E5U +#define XCVR_DIAG_HSCLK_SEL 0x00E6U +#define XCVR_DIAG_HSCLK_DIV 0x00E7U +#define XCVR_DIAG_BIDI_CTRL 0x00EAU +#define TX_PSC_A0 0x0100U +#define TX_PSC_A2 0x0102U +#define TX_PSC_A3 0x0103U +#define TX_RCVDET_ST_TMR 0x0123U +#define TX_DIAG_ACYA 0x01E7U +#define TX_DIAG_ACYA_HBDC_MASK 0x0001U + +/* PMA RX Lane registers */ +#define RX_PSC_A0 0x0000U +#define RX_PSC_A2 0x0002U +#define RX_PSC_A3 0x0003U +#define RX_PSC_CAL 0x0006U +#define RX_REE_GCSM1_CTRL 0x0108U +#define RX_REE_GCSM2_CTRL 0x0110U +#define RX_REE_PERGCSM_CTRL 0x0118U + +/* PHY PCS common registers */ +#define PHY_PLL_CFG 0x000EU + +/* PHY PMA common registers */ +#define PHY_PMA_CMN_CTRL2 0x0001U +#define PHY_PMA_PLL_RAW_CTRL 0x0003U + +static const struct reg_field phy_pll_cfg = + REG_FIELD(PHY_PLL_CFG, 0, 1); + +static const struct reg_field phy_pma_cmn_ctrl_2 = + REG_FIELD(PHY_PMA_CMN_CTRL2, 0, 7); + +static const struct reg_field phy_pma_pll_raw_ctrl = + REG_FIELD(PHY_PMA_PLL_RAW_CTRL, 0, 1); + +static const struct reg_field phy_reset_ctrl = + REG_FIELD(PHY_RESET, 8, 8); + +static const struct of_device_id cdns_torrent_phy_of_match[]; + +struct cdns_torrent_inst { + struct phy *phy; + u32 mlane; + u32 phy_type; + u32 num_lanes; + struct reset_control *lnk_rst; +}; + +struct cdns_torrent_phy { + void __iomem *base; /* DPTX registers base */ + void __iomem *sd_base; /* SD0801 registers base */ + u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */ + struct reset_control *phy_rst; + struct device *dev; + struct clk *clk; + unsigned long ref_clk_rate; + struct cdns_torrent_inst phys[MAX_NUM_LANES]; + int nsubnodes; + struct regmap *regmap; + struct regmap *regmap_common_cdb; + struct regmap *regmap_phy_pcs_common_cdb; + struct regmap *regmap_phy_pma_common_cdb; + struct regmap *regmap_tx_lane_cdb[MAX_NUM_LANES]; + struct regmap *regmap_rx_lane_cdb[MAX_NUM_LANES]; + struct regmap *regmap_dptx_phy_reg; + struct regmap_field *phy_pll_cfg; + struct regmap_field *phy_pma_cmn_ctrl_2; + struct regmap_field *phy_pma_pll_raw_ctrl; + struct regmap_field *phy_reset_ctrl; +}; + +enum phy_powerstate { + POWERSTATE_A0 = 0, + /* Powerstate A1 is unused */ + POWERSTATE_A2 = 2, + POWERSTATE_A3 = 3, +}; + +static int cdns_torrent_dp_init(struct phy *phy); +static int cdns_torrent_dp_exit(struct phy *phy); +static int cdns_torrent_dp_run(struct cdns_torrent_phy 
*cdns_phy, + u32 num_lanes); +static +int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy); +static void cdns_torrent_dp_pma_cfg(struct cdns_torrent_phy *cdns_phy, + struct cdns_torrent_inst *inst); +static +void cdns_torrent_dp_pma_cmn_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy); +static +void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy, + u32 rate, bool ssc); +static +void cdns_torrent_dp_pma_cmn_cfg_25mhz(struct cdns_torrent_phy *cdns_phy); +static +void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy, + u32 rate, bool ssc); +static void cdns_torrent_dp_pma_lane_cfg(struct cdns_torrent_phy *cdns_phy, + unsigned int lane); +static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy, + u32 rate, u32 num_lanes); +static int cdns_torrent_dp_configure(struct phy *phy, + union phy_configure_opts *opts); +static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy, + u32 num_lanes, + enum phy_powerstate powerstate); +static int cdns_torrent_phy_on(struct phy *phy); +static int cdns_torrent_phy_off(struct phy *phy); + +static const struct phy_ops cdns_torrent_phy_ops = { + .init = cdns_torrent_dp_init, + .exit = cdns_torrent_dp_exit, + .configure = cdns_torrent_dp_configure, + .power_on = cdns_torrent_phy_on, + .power_off = cdns_torrent_phy_off, + .owner = THIS_MODULE, +}; + +struct cdns_torrent_data { + u8 block_offset_shift; + u8 reg_offset_shift; +}; + +struct cdns_regmap_cdb_context { + struct device *dev; + void __iomem *base; + u8 reg_offset_shift; +}; + +static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val) +{ + struct cdns_regmap_cdb_context *ctx = context; + u32 offset = reg << ctx->reg_offset_shift; + + writew(val, ctx->base + offset); + + return 0; +} + +static int cdns_regmap_read(void *context, unsigned int reg, unsigned int *val) +{ + struct cdns_regmap_cdb_context *ctx = context; + u32 offset = reg << ctx->reg_offset_shift; + + *val = readw(ctx->base + offset); + return 0; +} + +static int cdns_regmap_dptx_write(void *context, unsigned int reg, + unsigned int val) +{ + struct cdns_regmap_cdb_context *ctx = context; + u32 offset = reg; + + writel(val, ctx->base + offset); + + return 0; +} + +static int cdns_regmap_dptx_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct cdns_regmap_cdb_context *ctx = context; + u32 offset = reg; + + *val = readl(ctx->base + offset); + return 0; +} + +#define TORRENT_TX_LANE_CDB_REGMAP_CONF(n) \ +{ \ + .name = "torrent_tx_lane" n "_cdb", \ + .reg_stride = 1, \ + .fast_io = true, \ + .reg_write = cdns_regmap_write, \ + .reg_read = cdns_regmap_read, \ +} + +#define TORRENT_RX_LANE_CDB_REGMAP_CONF(n) \ +{ \ + .name = "torrent_rx_lane" n "_cdb", \ + .reg_stride = 1, \ + .fast_io = true, \ + .reg_write = cdns_regmap_write, \ + .reg_read = cdns_regmap_read, \ +} + +static struct regmap_config cdns_torrent_tx_lane_cdb_config[] = { + TORRENT_TX_LANE_CDB_REGMAP_CONF("0"), + TORRENT_TX_LANE_CDB_REGMAP_CONF("1"), + TORRENT_TX_LANE_CDB_REGMAP_CONF("2"), + TORRENT_TX_LANE_CDB_REGMAP_CONF("3"), +}; + +static struct regmap_config cdns_torrent_rx_lane_cdb_config[] = { + TORRENT_RX_LANE_CDB_REGMAP_CONF("0"), + TORRENT_RX_LANE_CDB_REGMAP_CONF("1"), + TORRENT_RX_LANE_CDB_REGMAP_CONF("2"), + TORRENT_RX_LANE_CDB_REGMAP_CONF("3"), +}; + +static struct regmap_config cdns_torrent_common_cdb_config = { + .name = "torrent_common_cdb", + .reg_stride = 1, + .fast_io = true, + .reg_write = cdns_regmap_write, + .reg_read = 
cdns_regmap_read, +}; + +static struct regmap_config cdns_torrent_phy_pcs_cmn_cdb_config = { + .name = "torrent_phy_pcs_cmn_cdb", + .reg_stride = 1, + .fast_io = true, + .reg_write = cdns_regmap_write, + .reg_read = cdns_regmap_read, +}; + +static struct regmap_config cdns_torrent_phy_pma_cmn_cdb_config = { + .name = "torrent_phy_pma_cmn_cdb", + .reg_stride = 1, + .fast_io = true, + .reg_write = cdns_regmap_write, + .reg_read = cdns_regmap_read, +}; + +static struct regmap_config cdns_torrent_dptx_phy_config = { + .name = "torrent_dptx_phy", + .reg_stride = 1, + .fast_io = true, + .reg_write = cdns_regmap_dptx_write, + .reg_read = cdns_regmap_dptx_read, +}; + +/* PHY mmr access functions */ + +static void cdns_torrent_phy_write(struct regmap *regmap, u32 offset, u32 val) +{ + regmap_write(regmap, offset, val); +} + +static u32 cdns_torrent_phy_read(struct regmap *regmap, u32 offset) +{ + unsigned int val; + + regmap_read(regmap, offset, &val); + return val; +} + +/* DPTX mmr access functions */ + +static void cdns_torrent_dp_write(struct regmap *regmap, u32 offset, u32 val) +{ + regmap_write(regmap, offset, val); +} + +static u32 cdns_torrent_dp_read(struct regmap *regmap, u32 offset) +{ + u32 val; + + regmap_read(regmap, offset, &val); + return val; +} + +/* + * Structure used to store values of PHY registers for voltage-related + * coefficients, for particular voltage swing and pre-emphasis level. Values + * are shared across all physical lanes. + */ +struct coefficients { + /* Value of DRV_DIAG_TX_DRV register to use */ + u16 diag_tx_drv; + /* Value of TX_TXCC_MGNFS_MULT_000 register to use */ + u16 mgnfs_mult; + /* Value of TX_TXCC_CPOST_MULT_00 register to use */ + u16 cpost_mult; +}; + +/* + * Array consists of values of voltage-related registers for sd0801 PHY. A value + * of 0xFFFF is a placeholder for invalid combination, and will never be used. + */ +static const struct coefficients vltg_coeff[4][4] = { + /* voltage swing 0, pre-emphasis 0->3 */ + { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x002A, + .cpost_mult = 0x0000}, + {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x001F, + .cpost_mult = 0x0014}, + {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0012, + .cpost_mult = 0x0020}, + {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000, + .cpost_mult = 0x002A} + }, + + /* voltage swing 1, pre-emphasis 0->3 */ + { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x001F, + .cpost_mult = 0x0000}, + {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0013, + .cpost_mult = 0x0012}, + {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000, + .cpost_mult = 0x001F}, + {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF, + .cpost_mult = 0xFFFF} + }, + + /* voltage swing 2, pre-emphasis 0->3 */ + { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0013, + .cpost_mult = 0x0000}, + {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000, + .cpost_mult = 0x0013}, + {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF, + .cpost_mult = 0xFFFF}, + {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF, + .cpost_mult = 0xFFFF} + }, + + /* voltage swing 3, pre-emphasis 0->3 */ + { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000, + .cpost_mult = 0x0000}, + {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF, + .cpost_mult = 0xFFFF}, + {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF, + .cpost_mult = 0xFFFF}, + {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF, + .cpost_mult = 0xFFFF} + } +}; + +/* + * Enable or disable PLL for selected lanes. 
+ */ +static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy, + struct phy_configure_opts_dp *dp, + bool enable) +{ + u32 rd_val; + u32 ret; + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + + /* + * Used to determine, which bits to check for or enable in + * PHY_PMA_XCVR_PLLCLK_EN register. + */ + u32 pll_bits; + /* Used to enable or disable lanes. */ + u32 pll_val; + + /* Select values of registers and mask, depending on enabled lane + * count. + */ + switch (dp->lanes) { + /* lane 0 */ + case (1): + pll_bits = 0x00000001; + break; + /* lanes 0-1 */ + case (2): + pll_bits = 0x00000003; + break; + /* lanes 0-3, all */ + default: + pll_bits = 0x0000000F; + break; + } + + if (enable) + pll_val = pll_bits; + else + pll_val = 0x00000000; + + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, pll_val); + + /* Wait for acknowledgment from PHY. */ + ret = regmap_read_poll_timeout(regmap, + PHY_PMA_XCVR_PLLCLK_EN_ACK, + rd_val, + (rd_val & pll_bits) == pll_val, + 0, POLL_TIMEOUT_US); + ndelay(100); + return ret; +} + +/* + * Perform register operations related to setting link rate, once powerstate is + * set and PLL disable request was processed. + */ +static int cdns_torrent_dp_configure_rate(struct cdns_torrent_phy *cdns_phy, + struct phy_configure_opts_dp *dp) +{ + u32 ret; + u32 read_val; + + /* Disable the cmn_pll0_en before re-programming the new data rate. */ + regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x0); + + /* + * Wait for PLL ready de-assertion. + * For PLL0 - PHY_PMA_CMN_CTRL2[2] == 1 + */ + ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2, + read_val, + ((read_val >> 2) & 0x01) != 0, + 0, POLL_TIMEOUT_US); + if (ret) + return ret; + ndelay(200); + + /* DP Rate Change - VCO Output settings. */ + if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) { + /* PMA common configuration 19.2MHz */ + cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate, + dp->ssc); + cdns_torrent_dp_pma_cmn_cfg_19_2mhz(cdns_phy); + } else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) { + /* PMA common configuration 25MHz */ + cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate, + dp->ssc); + cdns_torrent_dp_pma_cmn_cfg_25mhz(cdns_phy); + } + cdns_torrent_dp_pma_cmn_rate(cdns_phy, dp->link_rate, dp->lanes); + + /* Enable the cmn_pll0_en. */ + regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x3); + + /* + * Wait for PLL ready assertion. + * For PLL0 - PHY_PMA_CMN_CTRL2[0] == 1 + */ + ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2, + read_val, + (read_val & 0x01) != 0, + 0, POLL_TIMEOUT_US); + return ret; +} + +/* + * Verify, that parameters to configure PHY with are correct. + */ +static int cdns_torrent_dp_verify_config(struct cdns_torrent_inst *inst, + struct phy_configure_opts_dp *dp) +{ + u8 i; + + /* If changing link rate was required, verify it's supported. */ + if (dp->set_rate) { + switch (dp->link_rate) { + case 1620: + case 2160: + case 2430: + case 2700: + case 3240: + case 4320: + case 5400: + case 8100: + /* valid bit rate */ + break; + default: + return -EINVAL; + } + } + + /* Verify lane count. */ + switch (dp->lanes) { + case 1: + case 2: + case 4: + /* valid lane count. */ + break; + default: + return -EINVAL; + } + + /* Check against actual number of PHY's lanes. */ + if (dp->lanes > inst->num_lanes) + return -EINVAL; + + /* + * If changing voltages is required, check swing and pre-emphasis + * levels, per-lane. + */ + if (dp->set_voltages) { + /* Lane count verified previously. 
*/ + for (i = 0; i < dp->lanes; i++) { + if (dp->voltage[i] > 3 || dp->pre[i] > 3) + return -EINVAL; + + /* Sum of voltage swing and pre-emphasis levels cannot + * exceed 3. + */ + if (dp->voltage[i] + dp->pre[i] > 3) + return -EINVAL; + } + } + + return 0; +} + +/* Set power state A0 and PLL clock enable to 0 on enabled lanes. */ +static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy, + u32 num_lanes) +{ + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + u32 pwr_state = cdns_torrent_dp_read(regmap, + PHY_PMA_XCVR_POWER_STATE_REQ); + u32 pll_clk_en = cdns_torrent_dp_read(regmap, + PHY_PMA_XCVR_PLLCLK_EN); + + /* Lane 0 is always enabled. */ + pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << + PHY_POWER_STATE_LN_0); + pll_clk_en &= ~0x01U; + + if (num_lanes > 1) { + /* lane 1 */ + pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << + PHY_POWER_STATE_LN_1); + pll_clk_en &= ~(0x01U << 1); + } + + if (num_lanes > 2) { + /* lanes 2 and 3 */ + pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << + PHY_POWER_STATE_LN_2); + pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << + PHY_POWER_STATE_LN_3); + pll_clk_en &= ~(0x01U << 2); + pll_clk_en &= ~(0x01U << 3); + } + + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, pwr_state); + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, pll_clk_en); +} + +/* Configure lane count as required. */ +static int cdns_torrent_dp_set_lanes(struct cdns_torrent_phy *cdns_phy, + struct phy_configure_opts_dp *dp) +{ + u32 value; + u32 ret; + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + u8 lane_mask = (1 << dp->lanes) - 1; + + value = cdns_torrent_dp_read(regmap, PHY_RESET); + /* clear pma_tx_elec_idle_ln_* bits. */ + value &= ~PMA_TX_ELEC_IDLE_MASK; + /* Assert pma_tx_elec_idle_ln_* for disabled lanes. */ + value |= ((~lane_mask) << PMA_TX_ELEC_IDLE_SHIFT) & + PMA_TX_ELEC_IDLE_MASK; + cdns_torrent_dp_write(regmap, PHY_RESET, value); + + /* reset the link by asserting phy_l00_reset_n low */ + cdns_torrent_dp_write(regmap, PHY_RESET, + value & (~PHY_L00_RESET_N_MASK)); + + /* + * Assert lane reset on unused lanes and lane 0 so they remain in reset + * and powered down when re-enabling the link + */ + value = (value & 0x0000FFF0) | (0x0000000E & lane_mask); + cdns_torrent_dp_write(regmap, PHY_RESET, value); + + cdns_torrent_dp_set_a0_pll(cdns_phy, dp->lanes); + + /* release phy_l0*_reset_n based on used laneCount */ + value = (value & 0x0000FFF0) | (0x0000000F & lane_mask); + cdns_torrent_dp_write(regmap, PHY_RESET, value); + + /* Wait, until PHY gets ready after releasing PHY reset signal. */ + ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy); + if (ret) + return ret; + + ndelay(100); + + /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */ + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001); + + ret = cdns_torrent_dp_run(cdns_phy, dp->lanes); + + return ret; +} + +/* Configure link rate as required. 
*/ +static int cdns_torrent_dp_set_rate(struct cdns_torrent_phy *cdns_phy, + struct phy_configure_opts_dp *dp) +{ + u32 ret; + + ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes, + POWERSTATE_A3); + if (ret) + return ret; + ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, false); + if (ret) + return ret; + ndelay(200); + + ret = cdns_torrent_dp_configure_rate(cdns_phy, dp); + if (ret) + return ret; + ndelay(200); + + ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, true); + if (ret) + return ret; + ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes, + POWERSTATE_A2); + if (ret) + return ret; + ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes, + POWERSTATE_A0); + if (ret) + return ret; + ndelay(900); + + return ret; +} + +/* Configure voltage swing and pre-emphasis for all enabled lanes. */ +static void cdns_torrent_dp_set_voltages(struct cdns_torrent_phy *cdns_phy, + struct phy_configure_opts_dp *dp) +{ + u8 lane; + u16 val; + + for (lane = 0; lane < dp->lanes; lane++) { + val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane], + TX_DIAG_ACYA); + /* + * Write 1 to register bit TX_DIAG_ACYA[0] to freeze the + * current state of the analog TX driver. + */ + val |= TX_DIAG_ACYA_HBDC_MASK; + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_DIAG_ACYA, val); + + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_TXCC_CTRL, 0x08A4); + val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].diag_tx_drv; + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + DRV_DIAG_TX_DRV, val); + val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].mgnfs_mult; + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_TXCC_MGNFS_MULT_000, + val); + val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].cpost_mult; + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_TXCC_CPOST_MULT_00, + val); + + val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane], + TX_DIAG_ACYA); + /* + * Write 0 to register bit TX_DIAG_ACYA[0] to allow the state of + * analog TX driver to reflect the new programmed one. 
+ */ + val &= ~TX_DIAG_ACYA_HBDC_MASK; + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_DIAG_ACYA, val); + } +}; + +static int cdns_torrent_dp_configure(struct phy *phy, + union phy_configure_opts *opts) +{ + struct cdns_torrent_inst *inst = phy_get_drvdata(phy); + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + int ret; + + ret = cdns_torrent_dp_verify_config(inst, &opts->dp); + if (ret) { + dev_err(&phy->dev, "invalid params for phy configure\n"); + return ret; + } + + if (opts->dp.set_lanes) { + ret = cdns_torrent_dp_set_lanes(cdns_phy, &opts->dp); + if (ret) { + dev_err(&phy->dev, "cdns_torrent_dp_set_lanes failed\n"); + return ret; + } + } + + if (opts->dp.set_rate) { + ret = cdns_torrent_dp_set_rate(cdns_phy, &opts->dp); + if (ret) { + dev_err(&phy->dev, "cdns_torrent_dp_set_rate failed\n"); + return ret; + } + } + + if (opts->dp.set_voltages) + cdns_torrent_dp_set_voltages(cdns_phy, &opts->dp); + + return ret; +} + +static int cdns_torrent_dp_init(struct phy *phy) +{ + unsigned char lane_bits; + int ret; + struct cdns_torrent_inst *inst = phy_get_drvdata(phy); + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + + ret = clk_prepare_enable(cdns_phy->clk); + if (ret) { + dev_err(cdns_phy->dev, "Failed to prepare ref clock\n"); + return ret; + } + + cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk); + if (!(cdns_phy->ref_clk_rate)) { + dev_err(cdns_phy->dev, "Failed to get ref clock rate\n"); + clk_disable_unprepare(cdns_phy->clk); + return -EINVAL; + } + + switch (cdns_phy->ref_clk_rate) { + case REF_CLK_19_2MHz: + case REF_CLK_25MHz: + /* Valid Ref Clock Rate */ + break; + default: + dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n"); + return -EINVAL; + } + + cdns_torrent_dp_write(regmap, PHY_AUX_CTRL, 0x0003); /* enable AUX */ + + /* PHY PMA registers configuration function */ + cdns_torrent_dp_pma_cfg(cdns_phy, inst); + + /* + * Set lines power state to A0 + * Set lines pll clk enable to 0 + */ + cdns_torrent_dp_set_a0_pll(cdns_phy, inst->num_lanes); + + /* + * release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on + * used lanes + */ + lane_bits = (1 << inst->num_lanes) - 1; + cdns_torrent_dp_write(regmap, PHY_RESET, + ((0xF & ~lane_bits) << 4) | (0xF & lane_bits)); + + /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */ + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001); + + /* PHY PMA registers configuration functions */ + /* Initialize PHY with max supported link rate, without SSC. 
*/ + if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) + cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, + cdns_phy->max_bit_rate, + false); + else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) + cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, + cdns_phy->max_bit_rate, + false); + cdns_torrent_dp_pma_cmn_rate(cdns_phy, cdns_phy->max_bit_rate, + inst->num_lanes); + + /* take out of reset */ + regmap_field_write(cdns_phy->phy_reset_ctrl, 0x1); + + cdns_torrent_phy_on(phy); + + ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy); + if (ret) + return ret; + + ret = cdns_torrent_dp_run(cdns_phy, inst->num_lanes); + + return ret; +} + +static int cdns_torrent_dp_exit(struct phy *phy) +{ + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + + clk_disable_unprepare(cdns_phy->clk); + return 0; +} + +static +int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy) +{ + unsigned int reg; + int ret; + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + + ret = regmap_read_poll_timeout(regmap, PHY_PMA_CMN_READY, reg, + reg & 1, 0, POLL_TIMEOUT_US); + if (ret == -ETIMEDOUT) { + dev_err(cdns_phy->dev, + "timeout waiting for PMA common ready\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void cdns_torrent_dp_pma_cfg(struct cdns_torrent_phy *cdns_phy, + struct cdns_torrent_inst *inst) +{ + unsigned int i; + + if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) + /* PMA common configuration 19.2MHz */ + cdns_torrent_dp_pma_cmn_cfg_19_2mhz(cdns_phy); + else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) + /* PMA common configuration 25MHz */ + cdns_torrent_dp_pma_cmn_cfg_25mhz(cdns_phy); + + /* PMA lane configuration to deal with multi-link operation */ + for (i = 0; i < inst->num_lanes; i++) + cdns_torrent_dp_pma_lane_cfg(cdns_phy, i); +} + +static +void cdns_torrent_dp_pma_cmn_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + /* refclock registers - assumes 19.2 MHz refclock */ + cdns_torrent_phy_write(regmap, CMN_SSM_BIAS_TMR, 0x0014); + cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLPRE_TMR, 0x0027); + cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLLOCK_TMR, 0x00A1); + cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLPRE_TMR, 0x0027); + cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLLOCK_TMR, 0x00A1); + cdns_torrent_phy_write(regmap, CMN_BGCAL_INIT_TMR, 0x0060); + cdns_torrent_phy_write(regmap, CMN_BGCAL_ITER_TMR, 0x0060); + cdns_torrent_phy_write(regmap, CMN_IBCAL_INIT_TMR, 0x0014); + cdns_torrent_phy_write(regmap, CMN_TXPUCAL_INIT_TMR, 0x0018); + cdns_torrent_phy_write(regmap, CMN_TXPUCAL_ITER_TMR, 0x0005); + cdns_torrent_phy_write(regmap, CMN_TXPDCAL_INIT_TMR, 0x0018); + cdns_torrent_phy_write(regmap, CMN_TXPDCAL_ITER_TMR, 0x0005); + cdns_torrent_phy_write(regmap, CMN_RXCAL_INIT_TMR, 0x0240); + cdns_torrent_phy_write(regmap, CMN_RXCAL_ITER_TMR, 0x0005); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_INIT_TMR, 0x0002); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_ITER_TMR, 0x0002); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_REFTIM_START, 0x000B); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_PLLCNT_START, 0x0137); + + /* PLL registers */ + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, 
CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_INIT_TMR, 0x00C0); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_ITER_TMR, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_INIT_TMR, 0x00C0); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_ITER_TMR, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_REFTIM_START, 0x0260); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_TCTRL, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_REFTIM_START, 0x0260); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_TCTRL, 0x0003); +} + +/* + * Set registers responsible for enabling and configuring SSC, with second and + * third register values provided by parameters. + */ +static +void cdns_torrent_dp_enable_ssc_19_2mhz(struct cdns_torrent_phy *cdns_phy, + u32 ctrl2_val, u32 ctrl3_val) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl2_val); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl3_val); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl2_val); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl3_val); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003); +} + +static +void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy, + u32 rate, bool ssc) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + /* Assumes 19.2 MHz refclock */ + switch (rate) { + /* Setting VCO for 10.8GHz */ + case 2700: + case 5400: + cdns_torrent_phy_write(regmap, + CMN_PLL0_INTDIV_M0, 0x0119); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVL_M0, 0x4000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL0_HIGH_THR_M0, 0x00BC); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL0_CTRL_M0, 0x0012); + cdns_torrent_phy_write(regmap, + CMN_PLL1_INTDIV_M0, 0x0119); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVL_M0, 0x4000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_HIGH_THR_M0, 0x00BC); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL1_CTRL_M0, 0x0012); + if (ssc) + cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x033A, + 0x006A); + break; + /* Setting VCO for 9.72GHz */ + case 1620: + case 2430: + case 3240: + cdns_torrent_phy_write(regmap, + CMN_PLL0_INTDIV_M0, 0x01FA); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVL_M0, 0x4000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL0_HIGH_THR_M0, 0x0152); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_INTDIV_M0, 0x01FA); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVL_M0, 0x4000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_HIGH_THR_M0, 0x0152); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + if (ssc) + cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x05DD, + 0x0069); + break; + /* Setting VCO for 8.64GHz */ + case 2160: + case 4320: + cdns_torrent_phy_write(regmap, + CMN_PLL0_INTDIV_M0, 0x01C2); + 
cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL0_HIGH_THR_M0, 0x012C); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_INTDIV_M0, 0x01C2); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_HIGH_THR_M0, 0x012C); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + if (ssc) + cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x0536, + 0x0069); + break; + /* Setting VCO for 8.1GHz */ + case 8100: + cdns_torrent_phy_write(regmap, + CMN_PLL0_INTDIV_M0, 0x01A5); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVL_M0, 0xE000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL0_HIGH_THR_M0, 0x011A); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_INTDIV_M0, 0x01A5); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVL_M0, 0xE000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_HIGH_THR_M0, 0x011A); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + if (ssc) + cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x04D7, + 0x006A); + break; + } + + if (ssc) { + cdns_torrent_phy_write(regmap, + CMN_PLL0_VCOCAL_PLLCNT_START, 0x025E); + cdns_torrent_phy_write(regmap, + CMN_PLL0_LOCK_PLLCNT_THR, 0x0005); + cdns_torrent_phy_write(regmap, + CMN_PLL1_VCOCAL_PLLCNT_START, 0x025E); + cdns_torrent_phy_write(regmap, + CMN_PLL1_LOCK_PLLCNT_THR, 0x0005); + } else { + cdns_torrent_phy_write(regmap, + CMN_PLL0_VCOCAL_PLLCNT_START, 0x0260); + cdns_torrent_phy_write(regmap, + CMN_PLL1_VCOCAL_PLLCNT_START, 0x0260); + /* Set reset register values to disable SSC */ + cdns_torrent_phy_write(regmap, + CMN_PLL0_SS_CTRL1_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL0_SS_CTRL2_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_SS_CTRL3_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_SS_CTRL4_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_LOCK_PLLCNT_THR, 0x0003); + cdns_torrent_phy_write(regmap, + CMN_PLL1_SS_CTRL1_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_SS_CTRL2_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_SS_CTRL3_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_SS_CTRL4_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_LOCK_PLLCNT_THR, 0x0003); + } + + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x0099); + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x0099); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x0099); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x0099); +} + +static +void cdns_torrent_dp_pma_cmn_cfg_25mhz(struct cdns_torrent_phy *cdns_phy) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + /* refclock registers - assumes 25 MHz refclock */ + cdns_torrent_phy_write(regmap, CMN_SSM_BIAS_TMR, 0x0019); + cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLPRE_TMR, 0x0032); + cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLLOCK_TMR, 0x00D1); + cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLPRE_TMR, 0x0032); + cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLLOCK_TMR, 0x00D1); + cdns_torrent_phy_write(regmap, CMN_BGCAL_INIT_TMR, 0x007D); + 
cdns_torrent_phy_write(regmap, CMN_BGCAL_ITER_TMR, 0x007D); + cdns_torrent_phy_write(regmap, CMN_IBCAL_INIT_TMR, 0x0019); + cdns_torrent_phy_write(regmap, CMN_TXPUCAL_INIT_TMR, 0x001E); + cdns_torrent_phy_write(regmap, CMN_TXPUCAL_ITER_TMR, 0x0006); + cdns_torrent_phy_write(regmap, CMN_TXPDCAL_INIT_TMR, 0x001E); + cdns_torrent_phy_write(regmap, CMN_TXPDCAL_ITER_TMR, 0x0006); + cdns_torrent_phy_write(regmap, CMN_RXCAL_INIT_TMR, 0x02EE); + cdns_torrent_phy_write(regmap, CMN_RXCAL_ITER_TMR, 0x0006); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_INIT_TMR, 0x0002); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_ITER_TMR, 0x0002); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_REFTIM_START, 0x000E); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_PLLCNT_START, 0x012B); + + /* PLL registers */ + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_INIT_TMR, 0x00FA); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_ITER_TMR, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_INIT_TMR, 0x00FA); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_ITER_TMR, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_REFTIM_START, 0x0317); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_TCTRL, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_REFTIM_START, 0x0317); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_TCTRL, 0x0003); +} + +/* + * Set registers responsible for enabling and configuring SSC, with second + * register value provided by a parameter. 
+ */ +static void cdns_torrent_dp_enable_ssc_25mhz(struct cdns_torrent_phy *cdns_phy, + u32 ctrl2_val) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl2_val); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x007F); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl2_val); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x007F); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003); +} + +static +void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy, + u32 rate, bool ssc) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + /* Assumes 25 MHz refclock */ + switch (rate) { + /* Setting VCO for 10.8GHz */ + case 2700: + case 5400: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01B0); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0120); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01B0); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0120); + if (ssc) + cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x0423); + break; + /* Setting VCO for 9.72GHz */ + case 1620: + case 2430: + case 3240: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0184); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0xCCCD); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0104); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0184); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0xCCCD); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0104); + if (ssc) + cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x03B9); + break; + /* Setting VCO for 8.64GHz */ + case 2160: + case 4320: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0159); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x999A); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00E7); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0159); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x999A); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00E7); + if (ssc) + cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x034F); + break; + /* Setting VCO for 8.1GHz */ + case 8100: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0144); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00D8); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0144); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00D8); + if (ssc) + cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x031A); + break; + } + + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + 
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + + if (ssc) { + cdns_torrent_phy_write(regmap, + CMN_PLL0_VCOCAL_PLLCNT_START, 0x0315); + cdns_torrent_phy_write(regmap, + CMN_PLL0_LOCK_PLLCNT_THR, 0x0005); + cdns_torrent_phy_write(regmap, + CMN_PLL1_VCOCAL_PLLCNT_START, 0x0315); + cdns_torrent_phy_write(regmap, + CMN_PLL1_LOCK_PLLCNT_THR, 0x0005); + } else { + cdns_torrent_phy_write(regmap, + CMN_PLL0_VCOCAL_PLLCNT_START, 0x0317); + cdns_torrent_phy_write(regmap, + CMN_PLL1_VCOCAL_PLLCNT_START, 0x0317); + /* Set reset register values to disable SSC */ + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_LOCK_PLLCNT_THR, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_LOCK_PLLCNT_THR, 0x0003); + } + + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x00C7); + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x00C7); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x00C7); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x00C7); +} + +static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy, + u32 rate, u32 num_lanes) +{ + unsigned int clk_sel_val = 0; + unsigned int hsclk_div_val = 0; + unsigned int i; + + /* 16'h0000 for single DP link configuration */ + regmap_field_write(cdns_phy->phy_pll_cfg, 0x0); + + switch (rate) { + case 1620: + clk_sel_val = 0x0f01; + hsclk_div_val = 2; + break; + case 2160: + case 2430: + case 2700: + clk_sel_val = 0x0701; + hsclk_div_val = 1; + break; + case 3240: + clk_sel_val = 0x0b00; + hsclk_div_val = 2; + break; + case 4320: + case 5400: + clk_sel_val = 0x0301; + hsclk_div_val = 0; + break; + case 8100: + clk_sel_val = 0x0200; + hsclk_div_val = 0; + break; + } + + cdns_torrent_phy_write(cdns_phy->regmap_common_cdb, + CMN_PDIAG_PLL0_CLK_SEL_M0, clk_sel_val); + cdns_torrent_phy_write(cdns_phy->regmap_common_cdb, + CMN_PDIAG_PLL1_CLK_SEL_M0, clk_sel_val); + + /* PMA lane configuration to deal with multi-link operation */ + for (i = 0; i < num_lanes; i++) + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[i], + XCVR_DIAG_HSCLK_DIV, hsclk_div_val); +} + +static void cdns_torrent_dp_pma_lane_cfg(struct cdns_torrent_phy *cdns_phy, + unsigned int lane) +{ + /* Per lane, refclock-dependent receiver detection setting */ + if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_RCVDET_ST_TMR, 0x0780); + else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_RCVDET_ST_TMR, 0x09C4); + + /* Writing Tx/Rx Power State Controllers registers */ + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_PSC_A0, 0x00FB); + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_PSC_A2, 0x04AA); + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_PSC_A3, 0x04AA); + cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_PSC_A0, 0x0000); + cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_PSC_A2, 0x0000); + 
cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_PSC_A3, 0x0000); + + cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_PSC_CAL, 0x0000); + + cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_REE_GCSM1_CTRL, 0x0000); + cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_REE_GCSM2_CTRL, 0x0000); + cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_REE_PERGCSM_CTRL, 0x0000); + + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + XCVR_DIAG_BIDI_CTRL, 0x000F); + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + XCVR_DIAG_PLLDRC_CTRL, 0x0001); + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + XCVR_DIAG_HSCLK_SEL, 0x0000); +} + +static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy, + u32 num_lanes, + enum phy_powerstate powerstate) +{ + /* Register value for power state for a single byte. */ + u32 value_part; + u32 value; + u32 mask; + u32 read_val; + u32 ret; + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + + switch (powerstate) { + case (POWERSTATE_A0): + value_part = 0x01U; + break; + case (POWERSTATE_A2): + value_part = 0x04U; + break; + default: + /* Powerstate A3 */ + value_part = 0x08U; + break; + } + + /* Select values of registers and mask, depending on enabled + * lane count. + */ + switch (num_lanes) { + /* lane 0 */ + case (1): + value = value_part; + mask = 0x0000003FU; + break; + /* lanes 0-1 */ + case (2): + value = (value_part + | (value_part << 8)); + mask = 0x00003F3FU; + break; + /* lanes 0-3, all */ + default: + value = (value_part + | (value_part << 8) + | (value_part << 16) + | (value_part << 24)); + mask = 0x3F3F3F3FU; + break; + } + + /* Set power state A<n>. */ + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, value); + /* Wait, until PHY acknowledges power state completion. 
*/ + ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK, + read_val, (read_val & mask) == value, 0, + POLL_TIMEOUT_US); + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000); + ndelay(100); + + return ret; +} + +static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes) +{ + unsigned int read_val; + int ret; + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + + /* + * waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the + * master lane + */ + ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_PLLCLK_EN_ACK, + read_val, read_val & 1, + 0, POLL_TIMEOUT_US); + if (ret == -ETIMEDOUT) { + dev_err(cdns_phy->dev, + "timeout waiting for link PLL clock enable ack\n"); + return ret; + } + + ndelay(100); + + ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes, + POWERSTATE_A2); + if (ret) + return ret; + + ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes, + POWERSTATE_A0); + + return ret; +} + +static int cdns_torrent_phy_on(struct phy *phy) +{ + struct cdns_torrent_inst *inst = phy_get_drvdata(phy); + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + int ret; + + /* Take the PHY out of reset */ + ret = reset_control_deassert(cdns_phy->phy_rst); + if (ret) + return ret; + + /* Take the PHY lane group out of reset */ + return reset_control_deassert(inst->lnk_rst); +} + +static int cdns_torrent_phy_off(struct phy *phy) +{ + struct cdns_torrent_inst *inst = phy_get_drvdata(phy); + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + int ret; + + ret = reset_control_assert(cdns_phy->phy_rst); + if (ret) + return ret; + + return reset_control_assert(inst->lnk_rst); +} + +static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base, + u32 block_offset, + u8 reg_offset_shift, + const struct regmap_config *config) +{ + struct cdns_regmap_cdb_context *ctx; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + ctx->dev = dev; + ctx->base = base + block_offset; + ctx->reg_offset_shift = reg_offset_shift; + + return devm_regmap_init(dev, NULL, ctx, config); +} + +static int cdns_regfield_init(struct cdns_torrent_phy *cdns_phy) +{ + struct device *dev = cdns_phy->dev; + struct regmap_field *field; + struct regmap *regmap; + + regmap = cdns_phy->regmap_phy_pcs_common_cdb; + field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PLL_CFG reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_pll_cfg = field; + + regmap = cdns_phy->regmap_phy_pma_common_cdb; + field = devm_regmap_field_alloc(dev, regmap, phy_pma_cmn_ctrl_2); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PMA_CMN_CTRL2 reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_pma_cmn_ctrl_2 = field; + + regmap = cdns_phy->regmap_phy_pma_common_cdb; + field = devm_regmap_field_alloc(dev, regmap, phy_pma_pll_raw_ctrl); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PMA_PLL_RAW_CTRL reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_pma_pll_raw_ctrl = field; + + regmap = cdns_phy->regmap_dptx_phy_reg; + field = devm_regmap_field_alloc(dev, regmap, phy_reset_ctrl); + if (IS_ERR(field)) { + dev_err(dev, "PHY_RESET reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_reset_ctrl = field; + + return 0; +} + +static int cdns_regmap_init_torrent_dp(struct cdns_torrent_phy *cdns_phy, + void __iomem *sd_base, + void __iomem *base, + u8 block_offset_shift, + u8 
reg_offset_shift) +{ + struct device *dev = cdns_phy->dev; + struct regmap *regmap; + u32 block_offset; + int i; + + for (i = 0; i < MAX_NUM_LANES; i++) { + block_offset = TORRENT_TX_LANE_CDB_OFFSET(i, block_offset_shift, + reg_offset_shift); + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift, + &cdns_torrent_tx_lane_cdb_config[i]); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init tx lane CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_tx_lane_cdb[i] = regmap; + + block_offset = TORRENT_RX_LANE_CDB_OFFSET(i, block_offset_shift, + reg_offset_shift); + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift, + &cdns_torrent_rx_lane_cdb_config[i]); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init rx lane CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_rx_lane_cdb[i] = regmap; + } + + block_offset = TORRENT_COMMON_CDB_OFFSET; + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift, + &cdns_torrent_common_cdb_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init common CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_common_cdb = regmap; + + block_offset = TORRENT_PHY_PCS_COMMON_OFFSET(block_offset_shift); + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift, + &cdns_torrent_phy_pcs_cmn_cdb_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init PHY PCS common CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_phy_pcs_common_cdb = regmap; + + block_offset = TORRENT_PHY_PMA_COMMON_OFFSET(block_offset_shift); + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift, + &cdns_torrent_phy_pma_cmn_cdb_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init PHY PMA common CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_phy_pma_common_cdb = regmap; + + block_offset = TORRENT_DPTX_PHY_OFFSET; + regmap = cdns_regmap_init(dev, base, block_offset, + reg_offset_shift, + &cdns_torrent_dptx_phy_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init DPTX PHY regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_dptx_phy_reg = regmap; + + return 0; +} + +static int cdns_torrent_phy_probe(struct platform_device *pdev) +{ + struct resource *regs; + struct cdns_torrent_phy *cdns_phy; + struct device *dev = &pdev->dev; + struct phy_provider *phy_provider; + const struct of_device_id *match; + struct cdns_torrent_data *data; + struct device_node *child; + int ret, subnodes, node = 0, i; + + /* Get init data for this PHY */ + match = of_match_device(cdns_torrent_phy_of_match, dev); + if (!match) + return -EINVAL; + + data = (struct cdns_torrent_data *)match->data; + + cdns_phy = devm_kzalloc(dev, sizeof(*cdns_phy), GFP_KERNEL); + if (!cdns_phy) + return -ENOMEM; + + dev_set_drvdata(dev, cdns_phy); + cdns_phy->dev = dev; + + cdns_phy->phy_rst = devm_reset_control_get_exclusive_by_index(dev, 0); + if (IS_ERR(cdns_phy->phy_rst)) { + dev_err(dev, "%s: failed to get reset\n", + dev->of_node->full_name); + return PTR_ERR(cdns_phy->phy_rst); + } + + cdns_phy->clk = devm_clk_get(dev, "refclk"); + if (IS_ERR(cdns_phy->clk)) { + dev_err(dev, "phy ref clock not found\n"); + return PTR_ERR(cdns_phy->clk); + } + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + cdns_phy->sd_base = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(cdns_phy->sd_base)) + return PTR_ERR(cdns_phy->sd_base); + + subnodes = of_get_available_child_count(dev->of_node); + if (subnodes == 0) { + dev_err(dev, "No 
available link subnodes found\n"); + return -EINVAL; + } else if (subnodes != 1) { + dev_err(dev, "Driver supports only one link subnode.\n"); + return -EINVAL; + } + + for_each_available_child_of_node(dev->of_node, child) { + struct phy *gphy; + + cdns_phy->phys[node].lnk_rst = + of_reset_control_array_get_exclusive(child); + if (IS_ERR(cdns_phy->phys[node].lnk_rst)) { + dev_err(dev, "%s: failed to get reset\n", + child->full_name); + ret = PTR_ERR(cdns_phy->phys[node].lnk_rst); + goto put_lnk_rst; + } + + if (of_property_read_u32(child, "reg", + &cdns_phy->phys[node].mlane)) { + dev_err(dev, "%s: No \"reg\"-property.\n", + child->full_name); + ret = -EINVAL; + goto put_child; + } + + if (cdns_phy->phys[node].mlane != 0) { + dev_err(dev, + "%s: Driver supports only lane-0 as master lane.\n", + child->full_name); + ret = -EINVAL; + goto put_child; + } + + if (of_property_read_u32(child, "cdns,phy-type", + &cdns_phy->phys[node].phy_type)) { + dev_err(dev, "%s: No \"cdns,phy-type\"-property.\n", + child->full_name); + ret = -EINVAL; + goto put_child; + } + + cdns_phy->phys[node].num_lanes = DEFAULT_NUM_LANES; + of_property_read_u32(child, "cdns,num-lanes", + &cdns_phy->phys[node].num_lanes); + + if (cdns_phy->phys[node].phy_type == PHY_TYPE_DP) { + switch (cdns_phy->phys[node].num_lanes) { + case 1: + case 2: + case 4: + /* valid number of lanes */ + break; + default: + dev_err(dev, "unsupported number of lanes: %d\n", + cdns_phy->phys[node].num_lanes); + ret = -EINVAL; + goto put_child; + } + + cdns_phy->max_bit_rate = DEFAULT_MAX_BIT_RATE; + of_property_read_u32(child, "cdns,max-bit-rate", + &cdns_phy->max_bit_rate); + + switch (cdns_phy->max_bit_rate) { + case 1620: + case 2160: + case 2430: + case 2700: + case 3240: + case 4320: + case 5400: + case 8100: + /* valid bit rate */ + break; + default: + dev_err(dev, "unsupported max bit rate: %dMbps\n", + cdns_phy->max_bit_rate); + ret = -EINVAL; + goto put_child; + } + + /* DPTX registers */ + regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); + cdns_phy->base = devm_ioremap_resource(&pdev->dev, + regs); + if (IS_ERR(cdns_phy->base)) { + ret = PTR_ERR(cdns_phy->base); + goto put_child; + } + + gphy = devm_phy_create(dev, child, + &cdns_torrent_phy_ops); + if (IS_ERR(gphy)) { + ret = PTR_ERR(gphy); + goto put_child; + } + + dev_info(dev, "%d lanes, max bit rate %d.%03d Gbps\n", + cdns_phy->phys[node].num_lanes, + cdns_phy->max_bit_rate / 1000, + cdns_phy->max_bit_rate % 1000); + } else { + dev_err(dev, "Driver supports only PHY_TYPE_DP\n"); + ret = -ENOTSUPP; + goto put_child; + } + cdns_phy->phys[node].phy = gphy; + phy_set_drvdata(gphy, &cdns_phy->phys[node]); + + node++; + } + cdns_phy->nsubnodes = node; + + ret = cdns_regmap_init_torrent_dp(cdns_phy, cdns_phy->sd_base, + cdns_phy->base, + data->block_offset_shift, + data->reg_offset_shift); + if (ret) + goto put_lnk_rst; + + ret = cdns_regfield_init(cdns_phy); + if (ret) + goto put_lnk_rst; + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(phy_provider)) { + ret = PTR_ERR(phy_provider); + goto put_lnk_rst; + } + + return 0; + +put_child: + node++; +put_lnk_rst: + for (i = 0; i < node; i++) + reset_control_put(cdns_phy->phys[i].lnk_rst); + of_node_put(child); + return ret; +} + +static int cdns_torrent_phy_remove(struct platform_device *pdev) +{ + struct cdns_torrent_phy *cdns_phy = platform_get_drvdata(pdev); + int i; + + reset_control_assert(cdns_phy->phy_rst); + for (i = 0; i < cdns_phy->nsubnodes; i++) { + 
reset_control_assert(cdns_phy->phys[i].lnk_rst); + reset_control_put(cdns_phy->phys[i].lnk_rst); + } + + return 0; +} + +static const struct cdns_torrent_data cdns_map_torrent = { + .block_offset_shift = 0x2, + .reg_offset_shift = 0x2, +}; + +static const struct cdns_torrent_data ti_j721e_map_torrent = { + .block_offset_shift = 0x0, + .reg_offset_shift = 0x1, +}; + +static const struct of_device_id cdns_torrent_phy_of_match[] = { + { + .compatible = "cdns,torrent-phy", + .data = &cdns_map_torrent, + }, + { + .compatible = "ti,j721e-serdes-10g", + .data = &ti_j721e_map_torrent, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cdns_torrent_phy_of_match); + +static struct platform_driver cdns_torrent_phy_driver = { + .probe = cdns_torrent_phy_probe, + .remove = cdns_torrent_phy_remove, + .driver = { + .name = "cdns-torrent-phy", + .of_match_table = cdns_torrent_phy_of_match, + } +}; +module_platform_driver(cdns_torrent_phy_driver); + +MODULE_AUTHOR("Cadence Design Systems, Inc."); +MODULE_DESCRIPTION("Cadence Torrent PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index cb2ed3b25068..cdbcc49f7115 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -43,6 +43,8 @@ #define PA0_RG_USB20_INTR_EN BIT(5) #define U3P_USBPHYACR1 0x004 +#define PA1_RG_INTR_CAL GENMASK(23, 19) +#define PA1_RG_INTR_CAL_VAL(x) ((0x1f & (x)) << 19) #define PA1_RG_VRT_SEL GENMASK(14, 12) #define PA1_RG_VRT_SEL_VAL(x) ((0x7 & (x)) << 12) #define PA1_RG_TERM_SEL GENMASK(10, 8) @@ -60,6 +62,8 @@ #define U3P_USBPHYACR6 0x018 #define PA6_RG_U2_BC11_SW_EN BIT(23) #define PA6_RG_U2_OTG_VBUSCMP_EN BIT(20) +#define PA6_RG_U2_DISCTH GENMASK(7, 4) +#define PA6_RG_U2_DISCTH_VAL(x) ((0xf & (x)) << 4) #define PA6_RG_U2_SQTH GENMASK(3, 0) #define PA6_RG_U2_SQTH_VAL(x) (0xf & (x)) @@ -294,20 +298,21 @@ struct mtk_phy_instance { struct u2phy_banks u2_banks; struct u3phy_banks u3_banks; }; - struct clk *ref_clk; /* reference clock of anolog phy */ + struct clk *ref_clk; /* reference clock of (digital) phy */ + struct clk *da_ref_clk; /* reference clock of analog phy */ u32 index; u8 type; int eye_src; int eye_vrt; int eye_term; + int intr; + int discth; bool bc12_en; }; struct mtk_tphy { struct device *dev; void __iomem *sif_base; /* only shared sif */ - /* deprecated, use @ref_clk instead in phy instance */ - struct clk *u3phya_ref; /* reference clock of usb3 anolog phy */ const struct mtk_phy_pdata *pdata; struct mtk_phy_instance **phys; int nphys; @@ -850,9 +855,14 @@ static void phy_parse_property(struct mtk_tphy *tphy, &instance->eye_vrt); device_property_read_u32(dev, "mediatek,eye-term", &instance->eye_term); - dev_dbg(dev, "bc12:%d, src:%d, vrt:%d, term:%d\n", + device_property_read_u32(dev, "mediatek,intr", + &instance->intr); + device_property_read_u32(dev, "mediatek,discth", + &instance->discth); + dev_dbg(dev, "bc12:%d, src:%d, vrt:%d, term:%d, intr:%d, disc:%d\n", instance->bc12_en, instance->eye_src, - instance->eye_vrt, instance->eye_term); + instance->eye_vrt, instance->eye_term, + instance->intr, instance->discth); } static void u2_phy_props_set(struct mtk_tphy *tphy, @@ -888,6 +898,20 @@ static void u2_phy_props_set(struct mtk_tphy *tphy, tmp |= PA1_RG_TERM_SEL_VAL(instance->eye_term); writel(tmp, com + U3P_USBPHYACR1); } + + if (instance->intr) { + tmp = readl(com + U3P_USBPHYACR1); + tmp &= ~PA1_RG_INTR_CAL; + tmp |= PA1_RG_INTR_CAL_VAL(instance->intr); + writel(tmp, com + U3P_USBPHYACR1); + } + + if 
(instance->discth) { + tmp = readl(com + U3P_USBPHYACR6); + tmp &= ~PA6_RG_U2_DISCTH; + tmp |= PA6_RG_U2_DISCTH_VAL(instance->discth); + writel(tmp, com + U3P_USBPHYACR6); + } } static int mtk_phy_init(struct phy *phy) @@ -896,15 +920,16 @@ static int mtk_phy_init(struct phy *phy) struct mtk_tphy *tphy = dev_get_drvdata(phy->dev.parent); int ret; - ret = clk_prepare_enable(tphy->u3phya_ref); + ret = clk_prepare_enable(instance->ref_clk); if (ret) { - dev_err(tphy->dev, "failed to enable u3phya_ref\n"); + dev_err(tphy->dev, "failed to enable ref_clk\n"); return ret; } - ret = clk_prepare_enable(instance->ref_clk); + ret = clk_prepare_enable(instance->da_ref_clk); if (ret) { - dev_err(tphy->dev, "failed to enable ref_clk\n"); + dev_err(tphy->dev, "failed to enable da_ref\n"); + clk_disable_unprepare(instance->ref_clk); return ret; } @@ -967,7 +992,7 @@ static int mtk_phy_exit(struct phy *phy) u2_phy_instance_exit(tphy, instance); clk_disable_unprepare(instance->ref_clk); - clk_disable_unprepare(tphy->u3phya_ref); + clk_disable_unprepare(instance->da_ref_clk); return 0; } @@ -1102,11 +1127,6 @@ static int mtk_tphy_probe(struct platform_device *pdev) } } - /* it's deprecated, make it optional for backward compatibility */ - tphy->u3phya_ref = devm_clk_get_optional(dev, "u3phya_ref"); - if (IS_ERR(tphy->u3phya_ref)) - return PTR_ERR(tphy->u3phya_ref); - tphy->src_ref_clk = U3P_REF_CLK; tphy->src_coef = U3P_SLEW_RATE_COEF; /* update parameters of slew rate calibrate if exist */ @@ -1153,16 +1173,20 @@ static int mtk_tphy_probe(struct platform_device *pdev) phy_set_drvdata(phy, instance); port++; - /* if deprecated clock is provided, ignore instance's one */ - if (tphy->u3phya_ref) - continue; - - instance->ref_clk = devm_clk_get(&phy->dev, "ref"); + instance->ref_clk = devm_clk_get_optional(&phy->dev, "ref"); if (IS_ERR(instance->ref_clk)) { dev_err(dev, "failed to get ref_clk(id-%d)\n", port); retval = PTR_ERR(instance->ref_clk); goto put_child; } + + instance->da_ref_clk = + devm_clk_get_optional(&phy->dev, "da_ref"); + if (IS_ERR(instance->da_ref_clk)) { + dev_err(dev, "failed to get da_ref_clk(id-%d)\n", port); + retval = PTR_ERR(instance->da_ref_clk); + goto put_child; + } } provider = devm_of_phy_provider_register(dev, mtk_phy_xlate); diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c index f20524f0c21d..94a34cf75eb3 100644 --- a/drivers/phy/motorola/phy-mapphone-mdm6600.c +++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c @@ -20,6 +20,7 @@ #define PHY_MDM6600_PHY_DELAY_MS 4000 /* PHY enable 2.2s to 3.5s */ #define PHY_MDM6600_ENABLED_DELAY_MS 8000 /* 8s more total for MDM6600 */ +#define PHY_MDM6600_WAKE_KICK_MS 600 /* time on after GPIO toggle */ #define MDM6600_MODEM_IDLE_DELAY_MS 1000 /* modem after USB suspend */ #define MDM6600_MODEM_WAKE_DELAY_MS 200 /* modem response after idle */ @@ -243,10 +244,24 @@ static irqreturn_t phy_mdm6600_wakeirq_thread(int irq, void *data) { struct phy_mdm6600 *ddata = data; struct gpio_desc *mode_gpio1; + int error, wakeup; mode_gpio1 = ddata->mode_gpios->desc[PHY_MDM6600_MODE1]; - dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n", - gpiod_get_value(mode_gpio1)); + wakeup = gpiod_get_value(mode_gpio1); + if (!wakeup) + return IRQ_NONE; + + dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n", wakeup); + error = pm_runtime_get_sync(ddata->dev); + if (error < 0) { + pm_runtime_put_noidle(ddata->dev); + + return IRQ_NONE; + } + + /* Just wake-up and kick the autosuspend timer */ + 
pm_runtime_mark_last_busy(ddata->dev); + pm_runtime_put_autosuspend(ddata->dev); return IRQ_HANDLED; } @@ -496,8 +511,14 @@ static void phy_mdm6600_modem_wake(struct work_struct *work) ddata = container_of(work, struct phy_mdm6600, modem_wake_work.work); phy_mdm6600_wake_modem(ddata); + + /* + * The modem does not always stay awake 1.2 seconds after toggling + * the wake GPIO, and sometimes it idles after about some 600 ms + * making writes time out. + */ schedule_delayed_work(&ddata->modem_wake_work, - msecs_to_jiffies(MDM6600_MODEM_IDLE_DELAY_MS)); + msecs_to_jiffies(PHY_MDM6600_WAKE_KICK_MS)); } static int __maybe_unused phy_mdm6600_runtime_suspend(struct device *dev) diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index cd5a6c95dbdc..a27b8d578d7f 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -688,11 +688,9 @@ struct phy *phy_get(struct device *dev, const char *string) get_device(&phy->dev); link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS); - if (!link) { - dev_err(dev, "failed to create device link to %s\n", + if (!link) + dev_dbg(dev, "failed to create device link to %s\n", dev_name(phy->dev.parent)); - return ERR_PTR(-EINVAL); - } return phy; } @@ -803,11 +801,9 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np, } link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS); - if (!link) { - dev_err(dev, "failed to create device link to %s\n", + if (!link) + dev_dbg(dev, "failed to create device link to %s\n", dev_name(phy->dev.parent)); - return ERR_PTR(-EINVAL); - } return phy; } @@ -852,11 +848,9 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np, devres_add(dev, ptr); link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS); - if (!link) { - dev_err(dev, "failed to create device link to %s\n", + if (!link) + dev_dbg(dev, "failed to create device link to %s\n", dev_name(phy->dev.parent)); - return ERR_PTR(-EINVAL); - } return phy; } diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig index e46824da29f6..98674ed094d9 100644 --- a/drivers/phy/qualcomm/Kconfig +++ b/drivers/phy/qualcomm/Kconfig @@ -91,3 +91,23 @@ config PHY_QCOM_USB_HSIC select GENERIC_PHY help Support for the USB HSIC ULPI compliant PHY on QCOM chipsets. + +config PHY_QCOM_USB_HS_28NM + tristate "Qualcomm 28nm High-Speed PHY" + depends on ARCH_QCOM || COMPILE_TEST + depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in + select GENERIC_PHY + help + Enable this to support the Qualcomm Synopsys DesignWare Core 28nm + High-Speed PHY driver. This driver supports the Hi-Speed PHY which + is usually paired with either the ChipIdea or Synopsys DWC3 USB + IPs on MSM SOCs. + +config PHY_QCOM_USB_SS + tristate "Qualcomm USB Super-Speed PHY driver" + depends on ARCH_QCOM || COMPILE_TEST + depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in + select GENERIC_PHY + help + Enable this to support the Super-Speed USB transceiver on various + Qualcomm chipsets. 
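The two Qualcomm PHY drivers added by the Kconfig hunk above register with the generic PHY framework, so a USB controller driver consumes them through the standard phy_* calls rather than anything driver-specific; the phy-core.c hunk earlier in this patch also makes a failed device_link_add() non-fatal (logged at debug level only). A minimal, hypothetical consumer sketch follows; the driver, device and phandle names are illustrative and not taken from this patch, only the phy_* API calls are the real kernel interfaces:

/*
 * Hypothetical consumer of a generic PHY (e.g. the new Qualcomm 28 nm
 * HS PHY). Names are illustrative; only the phy_* calls are real APIs.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int example_usb_probe(struct platform_device *pdev)
{
	struct phy *phy;
	int ret;

	/* Look up the PHY named "usb2-phy" in this device's DT node */
	phy = devm_phy_get(&pdev->dev, "usb2-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* Run the PHY init sequence, then power it on */
	ret = phy_init(phy);
	if (ret)
		return ret;

	ret = phy_power_on(phy);
	if (ret)
		phy_exit(phy);

	return ret;
}

In the device tree the consumer would point at the PHY node with a phys/phy-names pair; which names a given binding expects is defined by that binding, not by this sketch.
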
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile index 283251d6a5d9..1f14aeacbd70 100644 --- a/drivers/phy/qualcomm/Makefile +++ b/drivers/phy/qualcomm/Makefile @@ -10,3 +10,5 @@ obj-$(CONFIG_PHY_QCOM_UFS_14NM) += phy-qcom-ufs-qmp-14nm.o obj-$(CONFIG_PHY_QCOM_UFS_20NM) += phy-qcom-ufs-qmp-20nm.o obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o +obj-$(CONFIG_PHY_QCOM_USB_HS_28NM) += phy-qcom-usb-hs-28nm.o +obj-$(CONFIG_PHY_QCOM_USB_SS) += phy-qcom-usb-ss.o diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index 7db2a94f7a99..c190406246ab 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -121,6 +121,11 @@ enum qphy_reg_layout { QPHY_PCS_LFPS_RXTERM_IRQ_STATUS, }; +static const unsigned int msm8996_ufsphy_regs_layout[] = { + [QPHY_START_CTRL] = 0x00, + [QPHY_PCS_READY_STATUS] = 0x168, +}; + static const unsigned int pciephy_regs_layout[] = { [QPHY_COM_SW_RESET] = 0x400, [QPHY_COM_POWER_DOWN_CONTROL] = 0x404, @@ -160,6 +165,18 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[] = { [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170, }; +static const unsigned int sdm845_qmp_pciephy_regs_layout[] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x08, + [QPHY_PCS_STATUS] = 0x174, +}; + +static const unsigned int sdm845_qhp_pciephy_regs_layout[] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x08, + [QPHY_PCS_STATUS] = 0x2ac, +}; + static const unsigned int sdm845_ufsphy_regs_layout[] = { [QPHY_START_CTRL] = 0x00, [QPHY_PCS_READY_STATUS] = 0x160, @@ -331,6 +348,75 @@ static const struct qmp_phy_init_tbl msm8998_pcie_pcs_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V3_PCS_SIGDET_CNTRL, 0x03), }; +static const struct qmp_phy_init_tbl msm8996_ufs_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xd7), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06), + QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x10), + QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x54), + QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xff), + 
QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE1, 0x98), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE1, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE1, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE1, 0x32), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE1, 0x00), +}; + +static const struct qmp_phy_init_tbl msm8996_ufs_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45), + QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x02), +}; + +static const struct qmp_phy_init_tbl msm8996_ufs_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x24), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_INTERFACE_MODE, 0x00), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_TERM_BW, 0x5b), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xff), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xff), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E), +}; + static const struct qmp_phy_init_tbl msm8996_usb3_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14), QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08), @@ -481,6 +567,229 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = { QMP_PHY_INIT_CFG_L(QPHY_START_CTRL, 0x3), }; +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x007), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_EP_DIV, 0x19), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x90), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0d), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00), + 
QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x33), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x7e), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x15), +}; + +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06), +}; + +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN_HALF, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x71), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x59), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_01, 0x59), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x71), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x40), +}; + +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V3_PCS_ENDPOINT_REFCLK_DRIVE, 0x04), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x01), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x20), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xbb), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x0d), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG4, 0x00), +}; + +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_pcs_misc_tbl[] = { + 
QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_CONFIG2, 0x52), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG2, 0x10), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4, 0x1a), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5, 0x06), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_PCIE_INT_AUX_CLK_CONFIG1, 0x00), +}; + +static const struct qmp_phy_init_tbl sdm845_qhp_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SYSCLK_EN_SEL, 0x27), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1, 0xde), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2, 0x07), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1_MODE1, 0x4c), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2_MODE1, 0x06), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BIAS_EN_CKBUFLR_EN, 0x18), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CLK_ENABLE1, 0xb0), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE0, 0x8c), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE0, 0x20), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE1, 0x14), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE1, 0x34), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_RESTRIM_CTRL2, 0x05), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP_EN, 0x42), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DEC_START_MODE1, 0x68), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE0, 0x55), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE0, 0x03), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE1, 0xab), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE1, 0xaa), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE1, 0x02), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE1, 0x3f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VCO_TUNE_MAP, 0x10), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CLK_SELECT, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_HSCLK_SEL1, 0x30), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORECLK_DIV, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORE_CLK_EN, 0x73), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CMN_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SVS_MODE_CLK_SEL, 0x15), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORECLK_DIV_MODE1, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CMN_MODE, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VREGCLK_DIV1, 0x22), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VREGCLK_DIV2, 0x00), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BGV_TRIM, 0x20), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BG_CTRL, 0x07), +}; + +static const struct qmp_phy_init_tbl sdm845_qhp_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL0, 0x00), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_TAP_EN, 0x0d), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_TX_BAND_MODE, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_LANE_MODE, 0x1a), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PARALLEL_RATE, 0x2f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE0, 0x09), + 
QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE1, 0x09), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE2, 0x1b), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE1, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE2, 0x07), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE0, 0x31), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE1, 0x31), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE2, 0x03), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_THRESH_DFE, 0x02), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CGA_THRESH_DFE, 0x00), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXENGINE_EN0, 0x12), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_TRAIN_TIME, 0x25), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_DFE_OVRLP_TIME, 0x00), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_REFRESH_TIME, 0x05), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_ENABLE_TIME, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_VGA_GAIN, 0x26), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_GAIN, 0x12), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EQ_GAIN, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_OFFSET_GAIN, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PRE_GAIN, 0x09), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EQ_INTVAL, 0x15), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EDAC_INITVAL, 0x28), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_INITB0, 0x7f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_INITB1, 0x07), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RCVRDONE_THRESH1, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_CTRL, 0x70), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE0, 0x8b), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE1, 0x08), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE2, 0x0a), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE0, 0x03), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE1, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE2, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_BAND, 0x02), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE0, 0x5c), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE1, 0x3e), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE2, 0x3f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_ENABLES, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_CNTRL, 0xa0), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_DEGLITCH_CNTRL, 0x08), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DCC_GAIN, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_EN_SIGNAL, 0xc3), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PSM_RX_EN_CAL, 0x00), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_MISC_CNTRL0, 0xbc), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_TS0_TIMER, 0x7f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DLL_HIGHDATARATE, 0x15), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL1, 0x0c), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL2, 0x0f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RESETCODE_OFFSET, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_VGA_INITVAL, 0x20), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RSM_START, 0x01), +}; + +static const struct qmp_phy_init_tbl sdm845_qhp_pcie_rx_tbl[] = { +}; + +static const struct qmp_phy_init_tbl sdm845_qhp_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG, 0x3f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG, 0x50), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M3P5DB, 0x19), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M3P5DB, 0x07), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M6DB, 0x17), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M6DB, 0x09), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5, 0x9f), +}; + static const 
struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07), QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14), @@ -988,6 +1297,8 @@ struct qmp_phy_cfg { int rx_tbl_num; const struct qmp_phy_init_tbl *pcs_tbl; int pcs_tbl_num; + const struct qmp_phy_init_tbl *pcs_misc_tbl; + int pcs_misc_tbl_num; /* clock ids to be requested */ const char * const *clk_list; @@ -1122,10 +1433,18 @@ static const char * const msm8996_phy_clk_l[] = { "aux", "cfg_ahb", "ref", }; +static const char * const msm8996_ufs_phy_clk_l[] = { + "ref", +}; + static const char * const qmp_v3_phy_clk_l[] = { "aux", "cfg_ahb", "ref", "com_aux", }; +static const char * const sdm845_pciephy_clk_l[] = { + "aux", "cfg_ahb", "ref", "refgen", +}; + static const char * const sdm845_ufs_phy_clk_l[] = { "ref", "ref_aux", }; @@ -1139,6 +1458,10 @@ static const char * const msm8996_usb3phy_reset_l[] = { "phy", "common", }; +static const char * const sdm845_pciephy_reset_l[] = { + "phy", +}; + /* list of regulators */ static const char * const qmp_phy_vreg_l[] = { "vdda-phy", "vdda-pll", @@ -1175,6 +1498,31 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = { .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, }; +static const struct qmp_phy_cfg msm8996_ufs_cfg = { + .type = PHY_TYPE_UFS, + .nlanes = 1, + + .serdes_tbl = msm8996_ufs_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(msm8996_ufs_serdes_tbl), + .tx_tbl = msm8996_ufs_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(msm8996_ufs_tx_tbl), + .rx_tbl = msm8996_ufs_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(msm8996_ufs_rx_tbl), + + .clk_list = msm8996_ufs_phy_clk_l, + .num_clks = ARRAY_SIZE(msm8996_ufs_phy_clk_l), + + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + + .regs = msm8996_ufsphy_regs_layout, + + .start_ctrl = SERDES_START, + .pwrdn_ctrl = SW_PWRDN, + + .no_pcs_sw_reset = true, +}; + static const struct qmp_phy_cfg msm8996_usb3phy_cfg = { .type = PHY_TYPE_USB3, .nlanes = 1, @@ -1234,6 +1582,64 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = { .pwrdn_delay_max = 1005, /* us */ }; +static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = { + .type = PHY_TYPE_PCIE, + .nlanes = 1, + + .serdes_tbl = sdm845_qmp_pcie_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl), + .tx_tbl = sdm845_qmp_pcie_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_tx_tbl), + .rx_tbl = sdm845_qmp_pcie_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_rx_tbl), + .pcs_tbl = sdm845_qmp_pcie_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_tbl), + .pcs_misc_tbl = sdm845_qmp_pcie_pcs_misc_tbl, + .pcs_misc_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl), + .clk_list = sdm845_pciephy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sdm845_qmp_pciephy_regs_layout, + + .start_ctrl = PCS_START | SERDES_START, + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = 995, /* us */ + .pwrdn_delay_max = 1005, /* us */ +}; + +static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = { + .type = PHY_TYPE_PCIE, + .nlanes = 1, + + .serdes_tbl = sdm845_qhp_pcie_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl), + .tx_tbl = sdm845_qhp_pcie_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_tx_tbl), + .rx_tbl = sdm845_qhp_pcie_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_rx_tbl), + .pcs_tbl = 
sdm845_qhp_pcie_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl), + .clk_list = sdm845_pciephy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sdm845_qhp_pciephy_regs_layout, + + .start_ctrl = PCS_START | SERDES_START, + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = 995, /* us */ + .pwrdn_delay_max = 1005, /* us */ +}; + static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = { .type = PHY_TYPE_USB3, .nlanes = 1, @@ -1563,6 +1969,7 @@ static int qcom_qmp_phy_enable(struct phy *phy) void __iomem *tx = qphy->tx; void __iomem *rx = qphy->rx; void __iomem *pcs = qphy->pcs; + void __iomem *pcs_misc = qphy->pcs_misc; void __iomem *dp_com = qmp->dp_com; void __iomem *status; unsigned int mask, val, ready; @@ -1633,6 +2040,9 @@ static int qcom_qmp_phy_enable(struct phy *phy) if (ret) goto err_lane_rst; + qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl, + cfg->pcs_misc_tbl_num); + /* * Pull out PHY from POWER DOWN state. * This is active low enable signal to power-down PHY. @@ -1967,7 +2377,7 @@ static const struct phy_ops qcom_qmp_phy_gen_ops = { .owner = THIS_MODULE, }; -static const struct phy_ops qcom_qmp_ufs_ops = { +static const struct phy_ops qcom_qmp_pcie_ufs_ops = { .power_on = qcom_qmp_phy_enable, .power_off = qcom_qmp_phy_disable, .set_mode = qcom_qmp_phy_set_mode, @@ -2067,8 +2477,8 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id) } } - if (qmp->cfg->type == PHY_TYPE_UFS) - ops = &qcom_qmp_ufs_ops; + if (qmp->cfg->type == PHY_TYPE_UFS || qmp->cfg->type == PHY_TYPE_PCIE) + ops = &qcom_qmp_pcie_ufs_ops; generic_phy = devm_phy_create(dev, np, ops); if (IS_ERR(generic_phy)) { @@ -2091,6 +2501,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { .compatible = "qcom,msm8996-qmp-pcie-phy", .data = &msm8996_pciephy_cfg, }, { + .compatible = "qcom,msm8996-qmp-ufs-phy", + .data = &msm8996_ufs_cfg, + }, { .compatible = "qcom,msm8996-qmp-usb3-phy", .data = &msm8996_usb3phy_cfg, }, { @@ -2103,6 +2516,12 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { .compatible = "qcom,ipq8074-qmp-pcie-phy", .data = &ipq8074_pciephy_cfg, }, { + .compatible = "qcom,sdm845-qhp-pcie-phy", + .data = &sdm845_qhp_pciephy_cfg, + }, { + .compatible = "qcom,sdm845-qmp-pcie-phy", + .data = &sdm845_qmp_pciephy_cfg, + }, { .compatible = "qcom,sdm845-qmp-usb3-phy", .data = &qmp_v3_usb3phy_cfg, }, { diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index 90f793c2293d..dece0e67704b 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -409,4 +409,118 @@ #define QPHY_V4_TX_MID_TERM_CTRL1 0x1d8 #define QPHY_V4_MULTI_LANE_CTRL1 0x1e0 +/* PCIE GEN3 COM registers */ +#define PCIE_GEN3_QHP_COM_SSC_EN_CENTER 0x14 +#define PCIE_GEN3_QHP_COM_SSC_PER1 0x20 +#define PCIE_GEN3_QHP_COM_SSC_PER2 0x24 +#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1 0x28 +#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2 0x2c +#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1_MODE1 0x34 +#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2_MODE1 0x38 +#define PCIE_GEN3_QHP_COM_BIAS_EN_CKBUFLR_EN 0x54 +#define PCIE_GEN3_QHP_COM_CLK_ENABLE1 0x58 +#define PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE0 0x6c +#define PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE0 0x70 +#define PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE1 0x78 +#define 
PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE1 0x7c +#define PCIE_GEN3_QHP_COM_BGV_TRIM 0x98 +#define PCIE_GEN3_QHP_COM_CP_CTRL_MODE0 0xb4 +#define PCIE_GEN3_QHP_COM_CP_CTRL_MODE1 0xb8 +#define PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE0 0xc0 +#define PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE1 0xc4 +#define PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE0 0xcc +#define PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE1 0xd0 +#define PCIE_GEN3_QHP_COM_SYSCLK_EN_SEL 0xdc +#define PCIE_GEN3_QHP_COM_RESTRIM_CTRL2 0xf0 +#define PCIE_GEN3_QHP_COM_LOCK_CMP_EN 0xf8 +#define PCIE_GEN3_QHP_COM_DEC_START_MODE0 0x100 +#define PCIE_GEN3_QHP_COM_DEC_START_MODE1 0x108 +#define PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE0 0x11c +#define PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE0 0x120 +#define PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE0 0x124 +#define PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE1 0x128 +#define PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE1 0x12c +#define PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE1 0x130 +#define PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE0 0x150 +#define PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE1 0x158 +#define PCIE_GEN3_QHP_COM_VCO_TUNE_MAP 0x178 +#define PCIE_GEN3_QHP_COM_BG_CTRL 0x1c8 +#define PCIE_GEN3_QHP_COM_CLK_SELECT 0x1cc +#define PCIE_GEN3_QHP_COM_HSCLK_SEL1 0x1d0 +#define PCIE_GEN3_QHP_COM_CORECLK_DIV 0x1e0 +#define PCIE_GEN3_QHP_COM_CORE_CLK_EN 0x1e8 +#define PCIE_GEN3_QHP_COM_CMN_CONFIG 0x1f0 +#define PCIE_GEN3_QHP_COM_SVS_MODE_CLK_SEL 0x1fc +#define PCIE_GEN3_QHP_COM_CORECLK_DIV_MODE1 0x21c +#define PCIE_GEN3_QHP_COM_CMN_MODE 0x224 +#define PCIE_GEN3_QHP_COM_VREGCLK_DIV1 0x228 +#define PCIE_GEN3_QHP_COM_VREGCLK_DIV2 0x22c + +/* PCIE GEN3 QHP Lane registers */ +#define PCIE_GEN3_QHP_L0_DRVR_CTRL0 0xc +#define PCIE_GEN3_QHP_L0_DRVR_CTRL1 0x10 +#define PCIE_GEN3_QHP_L0_DRVR_CTRL2 0x14 +#define PCIE_GEN3_QHP_L0_DRVR_TAP_EN 0x18 +#define PCIE_GEN3_QHP_L0_TX_BAND_MODE 0x60 +#define PCIE_GEN3_QHP_L0_LANE_MODE 0x64 +#define PCIE_GEN3_QHP_L0_PARALLEL_RATE 0x7c +#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE0 0xc0 +#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE1 0xc4 +#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE2 0xc8 +#define PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE1 0xd0 +#define PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE2 0xd4 +#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE0 0xd8 +#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE1 0xdc +#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE2 0xe0 +#define PCIE_GEN3_QHP_L0_CTLE_THRESH_DFE 0xfc +#define PCIE_GEN3_QHP_L0_CGA_THRESH_DFE 0x100 +#define PCIE_GEN3_QHP_L0_RXENGINE_EN0 0x108 +#define PCIE_GEN3_QHP_L0_CTLE_TRAIN_TIME 0x114 +#define PCIE_GEN3_QHP_L0_CTLE_DFE_OVRLP_TIME 0x118 +#define PCIE_GEN3_QHP_L0_DFE_REFRESH_TIME 0x11c +#define PCIE_GEN3_QHP_L0_DFE_ENABLE_TIME 0x120 +#define PCIE_GEN3_QHP_L0_VGA_GAIN 0x124 +#define PCIE_GEN3_QHP_L0_DFE_GAIN 0x128 +#define PCIE_GEN3_QHP_L0_EQ_GAIN 0x130 +#define PCIE_GEN3_QHP_L0_OFFSET_GAIN 0x134 +#define PCIE_GEN3_QHP_L0_PRE_GAIN 0x138 +#define PCIE_GEN3_QHP_L0_VGA_INITVAL 0x13c +#define PCIE_GEN3_QHP_L0_EQ_INTVAL 0x154 +#define PCIE_GEN3_QHP_L0_EDAC_INITVAL 0x160 +#define PCIE_GEN3_QHP_L0_RXEQ_INITB0 0x168 +#define PCIE_GEN3_QHP_L0_RXEQ_INITB1 0x16c +#define PCIE_GEN3_QHP_L0_RCVRDONE_THRESH1 0x178 +#define PCIE_GEN3_QHP_L0_RXEQ_CTRL 0x180 +#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE0 0x184 +#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE1 0x188 +#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE2 0x18c +#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE0 0x190 +#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE1 0x194 +#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE2 0x198 +#define PCIE_GEN3_QHP_L0_UCDR_SO_CONFIG 0x19c +#define PCIE_GEN3_QHP_L0_RX_BAND 0x1a4 +#define 
PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE0 0x1c0 +#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE1 0x1c4 +#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE2 0x1c8 +#define PCIE_GEN3_QHP_L0_SIGDET_ENABLES 0x230 +#define PCIE_GEN3_QHP_L0_SIGDET_CNTRL 0x234 +#define PCIE_GEN3_QHP_L0_SIGDET_DEGLITCH_CNTRL 0x238 +#define PCIE_GEN3_QHP_L0_DCC_GAIN 0x2a4 +#define PCIE_GEN3_QHP_L0_RSM_START 0x2a8 +#define PCIE_GEN3_QHP_L0_RX_EN_SIGNAL 0x2ac +#define PCIE_GEN3_QHP_L0_PSM_RX_EN_CAL 0x2b0 +#define PCIE_GEN3_QHP_L0_RX_MISC_CNTRL0 0x2b8 +#define PCIE_GEN3_QHP_L0_TS0_TIMER 0x2c0 +#define PCIE_GEN3_QHP_L0_DLL_HIGHDATARATE 0x2c4 +#define PCIE_GEN3_QHP_L0_RX_RESETCODE_OFFSET 0x2cc + +/* PCIE GEN3 PCS registers */ +#define PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M3P5DB 0x2c +#define PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M3P5DB 0x40 +#define PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M6DB 0x54 +#define PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M6DB 0x68 +#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG 0x15c +#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5 0x16c +#define PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG 0x174 + #endif diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c index bf94a52d3087..3708d43b7508 100644 --- a/drivers/phy/qualcomm/phy-qcom-qusb2.c +++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2017, 2019, The Linux Foundation. All rights reserved. */ #include <linux/clk.h> @@ -66,6 +66,14 @@ #define IMP_RES_OFFSET_MASK GENMASK(5, 0) #define IMP_RES_OFFSET_SHIFT 0x0 +/* QUSB2PHY_PLL_BIAS_CONTROL_2 register bits */ +#define BIAS_CTRL2_RES_OFFSET_MASK GENMASK(5, 0) +#define BIAS_CTRL2_RES_OFFSET_SHIFT 0x0 + +/* QUSB2PHY_CHG_CONTROL_2 register bits */ +#define CHG_CTRL2_OFFSET_MASK GENMASK(5, 4) +#define CHG_CTRL2_OFFSET_SHIFT 0x4 + /* QUSB2PHY_PORT_TUNE1 register bits */ #define HSTX_TRIM_MASK GENMASK(7, 4) #define HSTX_TRIM_SHIFT 0x4 @@ -73,6 +81,10 @@ #define PREEMPHASIS_EN_MASK GENMASK(1, 0) #define PREEMPHASIS_EN_SHIFT 0x0 +/* QUSB2PHY_PORT_TUNE2 register bits */ +#define HSDISC_TRIM_MASK GENMASK(1, 0) +#define HSDISC_TRIM_SHIFT 0x0 + #define QUSB2PHY_PLL_ANALOG_CONTROLS_TWO 0x04 #define QUSB2PHY_PLL_CLOCK_INVERTERS 0x18c #define QUSB2PHY_PLL_CMODE 0x2c @@ -177,7 +189,7 @@ static const struct qusb2_phy_init_tbl msm8998_init_tbl[] = { QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_DIGITAL_TIMERS_TWO, 0x19), }; -static const unsigned int sdm845_regs_layout[] = { +static const unsigned int qusb2_v2_regs_layout[] = { [QUSB2PHY_PLL_CORE_INPUT_OVERRIDE] = 0xa8, [QUSB2PHY_PLL_STATUS] = 0x1a0, [QUSB2PHY_PORT_TUNE1] = 0x240, @@ -191,7 +203,7 @@ static const unsigned int sdm845_regs_layout[] = { [QUSB2PHY_INTR_CTRL] = 0x230, }; -static const struct qusb2_phy_init_tbl sdm845_init_tbl[] = { +static const struct qusb2_phy_init_tbl qusb2_v2_init_tbl[] = { QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_ANALOG_CONTROLS_TWO, 0x03), QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_CLOCK_INVERTERS, 0x7c), QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_CMODE, 0x80), @@ -258,10 +270,10 @@ static const struct qusb2_phy_cfg msm8998_phy_cfg = { .update_tune1_with_efuse = true, }; -static const struct qusb2_phy_cfg sdm845_phy_cfg = { - .tbl = sdm845_init_tbl, - .tbl_num = ARRAY_SIZE(sdm845_init_tbl), - .regs = sdm845_regs_layout, +static const struct qusb2_phy_cfg qusb2_v2_phy_cfg = { + .tbl = qusb2_v2_init_tbl, + .tbl_num = ARRAY_SIZE(qusb2_v2_init_tbl), + .regs = qusb2_v2_regs_layout, .disable_ctrl = (PWR_CTRL1_VREF_SUPPLY_TRIM | PWR_CTRL1_CLAMP_N_EN | POWER_DOWN), @@ 
-277,6 +289,34 @@ static const char * const qusb2_phy_vreg_names[] = { #define QUSB2_NUM_VREGS ARRAY_SIZE(qusb2_phy_vreg_names) +/* struct override_param - structure holding qusb2 v2 phy overriding param + * set override true if the device tree property exists and read and assign + * to value + */ +struct override_param { + bool override; + u8 value; +}; + +/*struct override_params - structure holding qusb2 v2 phy overriding params + * @imp_res_offset: rescode offset to be updated in IMP_CTRL1 register + * @hstx_trim: HSTX_TRIM to be updated in TUNE1 register + * @preemphasis: Amplitude Pre-Emphasis to be updated in TUNE1 register + * @preemphasis_width: half/full-width Pre-Emphasis updated via TUNE1 + * @bias_ctrl: bias ctrl to be updated in BIAS_CONTROL_2 register + * @charge_ctrl: charge ctrl to be updated in CHG_CTRL2 register + * @hsdisc_trim: disconnect threshold to be updated in TUNE2 register + */ +struct override_params { + struct override_param imp_res_offset; + struct override_param hstx_trim; + struct override_param preemphasis; + struct override_param preemphasis_width; + struct override_param bias_ctrl; + struct override_param charge_ctrl; + struct override_param hsdisc_trim; +}; + /** * struct qusb2_phy - structure holding qusb2 phy attributes * @@ -292,14 +332,7 @@ static const char * const qusb2_phy_vreg_names[] = { * @tcsr: TCSR syscon register map * @cell: nvmem cell containing phy tuning value * - * @override_imp_res_offset: PHY should use different rescode offset - * @imp_res_offset_value: rescode offset to be updated in IMP_CTRL1 register - * @override_hstx_trim: PHY should use different HSTX o/p current value - * @hstx_trim_value: HSTX_TRIM value to be updated in TUNE1 register - * @override_preemphasis: PHY should use different pre-amphasis amplitude - * @preemphasis_level: Amplitude Pre-Emphasis to be updated in TUNE1 register - * @override_preemphasis_width: PHY should use different pre-emphasis duration - * @preemphasis_width: half/full-width Pre-Emphasis updated via TUNE1 + * @overrides: pointer to structure for all overriding tuning params * * @cfg: phy config data * @has_se_clk_scheme: indicate if PHY has single-ended ref clock scheme @@ -319,14 +352,7 @@ struct qusb2_phy { struct regmap *tcsr; struct nvmem_cell *cell; - bool override_imp_res_offset; - u8 imp_res_offset_value; - bool override_hstx_trim; - u8 hstx_trim_value; - bool override_preemphasis; - u8 preemphasis_level; - bool override_preemphasis_width; - u8 preemphasis_width; + struct override_params overrides; const struct qusb2_phy_cfg *cfg; bool has_se_clk_scheme; @@ -394,24 +420,35 @@ void qcom_qusb2_phy_configure(void __iomem *base, static void qusb2_phy_override_phy_params(struct qusb2_phy *qphy) { const struct qusb2_phy_cfg *cfg = qphy->cfg; + struct override_params *or = &qphy->overrides; - if (qphy->override_imp_res_offset) + if (or->imp_res_offset.override) qusb2_write_mask(qphy->base, QUSB2PHY_IMP_CTRL1, - qphy->imp_res_offset_value << IMP_RES_OFFSET_SHIFT, + or->imp_res_offset.value << IMP_RES_OFFSET_SHIFT, IMP_RES_OFFSET_MASK); - if (qphy->override_hstx_trim) + if (or->bias_ctrl.override) + qusb2_write_mask(qphy->base, QUSB2PHY_PLL_BIAS_CONTROL_2, + or->bias_ctrl.value << BIAS_CTRL2_RES_OFFSET_SHIFT, + BIAS_CTRL2_RES_OFFSET_MASK); + + if (or->charge_ctrl.override) + qusb2_write_mask(qphy->base, QUSB2PHY_CHG_CTRL2, + or->charge_ctrl.value << CHG_CTRL2_OFFSET_SHIFT, + CHG_CTRL2_OFFSET_MASK); + + if (or->hstx_trim.override) qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1], - 
qphy->hstx_trim_value << HSTX_TRIM_SHIFT, + or->hstx_trim.value << HSTX_TRIM_SHIFT, HSTX_TRIM_MASK); - if (qphy->override_preemphasis) + if (or->preemphasis.override) qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1], - qphy->preemphasis_level << PREEMPHASIS_EN_SHIFT, + or->preemphasis.value << PREEMPHASIS_EN_SHIFT, PREEMPHASIS_EN_MASK); - if (qphy->override_preemphasis_width) { - if (qphy->preemphasis_width == + if (or->preemphasis_width.override) { + if (or->preemphasis_width.value == QUSB2_V2_PREEMPHASIS_WIDTH_HALF_BIT) qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1], @@ -421,6 +458,11 @@ static void qusb2_phy_override_phy_params(struct qusb2_phy *qphy) cfg->regs[QUSB2PHY_PORT_TUNE1], PREEMPH_WIDTH_HALF_BIT); } + + if (or->hsdisc_trim.override) + qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2], + or->hsdisc_trim.value << HSDISC_TRIM_SHIFT, + HSDISC_TRIM_MASK); } /* @@ -774,8 +816,8 @@ static const struct of_device_id qusb2_phy_of_match_table[] = { .compatible = "qcom,msm8998-qusb2-phy", .data = &msm8998_phy_cfg, }, { - .compatible = "qcom,sdm845-qusb2-phy", - .data = &sdm845_phy_cfg, + .compatible = "qcom,qusb2-v2-phy", + .data = &qusb2_v2_phy_cfg, }, { }, }; @@ -796,10 +838,12 @@ static int qusb2_phy_probe(struct platform_device *pdev) int ret, i; int num; u32 value; + struct override_params *or; qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL); if (!qphy) return -ENOMEM; + or = &qphy->overrides; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); qphy->base = devm_ioremap_resource(dev, res); @@ -864,26 +908,44 @@ static int qusb2_phy_probe(struct platform_device *pdev) if (!of_property_read_u32(dev->of_node, "qcom,imp-res-offset-value", &value)) { - qphy->imp_res_offset_value = (u8)value; - qphy->override_imp_res_offset = true; + or->imp_res_offset.value = (u8)value; + or->imp_res_offset.override = true; + } + + if (!of_property_read_u32(dev->of_node, "qcom,bias-ctrl-value", + &value)) { + or->bias_ctrl.value = (u8)value; + or->bias_ctrl.override = true; + } + + if (!of_property_read_u32(dev->of_node, "qcom,charge-ctrl-value", + &value)) { + or->charge_ctrl.value = (u8)value; + or->charge_ctrl.override = true; } if (!of_property_read_u32(dev->of_node, "qcom,hstx-trim-value", &value)) { - qphy->hstx_trim_value = (u8)value; - qphy->override_hstx_trim = true; + or->hstx_trim.value = (u8)value; + or->hstx_trim.override = true; } if (!of_property_read_u32(dev->of_node, "qcom,preemphasis-level", &value)) { - qphy->preemphasis_level = (u8)value; - qphy->override_preemphasis = true; + or->preemphasis.value = (u8)value; + or->preemphasis.override = true; } if (!of_property_read_u32(dev->of_node, "qcom,preemphasis-width", &value)) { - qphy->preemphasis_width = (u8)value; - qphy->override_preemphasis_width = true; + or->preemphasis_width.value = (u8)value; + or->preemphasis_width.override = true; + } + + if (!of_property_read_u32(dev->of_node, "qcom,hsdisc-trim-value", + &value)) { + or->hsdisc_trim.value = (u8)value; + or->hsdisc_trim.override = true; } pm_runtime_set_active(dev); diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c b/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c new file mode 100644 index 000000000000..d998e65c89c8 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c @@ -0,0 +1,415 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2009-2018, Linux Foundation. All rights reserved. 
+ * Copyright (c) 2018-2020, Linaro Limited + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/slab.h> + +/* PHY register and bit definitions */ +#define PHY_CTRL_COMMON0 0x078 +#define SIDDQ BIT(2) +#define PHY_IRQ_CMD 0x0d0 +#define PHY_INTR_MASK0 0x0d4 +#define PHY_INTR_CLEAR0 0x0dc +#define DPDM_MASK 0x1e +#define DP_1_0 BIT(4) +#define DP_0_1 BIT(3) +#define DM_1_0 BIT(2) +#define DM_0_1 BIT(1) + +enum hsphy_voltage { + VOL_NONE, + VOL_MIN, + VOL_MAX, + VOL_NUM, +}; + +enum hsphy_vreg { + VDD, + VDDA_1P8, + VDDA_3P3, + VREG_NUM, +}; + +struct hsphy_init_seq { + int offset; + int val; + int delay; +}; + +struct hsphy_data { + const struct hsphy_init_seq *init_seq; + unsigned int init_seq_num; +}; + +struct hsphy_priv { + void __iomem *base; + struct clk_bulk_data *clks; + int num_clks; + struct reset_control *phy_reset; + struct reset_control *por_reset; + struct regulator_bulk_data vregs[VREG_NUM]; + const struct hsphy_data *data; + enum phy_mode mode; +}; + +static int qcom_snps_hsphy_set_mode(struct phy *phy, enum phy_mode mode, + int submode) +{ + struct hsphy_priv *priv = phy_get_drvdata(phy); + + priv->mode = PHY_MODE_INVALID; + + if (mode > 0) + priv->mode = mode; + + return 0; +} + +static void qcom_snps_hsphy_enable_hv_interrupts(struct hsphy_priv *priv) +{ + u32 val; + + /* Clear any existing interrupts before enabling the interrupts */ + val = readb(priv->base + PHY_INTR_CLEAR0); + val |= DPDM_MASK; + writeb(val, priv->base + PHY_INTR_CLEAR0); + + writeb(0x0, priv->base + PHY_IRQ_CMD); + usleep_range(200, 220); + writeb(0x1, priv->base + PHY_IRQ_CMD); + + /* Make sure the interrupts are cleared */ + usleep_range(200, 220); + + val = readb(priv->base + PHY_INTR_MASK0); + switch (priv->mode) { + case PHY_MODE_USB_HOST_HS: + case PHY_MODE_USB_HOST_FS: + case PHY_MODE_USB_DEVICE_HS: + case PHY_MODE_USB_DEVICE_FS: + val |= DP_1_0 | DM_0_1; + break; + case PHY_MODE_USB_HOST_LS: + case PHY_MODE_USB_DEVICE_LS: + val |= DP_0_1 | DM_1_0; + break; + default: + /* No device connected */ + val |= DP_0_1 | DM_0_1; + break; + } + writeb(val, priv->base + PHY_INTR_MASK0); +} + +static void qcom_snps_hsphy_disable_hv_interrupts(struct hsphy_priv *priv) +{ + u32 val; + + val = readb(priv->base + PHY_INTR_MASK0); + val &= ~DPDM_MASK; + writeb(val, priv->base + PHY_INTR_MASK0); + + /* Clear any pending interrupts */ + val = readb(priv->base + PHY_INTR_CLEAR0); + val |= DPDM_MASK; + writeb(val, priv->base + PHY_INTR_CLEAR0); + + writeb(0x0, priv->base + PHY_IRQ_CMD); + usleep_range(200, 220); + + writeb(0x1, priv->base + PHY_IRQ_CMD); + usleep_range(200, 220); +} + +static void qcom_snps_hsphy_enter_retention(struct hsphy_priv *priv) +{ + u32 val; + + val = readb(priv->base + PHY_CTRL_COMMON0); + val |= SIDDQ; + writeb(val, priv->base + PHY_CTRL_COMMON0); +} + +static void qcom_snps_hsphy_exit_retention(struct hsphy_priv *priv) +{ + u32 val; + + val = readb(priv->base + PHY_CTRL_COMMON0); + val &= ~SIDDQ; + writeb(val, priv->base + PHY_CTRL_COMMON0); +} + +static int qcom_snps_hsphy_power_on(struct phy *phy) +{ + struct hsphy_priv *priv = phy_get_drvdata(phy); + int ret; + + ret = regulator_bulk_enable(VREG_NUM, priv->vregs); + if (ret) + return ret; + ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks); + if 
(ret) + goto err_disable_regulator; + qcom_snps_hsphy_disable_hv_interrupts(priv); + qcom_snps_hsphy_exit_retention(priv); + + return 0; + +err_disable_regulator: + regulator_bulk_disable(VREG_NUM, priv->vregs); + + return ret; +} + +static int qcom_snps_hsphy_power_off(struct phy *phy) +{ + struct hsphy_priv *priv = phy_get_drvdata(phy); + + qcom_snps_hsphy_enter_retention(priv); + qcom_snps_hsphy_enable_hv_interrupts(priv); + clk_bulk_disable_unprepare(priv->num_clks, priv->clks); + regulator_bulk_disable(VREG_NUM, priv->vregs); + + return 0; +} + +static int qcom_snps_hsphy_reset(struct hsphy_priv *priv) +{ + int ret; + + ret = reset_control_assert(priv->phy_reset); + if (ret) + return ret; + + usleep_range(10, 15); + + ret = reset_control_deassert(priv->phy_reset); + if (ret) + return ret; + + usleep_range(80, 100); + + return 0; +} + +static void qcom_snps_hsphy_init_sequence(struct hsphy_priv *priv) +{ + const struct hsphy_data *data = priv->data; + const struct hsphy_init_seq *seq; + int i; + + /* Device match data is optional. */ + if (!data) + return; + + seq = data->init_seq; + + for (i = 0; i < data->init_seq_num; i++, seq++) { + writeb(seq->val, priv->base + seq->offset); + if (seq->delay) + usleep_range(seq->delay, seq->delay + 10); + } +} + +static int qcom_snps_hsphy_por_reset(struct hsphy_priv *priv) +{ + int ret; + + ret = reset_control_assert(priv->por_reset); + if (ret) + return ret; + + /* + * The Femto PHY is POR reset in the following scenarios. + * + * 1. After overriding the parameter registers. + * 2. Low power mode exit from PHY retention. + * + * Ensure that SIDDQ is cleared before bringing the PHY + * out of reset. + */ + qcom_snps_hsphy_exit_retention(priv); + + /* + * As per databook, 10 usec delay is required between + * PHY POR assert and de-assert. + */ + usleep_range(10, 20); + ret = reset_control_deassert(priv->por_reset); + if (ret) + return ret; + + /* + * As per databook, it takes 75 usec for PHY to stabilize + * after the reset. 
+ */ + usleep_range(80, 100); + + return 0; +} + +static int qcom_snps_hsphy_init(struct phy *phy) +{ + struct hsphy_priv *priv = phy_get_drvdata(phy); + int ret; + + ret = qcom_snps_hsphy_reset(priv); + if (ret) + return ret; + + qcom_snps_hsphy_init_sequence(priv); + + ret = qcom_snps_hsphy_por_reset(priv); + if (ret) + return ret; + + return 0; +} + +static const struct phy_ops qcom_snps_hsphy_ops = { + .init = qcom_snps_hsphy_init, + .power_on = qcom_snps_hsphy_power_on, + .power_off = qcom_snps_hsphy_power_off, + .set_mode = qcom_snps_hsphy_set_mode, + .owner = THIS_MODULE, +}; + +static const char * const qcom_snps_hsphy_clks[] = { + "ref", + "ahb", + "sleep", +}; + +static int qcom_snps_hsphy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phy_provider *provider; + struct hsphy_priv *priv; + struct phy *phy; + int ret; + int i; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->num_clks = ARRAY_SIZE(qcom_snps_hsphy_clks); + priv->clks = devm_kcalloc(dev, priv->num_clks, sizeof(*priv->clks), + GFP_KERNEL); + if (!priv->clks) + return -ENOMEM; + + for (i = 0; i < priv->num_clks; i++) + priv->clks[i].id = qcom_snps_hsphy_clks[i]; + + ret = devm_clk_bulk_get(dev, priv->num_clks, priv->clks); + if (ret) + return ret; + + priv->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); + if (IS_ERR(priv->phy_reset)) + return PTR_ERR(priv->phy_reset); + + priv->por_reset = devm_reset_control_get_exclusive(dev, "por"); + if (IS_ERR(priv->por_reset)) + return PTR_ERR(priv->por_reset); + + priv->vregs[VDD].supply = "vdd"; + priv->vregs[VDDA_1P8].supply = "vdda1p8"; + priv->vregs[VDDA_3P3].supply = "vdda3p3"; + + ret = devm_regulator_bulk_get(dev, VREG_NUM, priv->vregs); + if (ret) + return ret; + + /* Get device match data */ + priv->data = device_get_match_data(dev); + + phy = devm_phy_create(dev, dev->of_node, &qcom_snps_hsphy_ops); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + phy_set_drvdata(phy, priv); + + provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(provider)) + return PTR_ERR(provider); + + ret = regulator_set_load(priv->vregs[VDDA_1P8].consumer, 19000); + if (ret < 0) + return ret; + + ret = regulator_set_load(priv->vregs[VDDA_3P3].consumer, 16000); + if (ret < 0) + goto unset_1p8_load; + + return 0; + +unset_1p8_load: + regulator_set_load(priv->vregs[VDDA_1P8].consumer, 0); + + return ret; +} + +/* + * The macro is used to define an initialization sequence. Each tuple + * is meant to program 'value' into phy register at 'offset' with 'delay' + * in us followed. 
+ */ +#define HSPHY_INIT_CFG(o, v, d) { .offset = o, .val = v, .delay = d, } + +static const struct hsphy_init_seq init_seq_femtophy[] = { + HSPHY_INIT_CFG(0xc0, 0x01, 0), + HSPHY_INIT_CFG(0xe8, 0x0d, 0), + HSPHY_INIT_CFG(0x74, 0x12, 0), + HSPHY_INIT_CFG(0x98, 0x63, 0), + HSPHY_INIT_CFG(0x9c, 0x03, 0), + HSPHY_INIT_CFG(0xa0, 0x1d, 0), + HSPHY_INIT_CFG(0xa4, 0x03, 0), + HSPHY_INIT_CFG(0x8c, 0x23, 0), + HSPHY_INIT_CFG(0x78, 0x08, 0), + HSPHY_INIT_CFG(0x7c, 0xdc, 0), + HSPHY_INIT_CFG(0x90, 0xe0, 20), + HSPHY_INIT_CFG(0x74, 0x10, 0), + HSPHY_INIT_CFG(0x90, 0x60, 0), +}; + +static const struct hsphy_data hsphy_data_femtophy = { + .init_seq = init_seq_femtophy, + .init_seq_num = ARRAY_SIZE(init_seq_femtophy), +}; + +static const struct of_device_id qcom_snps_hsphy_match[] = { + { .compatible = "qcom,usb-hs-28nm-femtophy", .data = &hsphy_data_femtophy, }, + { }, +}; +MODULE_DEVICE_TABLE(of, qcom_snps_hsphy_match); + +static struct platform_driver qcom_snps_hsphy_driver = { + .probe = qcom_snps_hsphy_probe, + .driver = { + .name = "qcom,usb-hs-28nm-phy", + .of_match_table = qcom_snps_hsphy_match, + }, +}; +module_platform_driver(qcom_snps_hsphy_driver); + +MODULE_DESCRIPTION("Qualcomm 28nm Hi-Speed USB PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/qualcomm/phy-qcom-usb-ss.c b/drivers/phy/qualcomm/phy-qcom-usb-ss.c new file mode 100644 index 000000000000..a3a6d3ce7ea1 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-usb-ss.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2014,2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2020, Linaro Limited + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#define PHY_CTRL0 0x6C +#define PHY_CTRL1 0x70 +#define PHY_CTRL2 0x74 +#define PHY_CTRL4 0x7C + +/* PHY_CTRL bits */ +#define REF_PHY_EN BIT(0) +#define LANE0_PWR_ON BIT(2) +#define SWI_PCS_CLK_SEL BIT(4) +#define TST_PWR_DOWN BIT(4) +#define PHY_RESET BIT(7) + +#define NUM_BULK_CLKS 3 +#define NUM_BULK_REGS 2 + +struct ssphy_priv { + void __iomem *base; + struct device *dev; + struct reset_control *reset_com; + struct reset_control *reset_phy; + struct regulator_bulk_data regs[NUM_BULK_REGS]; + struct clk_bulk_data clks[NUM_BULK_CLKS]; + enum phy_mode mode; +}; + +static inline void qcom_ssphy_updatel(void __iomem *addr, u32 mask, u32 val) +{ + writel((readl(addr) & ~mask) | val, addr); +} + +static int qcom_ssphy_do_reset(struct ssphy_priv *priv) +{ + int ret; + + if (!priv->reset_com) { + qcom_ssphy_updatel(priv->base + PHY_CTRL1, PHY_RESET, + PHY_RESET); + usleep_range(10, 20); + qcom_ssphy_updatel(priv->base + PHY_CTRL1, PHY_RESET, 0); + } else { + ret = reset_control_assert(priv->reset_com); + if (ret) { + dev_err(priv->dev, "Failed to assert reset com\n"); + return ret; + } + + ret = reset_control_assert(priv->reset_phy); + if (ret) { + dev_err(priv->dev, "Failed to assert reset phy\n"); + return ret; + } + + usleep_range(10, 20); + + ret = reset_control_deassert(priv->reset_com); + if (ret) { + dev_err(priv->dev, "Failed to deassert reset com\n"); + return ret; + } + + ret = reset_control_deassert(priv->reset_phy); + if (ret) { + dev_err(priv->dev, "Failed to deassert reset phy\n"); + return ret; + } + } + + return 0; +} + +static int 
qcom_ssphy_power_on(struct phy *phy) +{ + struct ssphy_priv *priv = phy_get_drvdata(phy); + int ret; + + ret = regulator_bulk_enable(NUM_BULK_REGS, priv->regs); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(NUM_BULK_CLKS, priv->clks); + if (ret) + goto err_disable_regulator; + + ret = qcom_ssphy_do_reset(priv); + if (ret) + goto err_disable_clock; + + writeb(SWI_PCS_CLK_SEL, priv->base + PHY_CTRL0); + qcom_ssphy_updatel(priv->base + PHY_CTRL4, LANE0_PWR_ON, LANE0_PWR_ON); + qcom_ssphy_updatel(priv->base + PHY_CTRL2, REF_PHY_EN, REF_PHY_EN); + qcom_ssphy_updatel(priv->base + PHY_CTRL4, TST_PWR_DOWN, 0); + + return 0; +err_disable_clock: + clk_bulk_disable_unprepare(NUM_BULK_CLKS, priv->clks); +err_disable_regulator: + regulator_bulk_disable(NUM_BULK_REGS, priv->regs); + + return ret; +} + +static int qcom_ssphy_power_off(struct phy *phy) +{ + struct ssphy_priv *priv = phy_get_drvdata(phy); + + qcom_ssphy_updatel(priv->base + PHY_CTRL4, LANE0_PWR_ON, 0); + qcom_ssphy_updatel(priv->base + PHY_CTRL2, REF_PHY_EN, 0); + qcom_ssphy_updatel(priv->base + PHY_CTRL4, TST_PWR_DOWN, TST_PWR_DOWN); + + clk_bulk_disable_unprepare(NUM_BULK_CLKS, priv->clks); + regulator_bulk_disable(NUM_BULK_REGS, priv->regs); + + return 0; +} + +static int qcom_ssphy_init_clock(struct ssphy_priv *priv) +{ + priv->clks[0].id = "ref"; + priv->clks[1].id = "ahb"; + priv->clks[2].id = "pipe"; + + return devm_clk_bulk_get(priv->dev, NUM_BULK_CLKS, priv->clks); +} + +static int qcom_ssphy_init_regulator(struct ssphy_priv *priv) +{ + int ret; + + priv->regs[0].supply = "vdd"; + priv->regs[1].supply = "vdda1p8"; + ret = devm_regulator_bulk_get(priv->dev, NUM_BULK_REGS, priv->regs); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(priv->dev, "Failed to get regulators\n"); + return ret; + } + + return ret; +} + +static int qcom_ssphy_init_reset(struct ssphy_priv *priv) +{ + priv->reset_com = devm_reset_control_get_optional_exclusive(priv->dev, "com"); + if (IS_ERR(priv->reset_com)) { + dev_err(priv->dev, "Failed to get reset control com\n"); + return PTR_ERR(priv->reset_com); + } + + if (priv->reset_com) { + /* if reset_com is present, reset_phy is no longer optional */ + priv->reset_phy = devm_reset_control_get_exclusive(priv->dev, "phy"); + if (IS_ERR(priv->reset_phy)) { + dev_err(priv->dev, "Failed to get reset control phy\n"); + return PTR_ERR(priv->reset_phy); + } + } + + return 0; +} + +static const struct phy_ops qcom_ssphy_ops = { + .power_off = qcom_ssphy_power_off, + .power_on = qcom_ssphy_power_on, + .owner = THIS_MODULE, +}; + +static int qcom_ssphy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phy_provider *provider; + struct ssphy_priv *priv; + struct phy *phy; + int ret; + + priv = devm_kzalloc(dev, sizeof(struct ssphy_priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + priv->mode = PHY_MODE_INVALID; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + ret = qcom_ssphy_init_clock(priv); + if (ret) + return ret; + + ret = qcom_ssphy_init_reset(priv); + if (ret) + return ret; + + ret = qcom_ssphy_init_regulator(priv); + if (ret) + return ret; + + phy = devm_phy_create(dev, dev->of_node, &qcom_ssphy_ops); + if (IS_ERR(phy)) { + dev_err(dev, "Failed to create the SS phy\n"); + return PTR_ERR(phy); + } + + phy_set_drvdata(phy, priv); + + provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(provider); +} + +static const struct of_device_id 
qcom_ssphy_match[] = { + { .compatible = "qcom,usb-ss-28nm-phy", }, + { }, +}; +MODULE_DEVICE_TABLE(of, qcom_ssphy_match); + +static struct platform_driver qcom_ssphy_driver = { + .probe = qcom_ssphy_probe, + .driver = { + .name = "qcom-usb-ssphy", + .of_match_table = qcom_ssphy_match, + }, +}; +module_platform_driver(qcom_ssphy_driver); + +MODULE_DESCRIPTION("Qualcomm SuperSpeed USB PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c index 680cc0c8825c..a84e9f027fc4 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c @@ -763,7 +763,7 @@ static void rockchip_chg_detect_work(struct work_struct *work) /* put the controller in normal mode */ property_enable(base, &rphy->phy_cfg->chg_det.opmode, true); rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work); - dev_info(&rport->phy->dev, "charger = %s\n", + dev_dbg(&rport->phy->dev, "charger = %s\n", chg_to_string(rphy->chg_type)); return; default: diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c index 93ffbd2940fa..e4adab375c73 100644 --- a/drivers/phy/socionext/phy-uniphier-pcie.c +++ b/drivers/phy/socionext/phy-uniphier-pcie.c @@ -19,6 +19,10 @@ #include <linux/resource.h> /* PHY */ +#define PCL_PHY_CLKCTRL 0x0000 +#define PORT_SEL_MASK GENMASK(11, 9) +#define PORT_SEL_1 FIELD_PREP(PORT_SEL_MASK, 1) + #define PCL_PHY_TEST_I 0x2000 #define PCL_PHY_TEST_O 0x2004 #define TESTI_DAT_MASK GENMASK(13, 6) @@ -45,13 +49,14 @@ struct uniphier_pciephy_priv { void __iomem *base; struct device *dev; - struct clk *clk; - struct reset_control *rst; + struct clk *clk, *clk_gio; + struct reset_control *rst, *rst_gio; const struct uniphier_pciephy_soc_data *data; }; struct uniphier_pciephy_soc_data { - bool has_syscon; + bool is_legacy; + void (*set_phymode)(struct regmap *regmap); }; static void uniphier_pciephy_testio_write(struct uniphier_pciephy_priv *priv, @@ -111,16 +116,35 @@ static void uniphier_pciephy_deassert(struct uniphier_pciephy_priv *priv) static int uniphier_pciephy_init(struct phy *phy) { struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy); + u32 val; int ret; ret = clk_prepare_enable(priv->clk); if (ret) return ret; - ret = reset_control_deassert(priv->rst); + ret = clk_prepare_enable(priv->clk_gio); if (ret) goto out_clk_disable; + ret = reset_control_deassert(priv->rst); + if (ret) + goto out_clk_gio_disable; + + ret = reset_control_deassert(priv->rst_gio); + if (ret) + goto out_rst_assert; + + /* support only 1 port */ + val = readl(priv->base + PCL_PHY_CLKCTRL); + val &= ~PORT_SEL_MASK; + val |= PORT_SEL_1; + writel(val, priv->base + PCL_PHY_CLKCTRL); + + /* legacy controller doesn't have phy_reset and parameters */ + if (priv->data->is_legacy) + return 0; + uniphier_pciephy_set_param(priv, PCL_PHY_R00, RX_EQ_ADJ_EN, RX_EQ_ADJ_EN); uniphier_pciephy_set_param(priv, PCL_PHY_R06, RX_EQ_ADJ, @@ -134,6 +158,10 @@ static int uniphier_pciephy_init(struct phy *phy) return 0; +out_rst_assert: + reset_control_assert(priv->rst); +out_clk_gio_disable: + clk_disable_unprepare(priv->clk_gio); out_clk_disable: clk_disable_unprepare(priv->clk); @@ -144,8 +172,11 @@ static int uniphier_pciephy_exit(struct phy *phy) { struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy); - uniphier_pciephy_assert(priv); + if (!priv->data->is_legacy) + uniphier_pciephy_assert(priv); + reset_control_assert(priv->rst_gio); reset_control_assert(priv->rst); + 
clk_disable_unprepare(priv->clk_gio); clk_disable_unprepare(priv->clk); return 0; @@ -163,7 +194,6 @@ static int uniphier_pciephy_probe(struct platform_device *pdev) struct phy_provider *phy_provider; struct device *dev = &pdev->dev; struct regmap *regmap; - struct resource *res; struct phy *phy; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -176,18 +206,36 @@ static int uniphier_pciephy_probe(struct platform_device *pdev) priv->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(dev, res); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - priv->clk = devm_clk_get(dev, NULL); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); - - priv->rst = devm_reset_control_get_shared(dev, NULL); - if (IS_ERR(priv->rst)) - return PTR_ERR(priv->rst); + if (priv->data->is_legacy) { + priv->clk_gio = devm_clk_get(dev, "gio"); + if (IS_ERR(priv->clk_gio)) + return PTR_ERR(priv->clk_gio); + + priv->rst_gio = + devm_reset_control_get_shared(dev, "gio"); + if (IS_ERR(priv->rst_gio)) + return PTR_ERR(priv->rst_gio); + + priv->clk = devm_clk_get(dev, "link"); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->rst = devm_reset_control_get_shared(dev, "link"); + if (IS_ERR(priv->rst)) + return PTR_ERR(priv->rst); + } else { + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->rst = devm_reset_control_get_shared(dev, NULL); + if (IS_ERR(priv->rst)) + return PTR_ERR(priv->rst); + } phy = devm_phy_create(dev, dev->of_node, &uniphier_pciephy_ops); if (IS_ERR(phy)) @@ -195,9 +243,8 @@ static int uniphier_pciephy_probe(struct platform_device *pdev) regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "socionext,syscon"); - if (!IS_ERR(regmap) && priv->data->has_syscon) - regmap_update_bits(regmap, SG_USBPCIESEL, - SG_USBPCIESEL_PCIE, SG_USBPCIESEL_PCIE); + if (!IS_ERR(regmap) && priv->data->set_phymode) + priv->data->set_phymode(regmap); phy_set_drvdata(phy, priv); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); @@ -205,16 +252,31 @@ static int uniphier_pciephy_probe(struct platform_device *pdev) return PTR_ERR_OR_ZERO(phy_provider); } +static void uniphier_pciephy_ld20_setmode(struct regmap *regmap) +{ + regmap_update_bits(regmap, SG_USBPCIESEL, + SG_USBPCIESEL_PCIE, SG_USBPCIESEL_PCIE); +} + +static const struct uniphier_pciephy_soc_data uniphier_pro5_data = { + .is_legacy = true, +}; + static const struct uniphier_pciephy_soc_data uniphier_ld20_data = { - .has_syscon = true, + .is_legacy = false, + .set_phymode = uniphier_pciephy_ld20_setmode, }; static const struct uniphier_pciephy_soc_data uniphier_pxs3_data = { - .has_syscon = false, + .is_legacy = false, }; static const struct of_device_id uniphier_pciephy_match[] = { { + .compatible = "socionext,uniphier-pro5-pcie-phy", + .data = &uniphier_pro5_data, + }, + { .compatible = "socionext,uniphier-ld20-pcie-phy", .data = &uniphier_ld20_data, }, diff --git a/drivers/phy/socionext/phy-uniphier-usb3hs.c b/drivers/phy/socionext/phy-uniphier-usb3hs.c index 50f379fc4e06..a9bc74121f38 100644 --- a/drivers/phy/socionext/phy-uniphier-usb3hs.c +++ b/drivers/phy/socionext/phy-uniphier-usb3hs.c @@ -41,10 +41,12 @@ #define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) } +#define RX_CHK_SYNC PHY_F(0, 5, 5) /* RX sync mode */ +#define RX_SYNC_SEL PHY_F(1, 1, 0) /* RX sync length */ #define LS_SLEW PHY_F(10, 6, 6) /* LS mode slew rate */ #define FS_LS_DRV PHY_F(10, 5, 5) 
/* FS/LS slew rate */ -#define MAX_PHY_PARAMS 2 +#define MAX_PHY_PARAMS 4 struct uniphier_u3hsphy_param { struct { @@ -66,13 +68,14 @@ struct uniphier_u3hsphy_trim_param { struct uniphier_u3hsphy_priv { struct device *dev; void __iomem *base; - struct clk *clk, *clk_parent, *clk_ext; - struct reset_control *rst, *rst_parent; + struct clk *clk, *clk_parent, *clk_ext, *clk_parent_gio; + struct reset_control *rst, *rst_parent, *rst_parent_gio; struct regulator *vbus; const struct uniphier_u3hsphy_soc_data *data; }; struct uniphier_u3hsphy_soc_data { + bool is_legacy; int nparams; const struct uniphier_u3hsphy_param param[MAX_PHY_PARAMS]; u32 config0; @@ -256,11 +259,20 @@ static int uniphier_u3hsphy_init(struct phy *phy) if (ret) return ret; - ret = reset_control_deassert(priv->rst_parent); + ret = clk_prepare_enable(priv->clk_parent_gio); if (ret) goto out_clk_disable; - if (!priv->data->config0 && !priv->data->config1) + ret = reset_control_deassert(priv->rst_parent); + if (ret) + goto out_clk_gio_disable; + + ret = reset_control_deassert(priv->rst_parent_gio); + if (ret) + goto out_rst_assert; + + if ((priv->data->is_legacy) + || (!priv->data->config0 && !priv->data->config1)) return 0; config0 = priv->data->config0; @@ -280,6 +292,8 @@ static int uniphier_u3hsphy_init(struct phy *phy) out_rst_assert: reset_control_assert(priv->rst_parent); +out_clk_gio_disable: + clk_disable_unprepare(priv->clk_parent_gio); out_clk_disable: clk_disable_unprepare(priv->clk_parent); @@ -290,7 +304,9 @@ static int uniphier_u3hsphy_exit(struct phy *phy) { struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy); + reset_control_assert(priv->rst_parent_gio); reset_control_assert(priv->rst_parent); + clk_disable_unprepare(priv->clk_parent_gio); clk_disable_unprepare(priv->clk_parent); return 0; @@ -309,7 +325,6 @@ static int uniphier_u3hsphy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct uniphier_u3hsphy_priv *priv; struct phy_provider *phy_provider; - struct resource *res; struct phy *phy; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -322,27 +337,38 @@ static int uniphier_u3hsphy_probe(struct platform_device *pdev) priv->data->nparams > MAX_PHY_PARAMS)) return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(dev, res); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - priv->clk = devm_clk_get(dev, "phy"); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); + if (!priv->data->is_legacy) { + priv->clk = devm_clk_get(dev, "phy"); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->clk_ext = devm_clk_get_optional(dev, "phy-ext"); + if (IS_ERR(priv->clk_ext)) + return PTR_ERR(priv->clk_ext); + + priv->rst = devm_reset_control_get_shared(dev, "phy"); + if (IS_ERR(priv->rst)) + return PTR_ERR(priv->rst); + + } else { + priv->clk_parent_gio = devm_clk_get(dev, "gio"); + if (IS_ERR(priv->clk_parent_gio)) + return PTR_ERR(priv->clk_parent_gio); + + priv->rst_parent_gio = + devm_reset_control_get_shared(dev, "gio"); + if (IS_ERR(priv->rst_parent_gio)) + return PTR_ERR(priv->rst_parent_gio); + } priv->clk_parent = devm_clk_get(dev, "link"); if (IS_ERR(priv->clk_parent)) return PTR_ERR(priv->clk_parent); - priv->clk_ext = devm_clk_get_optional(dev, "phy-ext"); - if (IS_ERR(priv->clk_ext)) - return PTR_ERR(priv->clk_ext); - - priv->rst = devm_reset_control_get_shared(dev, "phy"); - if (IS_ERR(priv->rst)) - return PTR_ERR(priv->rst); - priv->rst_parent = 
devm_reset_control_get_shared(dev, "link"); if (IS_ERR(priv->rst_parent)) return PTR_ERR(priv->rst_parent); @@ -364,13 +390,26 @@ static int uniphier_u3hsphy_probe(struct platform_device *pdev) return PTR_ERR_OR_ZERO(phy_provider); } -static const struct uniphier_u3hsphy_soc_data uniphier_pxs2_data = { +static const struct uniphier_u3hsphy_soc_data uniphier_pro5_data = { + .is_legacy = true, .nparams = 0, }; -static const struct uniphier_u3hsphy_soc_data uniphier_ld20_data = { +static const struct uniphier_u3hsphy_soc_data uniphier_pxs2_data = { + .is_legacy = false, .nparams = 2, .param = { + { RX_CHK_SYNC, 1 }, + { RX_SYNC_SEL, 1 }, + }, +}; + +static const struct uniphier_u3hsphy_soc_data uniphier_ld20_data = { + .is_legacy = false, + .nparams = 4, + .param = { + { RX_CHK_SYNC, 1 }, + { RX_SYNC_SEL, 1 }, { LS_SLEW, 1 }, { FS_LS_DRV, 1 }, }, @@ -380,7 +419,12 @@ static const struct uniphier_u3hsphy_soc_data uniphier_ld20_data = { }; static const struct uniphier_u3hsphy_soc_data uniphier_pxs3_data = { - .nparams = 0, + .is_legacy = false, + .nparams = 2, + .param = { + { RX_CHK_SYNC, 1 }, + { RX_SYNC_SEL, 1 }, + }, .trim_func = uniphier_u3hsphy_trim_ld20, .config0 = 0x92316680, .config1 = 0x00000106, @@ -388,6 +432,10 @@ static const struct uniphier_u3hsphy_soc_data uniphier_pxs3_data = { static const struct of_device_id uniphier_u3hsphy_match[] = { { + .compatible = "socionext,uniphier-pro5-usb3-hsphy", + .data = &uniphier_pro5_data, + }, + { .compatible = "socionext,uniphier-pxs2-usb3-hsphy", .data = &uniphier_pxs2_data, }, diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c index ec231e40ef2a..6700645bcbe6 100644 --- a/drivers/phy/socionext/phy-uniphier-usb3ss.c +++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c @@ -215,7 +215,6 @@ static int uniphier_u3ssphy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct uniphier_u3ssphy_priv *priv; struct phy_provider *phy_provider; - struct resource *res; struct phy *phy; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -228,8 +227,7 @@ static int uniphier_u3ssphy_probe(struct platform_device *pdev) priv->data->nparams > MAX_PHY_PARAMS)) return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(dev, res); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); @@ -315,6 +313,10 @@ static const struct of_device_id uniphier_u3ssphy_match[] = { .data = &uniphier_pro4_data, }, { + .compatible = "socionext,uniphier-pro5-usb3-ssphy", + .data = &uniphier_pro4_data, + }, + { .compatible = "socionext,uniphier-pxs2-usb3-ssphy", .data = &uniphier_pxs2_data, }, diff --git a/drivers/phy/tegra/Kconfig b/drivers/phy/tegra/Kconfig index f9817c3ae85f..a208aca4ba7b 100644 --- a/drivers/phy/tegra/Kconfig +++ b/drivers/phy/tegra/Kconfig @@ -2,6 +2,8 @@ config PHY_TEGRA_XUSB tristate "NVIDIA Tegra XUSB pad controller driver" depends on ARCH_TEGRA + select USB_CONN_GPIO + select USB_PHY help Choose this option if you have an NVIDIA Tegra SoC. 
diff --git a/drivers/phy/tegra/Makefile b/drivers/phy/tegra/Makefile index 320dd389f34d..89b84067cb4c 100644 --- a/drivers/phy/tegra/Makefile +++ b/drivers/phy/tegra/Makefile @@ -6,4 +6,5 @@ phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_124_SOC) += xusb-tegra124.o phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_132_SOC) += xusb-tegra124.o phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_210_SOC) += xusb-tegra210.o phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_186_SOC) += xusb-tegra186.o +phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_194_SOC) += xusb-tegra186.o obj-$(CONFIG_PHY_TEGRA194_P2U) += phy-tegra194-p2u.o diff --git a/drivers/phy/tegra/xusb-tegra124.c b/drivers/phy/tegra/xusb-tegra124.c index 98d84920c676..db56c7fbe60b 100644 --- a/drivers/phy/tegra/xusb-tegra124.c +++ b/drivers/phy/tegra/xusb-tegra124.c @@ -1422,6 +1422,8 @@ tegra124_usb2_port_map(struct tegra_xusb_port *port) } static const struct tegra_xusb_port_ops tegra124_usb2_port_ops = { + .release = tegra_xusb_usb2_port_release, + .remove = tegra_xusb_usb2_port_remove, .enable = tegra124_usb2_port_enable, .disable = tegra124_usb2_port_disable, .map = tegra124_usb2_port_map, @@ -1443,6 +1445,7 @@ tegra124_ulpi_port_map(struct tegra_xusb_port *port) } static const struct tegra_xusb_port_ops tegra124_ulpi_port_ops = { + .release = tegra_xusb_ulpi_port_release, .enable = tegra124_ulpi_port_enable, .disable = tegra124_ulpi_port_disable, .map = tegra124_ulpi_port_map, @@ -1464,6 +1467,7 @@ tegra124_hsic_port_map(struct tegra_xusb_port *port) } static const struct tegra_xusb_port_ops tegra124_hsic_port_ops = { + .release = tegra_xusb_hsic_port_release, .enable = tegra124_hsic_port_enable, .disable = tegra124_hsic_port_disable, .map = tegra124_hsic_port_map, @@ -1647,6 +1651,8 @@ tegra124_usb3_port_map(struct tegra_xusb_port *port) } static const struct tegra_xusb_port_ops tegra124_usb3_port_ops = { + .release = tegra_xusb_usb3_port_release, + .remove = tegra_xusb_usb3_port_remove, .enable = tegra124_usb3_port_enable, .disable = tegra124_usb3_port_disable, .map = tegra124_usb3_port_map, diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c index 84c27394c181..5d64f69b39a9 100644 --- a/drivers/phy/tegra/xusb-tegra186.c +++ b/drivers/phy/tegra/xusb-tegra186.c @@ -63,6 +63,10 @@ #define SSPX_ELPG_CLAMP_EN(x) BIT(0 + (x) * 3) #define SSPX_ELPG_CLAMP_EN_EARLY(x) BIT(1 + (x) * 3) #define SSPX_ELPG_VCORE_DOWN(x) BIT(2 + (x) * 3) +#define XUSB_PADCTL_SS_PORT_CFG 0x2c +#define PORTX_SPEED_SUPPORT_SHIFT(x) ((x) * 4) +#define PORTX_SPEED_SUPPORT_MASK (0x3) +#define PORT_SPEED_SUPPORT_GEN1 (0x0) #define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x88 + (x) * 0x40) #define HS_CURR_LEVEL(x) ((x) & 0x3f) @@ -301,6 +305,97 @@ static void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy) tegra186_utmi_bias_pad_power_off(padctl); } +static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl, + bool status) +{ + u32 value; + + dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear"); + + value = padctl_readl(padctl, USB2_VBUS_ID); + + if (status) { + value |= VBUS_OVERRIDE; + value &= ~ID_OVERRIDE(~0); + value |= ID_OVERRIDE_FLOATING; + } else { + value &= ~VBUS_OVERRIDE; + } + + padctl_writel(padctl, value, USB2_VBUS_ID); + + return 0; +} + +static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl, + bool status) +{ + u32 value; + + dev_dbg(padctl->dev, "%s id override\n", status ? 
"set" : "clear"); + + value = padctl_readl(padctl, USB2_VBUS_ID); + + if (status) { + if (value & VBUS_OVERRIDE) { + value &= ~VBUS_OVERRIDE; + padctl_writel(padctl, value, USB2_VBUS_ID); + usleep_range(1000, 2000); + + value = padctl_readl(padctl, USB2_VBUS_ID); + } + + value &= ~ID_OVERRIDE(~0); + value |= ID_OVERRIDE_GROUNDED; + } else { + value &= ~ID_OVERRIDE(~0); + value |= ID_OVERRIDE_FLOATING; + } + + padctl_writel(padctl, value, USB2_VBUS_ID); + + return 0; +} + +static int tegra186_utmi_phy_set_mode(struct phy *phy, enum phy_mode mode, + int submode) +{ + struct tegra_xusb_lane *lane = phy_get_drvdata(phy); + struct tegra_xusb_padctl *padctl = lane->pad->padctl; + struct tegra_xusb_usb2_port *port = tegra_xusb_find_usb2_port(padctl, + lane->index); + int err = 0; + + mutex_lock(&padctl->lock); + + dev_dbg(&port->base.dev, "%s: mode %d", __func__, mode); + + if (mode == PHY_MODE_USB_OTG) { + if (submode == USB_ROLE_HOST) { + tegra186_xusb_padctl_id_override(padctl, true); + + err = regulator_enable(port->supply); + } else if (submode == USB_ROLE_DEVICE) { + tegra186_xusb_padctl_vbus_override(padctl, true); + } else if (submode == USB_ROLE_NONE) { + /* + * When port is peripheral only or role transitions to + * USB_ROLE_NONE from USB_ROLE_DEVICE, regulator is not + * enabled. + */ + if (regulator_is_enabled(port->supply)) + regulator_disable(port->supply); + + tegra186_xusb_padctl_id_override(padctl, false); + tegra186_xusb_padctl_vbus_override(padctl, false); + } + } + + mutex_unlock(&padctl->lock); + + return err; +} + static int tegra186_utmi_phy_power_on(struct phy *phy) { struct tegra_xusb_lane *lane = phy_get_drvdata(phy); @@ -439,6 +534,7 @@ static const struct phy_ops utmi_phy_ops = { .exit = tegra186_utmi_phy_exit, .power_on = tegra186_utmi_phy_power_on, .power_off = tegra186_utmi_phy_power_off, + .set_mode = tegra186_utmi_phy_set_mode, .owner = THIS_MODULE, }; @@ -503,19 +599,6 @@ static const char * const tegra186_usb2_functions[] = { "xusb", }; -static const struct tegra_xusb_lane_soc tegra186_usb2_lanes[] = { - TEGRA186_LANE("usb2-0", 0, 0, 0, usb2), - TEGRA186_LANE("usb2-1", 0, 0, 0, usb2), - TEGRA186_LANE("usb2-2", 0, 0, 0, usb2), -}; - -static const struct tegra_xusb_pad_soc tegra186_usb2_pad = { - .name = "usb2", - .num_lanes = ARRAY_SIZE(tegra186_usb2_lanes), - .lanes = tegra186_usb2_lanes, - .ops = &tegra186_usb2_pad_ops, -}; - static int tegra186_usb2_port_enable(struct tegra_xusb_port *port) { return 0; @@ -532,6 +615,8 @@ tegra186_usb2_port_map(struct tegra_xusb_port *port) } static const struct tegra_xusb_port_ops tegra186_usb2_port_ops = { + .release = tegra_xusb_usb2_port_release, + .remove = tegra_xusb_usb2_port_remove, .enable = tegra186_usb2_port_enable, .disable = tegra186_usb2_port_disable, .map = tegra186_usb2_port_map, @@ -591,6 +676,8 @@ tegra186_usb3_port_map(struct tegra_xusb_port *port) } static const struct tegra_xusb_port_ops tegra186_usb3_port_ops = { + .release = tegra_xusb_usb3_port_release, + .remove = tegra_xusb_usb3_port_remove, .enable = tegra186_usb3_port_enable, .disable = tegra186_usb3_port_disable, .map = tegra186_usb3_port_map, @@ -635,6 +722,15 @@ static int tegra186_usb3_phy_power_on(struct phy *phy) padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_CAP); + if (padctl->soc->supports_gen2 && port->disable_gen2) { + value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_CFG); + value &= ~(PORTX_SPEED_SUPPORT_MASK << + PORTX_SPEED_SUPPORT_SHIFT(index)); + value |= (PORT_SPEED_SUPPORT_GEN1 << + PORTX_SPEED_SUPPORT_SHIFT(index)); + 
padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_CFG); + } + value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1); value &= ~SSPX_ELPG_VCORE_DOWN(index); padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1); @@ -765,27 +861,6 @@ static const char * const tegra186_usb3_functions[] = { "xusb", }; -static const struct tegra_xusb_lane_soc tegra186_usb3_lanes[] = { - TEGRA186_LANE("usb3-0", 0, 0, 0, usb3), - TEGRA186_LANE("usb3-1", 0, 0, 0, usb3), - TEGRA186_LANE("usb3-2", 0, 0, 0, usb3), -}; - -static const struct tegra_xusb_pad_soc tegra186_usb3_pad = { - .name = "usb3", - .num_lanes = ARRAY_SIZE(tegra186_usb3_lanes), - .lanes = tegra186_usb3_lanes, - .ops = &tegra186_usb3_pad_ops, -}; - -static const struct tegra_xusb_pad_soc * const tegra186_pads[] = { - &tegra186_usb2_pad, - &tegra186_usb3_pad, -#if 0 /* TODO implement */ - &tegra186_hsic_pad, -#endif -}; - static int tegra186_xusb_read_fuse_calibration(struct tegra186_xusb_padctl *padctl) { @@ -802,7 +877,9 @@ tegra186_xusb_read_fuse_calibration(struct tegra186_xusb_padctl *padctl) err = tegra_fuse_readl(TEGRA_FUSE_SKU_CALIB_0, &value); if (err) { - dev_err(dev, "failed to read calibration fuse: %d\n", err); + if (err != -EPROBE_DEFER) + dev_err(dev, "failed to read calibration fuse: %d\n", + err); return err; } @@ -857,34 +934,13 @@ static void tegra186_xusb_padctl_remove(struct tegra_xusb_padctl *padctl) { } -static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl, - bool status) -{ - u32 value; - - dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear"); - - value = padctl_readl(padctl, USB2_VBUS_ID); - - if (status) { - value |= VBUS_OVERRIDE; - value &= ~ID_OVERRIDE(~0); - value |= ID_OVERRIDE_FLOATING; - } else { - value &= ~VBUS_OVERRIDE; - } - - padctl_writel(padctl, value, USB2_VBUS_ID); - - return 0; -} - static const struct tegra_xusb_padctl_ops tegra186_xusb_padctl_ops = { .probe = tegra186_xusb_padctl_probe, .remove = tegra186_xusb_padctl_remove, .vbus_override = tegra186_xusb_padctl_vbus_override, }; +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) static const char * const tegra186_xusb_padctl_supply_names[] = { "avdd-pll-erefeut", "avdd-usb", @@ -892,6 +948,40 @@ static const char * const tegra186_xusb_padctl_supply_names[] = { "vddio-hsic", }; +static const struct tegra_xusb_lane_soc tegra186_usb2_lanes[] = { + TEGRA186_LANE("usb2-0", 0, 0, 0, usb2), + TEGRA186_LANE("usb2-1", 0, 0, 0, usb2), + TEGRA186_LANE("usb2-2", 0, 0, 0, usb2), +}; + +static const struct tegra_xusb_pad_soc tegra186_usb2_pad = { + .name = "usb2", + .num_lanes = ARRAY_SIZE(tegra186_usb2_lanes), + .lanes = tegra186_usb2_lanes, + .ops = &tegra186_usb2_pad_ops, +}; + +static const struct tegra_xusb_lane_soc tegra186_usb3_lanes[] = { + TEGRA186_LANE("usb3-0", 0, 0, 0, usb3), + TEGRA186_LANE("usb3-1", 0, 0, 0, usb3), + TEGRA186_LANE("usb3-2", 0, 0, 0, usb3), +}; + +static const struct tegra_xusb_pad_soc tegra186_usb3_pad = { + .name = "usb3", + .num_lanes = ARRAY_SIZE(tegra186_usb3_lanes), + .lanes = tegra186_usb3_lanes, + .ops = &tegra186_usb3_pad_ops, +}; + +static const struct tegra_xusb_pad_soc * const tegra186_pads[] = { + &tegra186_usb2_pad, + &tegra186_usb3_pad, +#if 0 /* TODO implement */ + &tegra186_hsic_pad, +#endif +}; + const struct tegra_xusb_padctl_soc tegra186_xusb_padctl_soc = { .num_pads = ARRAY_SIZE(tegra186_pads), .pads = tegra186_pads, @@ -916,6 +1006,67 @@ const struct tegra_xusb_padctl_soc tegra186_xusb_padctl_soc = { .num_supplies = ARRAY_SIZE(tegra186_xusb_padctl_supply_names), }; 
EXPORT_SYMBOL_GPL(tegra186_xusb_padctl_soc); +#endif + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) +static const char * const tegra194_xusb_padctl_supply_names[] = { + "avdd-usb", + "vclamp-usb", +}; + +static const struct tegra_xusb_lane_soc tegra194_usb2_lanes[] = { + TEGRA186_LANE("usb2-0", 0, 0, 0, usb2), + TEGRA186_LANE("usb2-1", 0, 0, 0, usb2), + TEGRA186_LANE("usb2-2", 0, 0, 0, usb2), + TEGRA186_LANE("usb2-3", 0, 0, 0, usb2), +}; + +static const struct tegra_xusb_pad_soc tegra194_usb2_pad = { + .name = "usb2", + .num_lanes = ARRAY_SIZE(tegra194_usb2_lanes), + .lanes = tegra194_usb2_lanes, + .ops = &tegra186_usb2_pad_ops, +}; + +static const struct tegra_xusb_lane_soc tegra194_usb3_lanes[] = { + TEGRA186_LANE("usb3-0", 0, 0, 0, usb3), + TEGRA186_LANE("usb3-1", 0, 0, 0, usb3), + TEGRA186_LANE("usb3-2", 0, 0, 0, usb3), + TEGRA186_LANE("usb3-3", 0, 0, 0, usb3), +}; + +static const struct tegra_xusb_pad_soc tegra194_usb3_pad = { + .name = "usb3", + .num_lanes = ARRAY_SIZE(tegra194_usb3_lanes), + .lanes = tegra194_usb3_lanes, + .ops = &tegra186_usb3_pad_ops, +}; + +static const struct tegra_xusb_pad_soc * const tegra194_pads[] = { + &tegra194_usb2_pad, + &tegra194_usb3_pad, +}; + +const struct tegra_xusb_padctl_soc tegra194_xusb_padctl_soc = { + .num_pads = ARRAY_SIZE(tegra194_pads), + .pads = tegra194_pads, + .ports = { + .usb2 = { + .ops = &tegra186_usb2_port_ops, + .count = 4, + }, + .usb3 = { + .ops = &tegra186_usb3_port_ops, + .count = 4, + }, + }, + .ops = &tegra186_xusb_padctl_ops, + .supply_names = tegra194_xusb_padctl_supply_names, + .num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names), + .supports_gen2 = true, +}; +EXPORT_SYMBOL_GPL(tegra194_xusb_padctl_soc); +#endif MODULE_AUTHOR("JC Kuo <jckuo@nvidia.com>"); MODULE_DESCRIPTION("NVIDIA Tegra186 XUSB Pad Controller driver"); diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c index 394913bb2f20..66bd4613835b 100644 --- a/drivers/phy/tegra/xusb-tegra210.c +++ b/drivers/phy/tegra/xusb-tegra210.c @@ -236,6 +236,7 @@ #define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT 18 #define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK 0xf #define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING 8 +#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_GROUNDED 0 struct tegra210_xusb_fuse_calibration { u32 hs_curr_level[4]; @@ -935,6 +936,103 @@ static int tegra210_usb2_phy_exit(struct phy *phy) return tegra210_xusb_padctl_disable(lane->pad->padctl); } +static int tegra210_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl, + bool status) +{ + u32 value; + + dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear"); + + value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID); + + if (status) { + value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON; + value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK << + XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT); + value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING << + XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT; + } else { + value &= ~XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON; + } + + padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID); + + return 0; +} + +static int tegra210_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl, + bool status) +{ + u32 value; + + dev_dbg(padctl->dev, "%s id override\n", status ? 
"set" : "clear"); + + value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID); + + if (status) { + if (value & XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON) { + value &= ~XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON; + padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID); + usleep_range(1000, 2000); + + value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID); + } + + value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK << + XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT); + value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_GROUNDED << + XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT; + } else { + value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK << + XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT); + value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING << + XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT; + } + + padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID); + + return 0; +} + +static int tegra210_usb2_phy_set_mode(struct phy *phy, enum phy_mode mode, + int submode) +{ + struct tegra_xusb_lane *lane = phy_get_drvdata(phy); + struct tegra_xusb_padctl *padctl = lane->pad->padctl; + struct tegra_xusb_usb2_port *port = tegra_xusb_find_usb2_port(padctl, + lane->index); + int err = 0; + + mutex_lock(&padctl->lock); + + dev_dbg(&port->base.dev, "%s: mode %d", __func__, mode); + + if (mode == PHY_MODE_USB_OTG) { + if (submode == USB_ROLE_HOST) { + tegra210_xusb_padctl_id_override(padctl, true); + + err = regulator_enable(port->supply); + } else if (submode == USB_ROLE_DEVICE) { + tegra210_xusb_padctl_vbus_override(padctl, true); + } else if (submode == USB_ROLE_NONE) { + /* + * When port is peripheral only or role transitions to + * USB_ROLE_NONE from USB_ROLE_DEVICE, regulator is not + * be enabled. + */ + if (regulator_is_enabled(port->supply)) + regulator_disable(port->supply); + + tegra210_xusb_padctl_id_override(padctl, false); + tegra210_xusb_padctl_vbus_override(padctl, false); + } + } + + mutex_unlock(&padctl->lock); + + return err; +} + static int tegra210_usb2_phy_power_on(struct phy *phy) { struct tegra_xusb_lane *lane = phy_get_drvdata(phy); @@ -1048,9 +1146,11 @@ static int tegra210_usb2_phy_power_on(struct phy *phy) padctl_writel(padctl, value, XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index)); - err = regulator_enable(port->supply); - if (err) - return err; + if (port->supply && port->mode == USB_DR_MODE_HOST) { + err = regulator_enable(port->supply); + if (err) + return err; + } mutex_lock(&padctl->lock); @@ -1164,6 +1264,7 @@ static const struct phy_ops tegra210_usb2_phy_ops = { .exit = tegra210_usb2_phy_exit, .power_on = tegra210_usb2_phy_power_on, .power_off = tegra210_usb2_phy_power_off, + .set_mode = tegra210_usb2_phy_set_mode, .owner = THIS_MODULE, }; @@ -1852,6 +1953,8 @@ tegra210_usb2_port_map(struct tegra_xusb_port *port) } static const struct tegra_xusb_port_ops tegra210_usb2_port_ops = { + .release = tegra_xusb_usb2_port_release, + .remove = tegra_xusb_usb2_port_remove, .enable = tegra210_usb2_port_enable, .disable = tegra210_usb2_port_disable, .map = tegra210_usb2_port_map, @@ -1873,6 +1976,7 @@ tegra210_hsic_port_map(struct tegra_xusb_port *port) } static const struct tegra_xusb_port_ops tegra210_hsic_port_ops = { + .release = tegra_xusb_hsic_port_release, .enable = tegra210_hsic_port_enable, .disable = tegra210_hsic_port_disable, .map = tegra210_hsic_port_map, @@ -2018,35 +2122,13 @@ tegra210_usb3_port_map(struct tegra_xusb_port *port) } static const struct tegra_xusb_port_ops tegra210_usb3_port_ops = { + .release = tegra_xusb_usb3_port_release, + .remove = tegra_xusb_usb3_port_remove, .enable = 
tegra210_usb3_port_enable, .disable = tegra210_usb3_port_disable, .map = tegra210_usb3_port_map, }; -static int tegra210_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl, - bool status) -{ - u32 value; - - dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear"); - - value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID); - - if (status) { - value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON; - value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK << - XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT); - value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING << - XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT; - } else { - value &= ~XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON; - } - - padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID); - - return 0; -} - static int tegra210_utmi_port_reset(struct phy *phy) { struct tegra_xusb_padctl *padctl; diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index f98ec3922c02..de4a46fe1763 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -66,6 +66,12 @@ static const struct of_device_id tegra_xusb_padctl_of_match[] = { .data = &tegra186_xusb_padctl_soc, }, #endif +#if defined(CONFIG_ARCH_TEGRA_194_SOC) + { + .compatible = "nvidia,tegra194-xusb-padctl", + .data = &tegra194_xusb_padctl_soc, + }, +#endif { } }; MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match); @@ -501,6 +507,10 @@ tegra_xusb_find_usb3_port(struct tegra_xusb_padctl *padctl, unsigned int index) static void tegra_xusb_port_release(struct device *dev) { + struct tegra_xusb_port *port = to_tegra_xusb_port(dev); + + if (port->ops->release) + port->ops->release(port); } static struct device_type tegra_xusb_port_type = { @@ -541,6 +551,16 @@ unregister: static void tegra_xusb_port_unregister(struct tegra_xusb_port *port) { + if (!IS_ERR_OR_NULL(port->usb_role_sw)) { + of_platform_depopulate(&port->dev); + usb_role_switch_unregister(port->usb_role_sw); + cancel_work_sync(&port->usb_phy_work); + usb_remove_phy(&port->usb_phy); + } + + if (port->ops->remove) + port->ops->remove(port); + device_unregister(&port->dev); } @@ -551,11 +571,146 @@ static const char *const modes[] = { [USB_DR_MODE_OTG] = "otg", }; +static const char * const usb_roles[] = { + [USB_ROLE_NONE] = "none", + [USB_ROLE_HOST] = "host", + [USB_ROLE_DEVICE] = "device", +}; + +static enum usb_phy_events to_usb_phy_event(enum usb_role role) +{ + switch (role) { + case USB_ROLE_DEVICE: + return USB_EVENT_VBUS; + + case USB_ROLE_HOST: + return USB_EVENT_ID; + + default: + return USB_EVENT_NONE; + } +} + +static void tegra_xusb_usb_phy_work(struct work_struct *work) +{ + struct tegra_xusb_port *port = container_of(work, + struct tegra_xusb_port, + usb_phy_work); + enum usb_role role = usb_role_switch_get_role(port->usb_role_sw); + + usb_phy_set_event(&port->usb_phy, to_usb_phy_event(role)); + + dev_dbg(&port->dev, "%s(): calling notifier for role %s\n", __func__, + usb_roles[role]); + + atomic_notifier_call_chain(&port->usb_phy.notifier, 0, &port->usb_phy); +} + +static int tegra_xusb_role_sw_set(struct usb_role_switch *sw, + enum usb_role role) +{ + struct tegra_xusb_port *port = usb_role_switch_get_drvdata(sw); + + dev_dbg(&port->dev, "%s(): role %s\n", __func__, usb_roles[role]); + + schedule_work(&port->usb_phy_work); + + return 0; +} + +static int tegra_xusb_set_peripheral(struct usb_otg *otg, + struct usb_gadget *gadget) +{ + struct tegra_xusb_port *port = container_of(otg->usb_phy, + struct tegra_xusb_port, + usb_phy); + + if (gadget != NULL) + schedule_work(&port->usb_phy_work); + + return 0; 
+} + +static int tegra_xusb_set_host(struct usb_otg *otg, struct usb_bus *host) +{ + struct tegra_xusb_port *port = container_of(otg->usb_phy, + struct tegra_xusb_port, + usb_phy); + + if (host != NULL) + schedule_work(&port->usb_phy_work); + + return 0; +} + + +static int tegra_xusb_setup_usb_role_switch(struct tegra_xusb_port *port) +{ + struct tegra_xusb_lane *lane; + struct usb_role_switch_desc role_sx_desc = { + .fwnode = dev_fwnode(&port->dev), + .set = tegra_xusb_role_sw_set, + }; + int err = 0; + + /* + * USB role switch driver needs parent driver owner info. This is a + * suboptimal solution. TODO: Need to revisit this in a follow-up patch + * where an optimal solution is possible with changes to USB role + * switch driver. + */ + port->dev.driver = devm_kzalloc(&port->dev, + sizeof(struct device_driver), + GFP_KERNEL); + port->dev.driver->owner = THIS_MODULE; + + port->usb_role_sw = usb_role_switch_register(&port->dev, + &role_sx_desc); + if (IS_ERR(port->usb_role_sw)) { + err = PTR_ERR(port->usb_role_sw); + dev_err(&port->dev, "failed to register USB role switch: %d", + err); + return err; + } + + INIT_WORK(&port->usb_phy_work, tegra_xusb_usb_phy_work); + usb_role_switch_set_drvdata(port->usb_role_sw, port); + + port->usb_phy.otg = devm_kzalloc(&port->dev, sizeof(struct usb_otg), + GFP_KERNEL); + if (!port->usb_phy.otg) + return -ENOMEM; + + lane = tegra_xusb_find_lane(port->padctl, "usb2", port->index); + + /* + * Assign phy dev to usb-phy dev. Host/device drivers can use phy + * reference to retrieve usb-phy details. + */ + port->usb_phy.dev = &lane->pad->lanes[port->index]->dev; + port->usb_phy.dev->driver = port->padctl->dev->driver; + port->usb_phy.otg->usb_phy = &port->usb_phy; + port->usb_phy.otg->set_peripheral = tegra_xusb_set_peripheral; + port->usb_phy.otg->set_host = tegra_xusb_set_host; + + err = usb_add_phy_dev(&port->usb_phy); + if (err < 0) { + dev_err(&port->dev, "Failed to add USB PHY: %d\n", err); + return err; + } + + /* populate connector entry */ + of_platform_populate(port->dev.of_node, NULL, NULL, &port->dev); + + return err; +} + static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2) { struct tegra_xusb_port *port = &usb2->base; struct device_node *np = port->dev.of_node; const char *mode; + int err; usb2->internal = of_property_read_bool(np, "nvidia,internal"); @@ -572,7 +727,21 @@ static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2) usb2->mode = USB_DR_MODE_HOST; } - usb2->supply = devm_regulator_get(&port->dev, "vbus"); + /* usb-role-switch property is mandatory for OTG/Peripheral modes */ + if (usb2->mode == USB_DR_MODE_PERIPHERAL || + usb2->mode == USB_DR_MODE_OTG) { + if (of_property_read_bool(np, "usb-role-switch")) { + err = tegra_xusb_setup_usb_role_switch(port); + if (err < 0) + return err; + } else { + dev_err(&port->dev, "usb-role-switch not found for %s mode", + modes[usb2->mode]); + return -EINVAL; + } + } + + usb2->supply = regulator_get(&port->dev, "vbus"); return PTR_ERR_OR_ZERO(usb2->supply); } @@ -591,7 +760,7 @@ static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl, if (!np || !of_device_is_available(np)) goto out; - usb2 = devm_kzalloc(padctl->dev, sizeof(*usb2), GFP_KERNEL); + usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL); if (!usb2) { err = -ENOMEM; goto out; @@ -622,6 +791,20 @@ out: return err; } +void tegra_xusb_usb2_port_release(struct tegra_xusb_port *port) +{ + struct tegra_xusb_usb2_port *usb2 = to_usb2_port(port); + + kfree(usb2); +} + +void 
tegra_xusb_usb2_port_remove(struct tegra_xusb_port *port) +{ + struct tegra_xusb_usb2_port *usb2 = to_usb2_port(port); + + regulator_put(usb2->supply); +} + static int tegra_xusb_ulpi_port_parse_dt(struct tegra_xusb_ulpi_port *ulpi) { struct tegra_xusb_port *port = &ulpi->base; @@ -643,7 +826,7 @@ static int tegra_xusb_add_ulpi_port(struct tegra_xusb_padctl *padctl, if (!np || !of_device_is_available(np)) goto out; - ulpi = devm_kzalloc(padctl->dev, sizeof(*ulpi), GFP_KERNEL); + ulpi = kzalloc(sizeof(*ulpi), GFP_KERNEL); if (!ulpi) { err = -ENOMEM; goto out; @@ -674,6 +857,13 @@ out: return err; } +void tegra_xusb_ulpi_port_release(struct tegra_xusb_port *port) +{ + struct tegra_xusb_ulpi_port *ulpi = to_ulpi_port(port); + + kfree(ulpi); +} + static int tegra_xusb_hsic_port_parse_dt(struct tegra_xusb_hsic_port *hsic) { /* XXX */ @@ -691,7 +881,7 @@ static int tegra_xusb_add_hsic_port(struct tegra_xusb_padctl *padctl, if (!np || !of_device_is_available(np)) goto out; - hsic = devm_kzalloc(padctl->dev, sizeof(*hsic), GFP_KERNEL); + hsic = kzalloc(sizeof(*hsic), GFP_KERNEL); if (!hsic) { err = -ENOMEM; goto out; @@ -722,10 +912,18 @@ out: return err; } +void tegra_xusb_hsic_port_release(struct tegra_xusb_port *port) +{ + struct tegra_xusb_hsic_port *hsic = to_hsic_port(port); + + kfree(hsic); +} + static int tegra_xusb_usb3_port_parse_dt(struct tegra_xusb_usb3_port *usb3) { struct tegra_xusb_port *port = &usb3->base; struct device_node *np = port->dev.of_node; + enum usb_device_speed maximum_speed; u32 value; int err; @@ -739,7 +937,17 @@ static int tegra_xusb_usb3_port_parse_dt(struct tegra_xusb_usb3_port *usb3) usb3->internal = of_property_read_bool(np, "nvidia,internal"); - usb3->supply = devm_regulator_get(&port->dev, "vbus"); + if (device_property_present(&port->dev, "maximum-speed")) { + maximum_speed = usb_get_maximum_speed(&port->dev); + if (maximum_speed == USB_SPEED_SUPER) + usb3->disable_gen2 = true; + else if (maximum_speed == USB_SPEED_SUPER_PLUS) + usb3->disable_gen2 = false; + else + return -EINVAL; + } + + usb3->supply = regulator_get(&port->dev, "vbus"); return PTR_ERR_OR_ZERO(usb3->supply); } @@ -759,7 +967,7 @@ static int tegra_xusb_add_usb3_port(struct tegra_xusb_padctl *padctl, if (!np || !of_device_is_available(np)) goto out; - usb3 = devm_kzalloc(padctl->dev, sizeof(*usb3), GFP_KERNEL); + usb3 = kzalloc(sizeof(*usb3), GFP_KERNEL); if (!usb3) { err = -ENOMEM; goto out; @@ -790,6 +998,20 @@ out: return err; } +void tegra_xusb_usb3_port_release(struct tegra_xusb_port *port) +{ + struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port); + + kfree(usb3); +} + +void tegra_xusb_usb3_port_remove(struct tegra_xusb_port *port) +{ + struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port); + + regulator_put(usb3->supply); +} + static void __tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl) { struct tegra_xusb_port *port, *tmp; @@ -1001,7 +1223,13 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev) err = tegra_xusb_setup_ports(padctl); if (err) { - dev_err(&pdev->dev, "failed to setup XUSB ports: %d\n", err); + const char *level = KERN_ERR; + + if (err == -EPROBE_DEFER) + level = KERN_DEBUG; + + dev_printk(level, &pdev->dev, + dev_fmt("failed to setup XUSB ports: %d\n"), err); goto remove_pads; } @@ -1143,6 +1371,27 @@ int tegra_phy_xusb_utmi_port_reset(struct phy *phy) } EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_port_reset); +int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl, + unsigned int port) +{ + struct tegra_xusb_usb2_port *usb2; + 
struct tegra_xusb_usb3_port *usb3; + int i; + + usb2 = tegra_xusb_find_usb2_port(padctl, port); + if (!usb2) + return -EINVAL; + + for (i = 0; i < padctl->soc->ports.usb3.count; i++) { + usb3 = tegra_xusb_find_usb3_port(padctl, i); + if (usb3 && usb3->port == usb2->base.index) + return usb3->base.index; + } + + return -ENODEV; +} +EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_usb3_companion); + MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>"); MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h index da94fcce6307..ea35af747066 100644 --- a/drivers/phy/tegra/xusb.h +++ b/drivers/phy/tegra/xusb.h @@ -12,6 +12,7 @@ #include <linux/workqueue.h> #include <linux/usb/otg.h> +#include <linux/usb/role.h> /* legacy entry points for backwards-compatibility */ int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev); @@ -266,9 +267,18 @@ struct tegra_xusb_port { struct list_head list; struct device dev; + struct usb_role_switch *usb_role_sw; + struct work_struct usb_phy_work; + struct usb_phy usb_phy; + const struct tegra_xusb_port_ops *ops; }; +static inline struct tegra_xusb_port *to_tegra_xusb_port(struct device *dev) +{ + return container_of(dev, struct tegra_xusb_port, dev); +} + struct tegra_xusb_lane_map { unsigned int port; const char *type; @@ -303,6 +313,8 @@ to_usb2_port(struct tegra_xusb_port *port) struct tegra_xusb_usb2_port * tegra_xusb_find_usb2_port(struct tegra_xusb_padctl *padctl, unsigned int index); +void tegra_xusb_usb2_port_release(struct tegra_xusb_port *port); +void tegra_xusb_usb2_port_remove(struct tegra_xusb_port *port); struct tegra_xusb_ulpi_port { struct tegra_xusb_port base; @@ -317,6 +329,8 @@ to_ulpi_port(struct tegra_xusb_port *port) return container_of(port, struct tegra_xusb_ulpi_port, base); } +void tegra_xusb_ulpi_port_release(struct tegra_xusb_port *port); + struct tegra_xusb_hsic_port { struct tegra_xusb_port base; }; @@ -327,12 +341,15 @@ to_hsic_port(struct tegra_xusb_port *port) return container_of(port, struct tegra_xusb_hsic_port, base); } +void tegra_xusb_hsic_port_release(struct tegra_xusb_port *port); + struct tegra_xusb_usb3_port { struct tegra_xusb_port base; struct regulator *supply; bool context_saved; unsigned int port; bool internal; + bool disable_gen2; u32 tap1; u32 amp; @@ -349,8 +366,12 @@ to_usb3_port(struct tegra_xusb_port *port) struct tegra_xusb_usb3_port * tegra_xusb_find_usb3_port(struct tegra_xusb_padctl *padctl, unsigned int index); +void tegra_xusb_usb3_port_release(struct tegra_xusb_port *port); +void tegra_xusb_usb3_port_remove(struct tegra_xusb_port *port); struct tegra_xusb_port_ops { + void (*release)(struct tegra_xusb_port *port); + void (*remove)(struct tegra_xusb_port *port); int (*enable)(struct tegra_xusb_port *port); void (*disable)(struct tegra_xusb_port *port); struct tegra_xusb_lane *(*map)(struct tegra_xusb_port *port); @@ -392,6 +413,7 @@ struct tegra_xusb_padctl_soc { const char * const *supply_names; unsigned int num_supplies; + bool supports_gen2; bool need_fake_usb3_port; }; @@ -448,5 +470,8 @@ extern const struct tegra_xusb_padctl_soc tegra210_xusb_padctl_soc; #if defined(CONFIG_ARCH_TEGRA_186_SOC) extern const struct tegra_xusb_padctl_soc tegra186_xusb_padctl_soc; #endif +#if defined(CONFIG_ARCH_TEGRA_194_SOC) +extern const struct tegra_xusb_padctl_soc tegra194_xusb_padctl_soc; +#endif #endif /* __PHY_TEGRA_XUSB_H */ diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c index 
a28bd15297f5..7edd5c3bc536 100644 --- a/drivers/phy/ti/phy-gmii-sel.c +++ b/drivers/phy/ti/phy-gmii-sel.c @@ -80,20 +80,20 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode) break; case PHY_INTERFACE_MODE_MII: - mode = AM33XX_GMII_SEL_MODE_MII; + case PHY_INTERFACE_MODE_GMII: + gmii_sel_mode = AM33XX_GMII_SEL_MODE_MII; break; default: - dev_warn(dev, - "port%u: unsupported mode: \"%s\". Defaulting to MII.\n", - if_phy->id, phy_modes(rgmii_id)); + dev_warn(dev, "port%u: unsupported mode: \"%s\"\n", + if_phy->id, phy_modes(submode)); return -EINVAL; } if_phy->phy_if_mode = submode; dev_dbg(dev, "%s id:%u mode:%u rgmii_id:%d rmii_clk_ext:%d\n", - __func__, if_phy->id, mode, rgmii_id, + __func__, if_phy->id, submode, rgmii_id, if_phy->rmii_clock_external); regfield = if_phy->fields[PHY_GMII_SEL_PORT_MODE]; @@ -170,6 +170,21 @@ struct phy_gmii_sel_soc_data phy_gmii_sel_soc_dm814 = { .regfields = phy_gmii_sel_fields_am33xx, }; +static const +struct reg_field phy_gmii_sel_fields_am654[][PHY_GMII_SEL_LAST] = { + { + [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x4040, 0, 1), + [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD((~0), 0, 0), + [PHY_GMII_SEL_RMII_IO_CLK_EN] = REG_FIELD((~0), 0, 0), + }, +}; + +static const +struct phy_gmii_sel_soc_data phy_gmii_sel_soc_am654 = { + .num_ports = 1, + .regfields = phy_gmii_sel_fields_am654, +}; + static const struct of_device_id phy_gmii_sel_id_table[] = { { .compatible = "ti,am3352-phy-gmii-sel", @@ -187,6 +202,10 @@ static const struct of_device_id phy_gmii_sel_id_table[] = { .compatible = "ti,dm814-phy-gmii-sel", .data = &phy_gmii_sel_soc_dm814, }, + { + .compatible = "ti,am654-phy-gmii-sel", + .data = &phy_gmii_sel_soc_am654, + }, {} }; MODULE_DEVICE_TABLE(of, phy_gmii_sel_id_table); diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c index 7b6409ef553c..dce2626384a9 100644 --- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c +++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c @@ -1073,13 +1073,26 @@ static int madera_pin_probe(struct platform_device *pdev) return ret; } + platform_set_drvdata(pdev, priv); + dev_dbg(priv->dev, "pinctrl probed ok\n"); return 0; } +static int madera_pin_remove(struct platform_device *pdev) +{ + struct madera_pin_private *priv = platform_get_drvdata(pdev); + + if (priv->madera->pdata.gpio_configs) + pinctrl_unregister_mappings(priv->madera->pdata.gpio_configs); + + return 0; +} + static struct platform_driver madera_pin_driver = { .probe = madera_pin_probe, + .remove = madera_pin_remove, .driver = { .name = "madera-pinctrl", }, diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 446d84fe0e31..f23c55e22195 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -2021,7 +2021,6 @@ static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev) return PTR_ERR(pctldev->p); } - kref_get(&pctldev->p->users); pctldev->hog_default = pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT); if (IS_ERR(pctldev->hog_default)) { diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 9357f7c46cf3..1ed20ac2243f 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -127,11 +127,12 @@ static int dt_to_map_one_config(struct pinctrl *p, np_pctldev = of_get_next_parent(np_pctldev); if (!np_pctldev || of_node_is_root(np_pctldev)) { of_node_put(np_pctldev); + ret = driver_deferred_probe_check_state(p->dev); /* keep deferring if modules are enabled unless we've timed out */ - if (IS_ENABLED(CONFIG_MODULES) && 
!allow_default) - return driver_deferred_probe_check_state_continue(p->dev); - - return driver_deferred_probe_check_state(p->dev); + if (IS_ENABLED(CONFIG_MODULES) && !allow_default && + (ret == -ENODEV)) + ret = -EPROBE_DEFER; + return ret; } /* If we're creating a hog we can use the passed pctldev */ if (hog_pctldev && (np_pctldev == p->dev->of_node)) { diff --git a/drivers/pinctrl/freescale/pinctrl-scu.c b/drivers/pinctrl/freescale/pinctrl-scu.c index 73bf1d9f9cc6..23cf04bdfc55 100644 --- a/drivers/pinctrl/freescale/pinctrl-scu.c +++ b/drivers/pinctrl/freescale/pinctrl-scu.c @@ -23,12 +23,12 @@ struct imx_sc_msg_req_pad_set { struct imx_sc_rpc_msg hdr; u32 val; u16 pad; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_req_pad_get { struct imx_sc_rpc_msg hdr; u16 pad; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_resp_pad_get { struct imx_sc_rpc_msg hdr; diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c index 1b6e8646700f..2ac921c83da9 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c @@ -147,8 +147,8 @@ static const unsigned int sdio_d0_pins[] = { GPIOX_0 }; static const unsigned int sdio_d1_pins[] = { GPIOX_1 }; static const unsigned int sdio_d2_pins[] = { GPIOX_2 }; static const unsigned int sdio_d3_pins[] = { GPIOX_3 }; -static const unsigned int sdio_cmd_pins[] = { GPIOX_4 }; -static const unsigned int sdio_clk_pins[] = { GPIOX_5 }; +static const unsigned int sdio_clk_pins[] = { GPIOX_4 }; +static const unsigned int sdio_cmd_pins[] = { GPIOX_5 }; static const unsigned int sdio_irq_pins[] = { GPIOX_7 }; static const unsigned int nand_ce0_pins[] = { BOOT_8 }; diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c index a454f57c264e..62c02b969327 100644 --- a/drivers/pinctrl/pinctrl-falcon.c +++ b/drivers/pinctrl/pinctrl-falcon.c @@ -451,7 +451,7 @@ static int pinctrl_falcon_probe(struct platform_device *pdev) falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL); if (IS_ERR(falcon_info.clk[*bank])) { dev_err(&ppdev->dev, "failed to get clock\n"); - of_node_put(np) + of_node_put(np); return PTR_ERR(falcon_info.clk[*bank]); } falcon_info.membase[*bank] = devm_ioremap_resource(&pdev->dev, diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 9a8daa256a32..1a948c3f54b7 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -1104,7 +1104,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) pctrl->irq_chip.irq_mask = msm_gpio_irq_mask; pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask; pctrl->irq_chip.irq_ack = msm_gpio_irq_ack; - pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent; pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type; pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake; pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres; @@ -1118,7 +1117,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) if (!chip->irq.parent_domain) return -EPROBE_DEFER; chip->irq.child_to_parent_hwirq = msm_gpio_wakeirq; - + pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent; /* * Let's skip handling the GPIOs, if the parent irqchip * is handling the direct connect IRQ of the GPIO. 
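The pinctrl devicetree change just above stops using driver_deferred_probe_check_state_continue() and instead takes whatever driver_deferred_probe_check_state() returns, promoting its -ENODEV result back to -EPROBE_DEFER only while modules are enabled and no default pinctrl state may be used. A rough sketch of that policy as a standalone helper (the function name is invented for illustration; the behaviour is paraphrased from the hunk above, not part of the patch):

#include <linux/device.h>
#include <linux/errno.h>

/* Hypothetical helper mirroring the new "no pinctrl parent found" path */
static int example_pinctrl_defer_policy(struct device *dev, bool allow_default)
{
	int ret = driver_deferred_probe_check_state(dev);

	/*
	 * check_state() returns -EPROBE_DEFER while deferring is still
	 * allowed and a final error code once it has given up.  With
	 * modules enabled the pin controller may still be loaded later,
	 * so keep deferring unless falling back to a default state is
	 * acceptable.
	 */
	if (IS_ENABLED(CONFIG_MODULES) && !allow_default && ret == -ENODEV)
		ret = -EPROBE_DEFER;

	return ret;
}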
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index fba1d41d20ec..338a15d08629 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c @@ -794,7 +794,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev) girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node); girq->parent_domain = parent_domain; girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq; - girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell; + girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_twocell; girq->child_offset_to_irq = pm8xxx_child_offset_to_irq; girq->child_irq_domain_ops.translate = pm8xxx_domain_translate; diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index 2d5e0435af0a..af3b24f26ff2 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -92,6 +92,7 @@ struct stm32_gpio_bank { u32 bank_nr; u32 bank_ioport_nr; u32 pin_backup[STM32_GPIO_PINS_PER_BANK]; + u8 irq_type[STM32_GPIO_PINS_PER_BANK]; }; struct stm32_pinctrl { @@ -303,6 +304,50 @@ static const struct gpio_chip stm32_gpio_template = { .get_direction = stm32_gpio_get_direction, }; +static void stm32_gpio_irq_trigger(struct irq_data *d) +{ + struct stm32_gpio_bank *bank = d->domain->host_data; + int level; + + /* If level interrupt type then retrig */ + level = stm32_gpio_get(&bank->gpio_chip, d->hwirq); + if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) || + (level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH)) + irq_chip_retrigger_hierarchy(d); +} + +static void stm32_gpio_irq_eoi(struct irq_data *d) +{ + irq_chip_eoi_parent(d); + stm32_gpio_irq_trigger(d); +}; + +static int stm32_gpio_set_type(struct irq_data *d, unsigned int type) +{ + struct stm32_gpio_bank *bank = d->domain->host_data; + u32 parent_type; + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + case IRQ_TYPE_EDGE_FALLING: + case IRQ_TYPE_EDGE_BOTH: + parent_type = type; + break; + case IRQ_TYPE_LEVEL_HIGH: + parent_type = IRQ_TYPE_EDGE_RISING; + break; + case IRQ_TYPE_LEVEL_LOW: + parent_type = IRQ_TYPE_EDGE_FALLING; + break; + default: + return -EINVAL; + } + + bank->irq_type[d->hwirq] = type; + + return irq_chip_set_type_parent(d, parent_type); +}; + static int stm32_gpio_irq_request_resources(struct irq_data *irq_data) { struct stm32_gpio_bank *bank = irq_data->domain->host_data; @@ -330,13 +375,19 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data) gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq); } +static void stm32_gpio_irq_unmask(struct irq_data *d) +{ + irq_chip_unmask_parent(d); + stm32_gpio_irq_trigger(d); +} + static struct irq_chip stm32_gpio_irq_chip = { .name = "stm32gpio", - .irq_eoi = irq_chip_eoi_parent, + .irq_eoi = stm32_gpio_irq_eoi, .irq_ack = irq_chip_ack_parent, .irq_mask = irq_chip_mask_parent, - .irq_unmask = irq_chip_unmask_parent, - .irq_set_type = irq_chip_set_type_parent, + .irq_unmask = stm32_gpio_irq_unmask, + .irq_set_type = stm32_gpio_set_type, .irq_set_wake = irq_chip_set_wake_parent, .irq_request_resources = stm32_gpio_irq_request_resources, .irq_release_resources = stm32_gpio_irq_release_resources, diff --git a/drivers/platform/chrome/wilco_ec/properties.c b/drivers/platform/chrome/wilco_ec/properties.c index e69682c95ea2..62f27610dd33 100644 --- a/drivers/platform/chrome/wilco_ec/properties.c +++ b/drivers/platform/chrome/wilco_ec/properties.c @@ -5,7 +5,7 @@ 
#include <linux/platform_data/wilco-ec.h> #include <linux/string.h> -#include <linux/unaligned/le_memmove.h> +#include <asm/unaligned.h> /* Operation code; what the EC should do with the property */ enum ec_property_op { diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 587403c44598..cd9e2758c479 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1252,6 +1252,7 @@ config INTEL_TURBO_MAX_3 config TOUCHSCREEN_DMI bool "DMI based touchscreen configuration info" depends on ACPI && DMI && I2C=y && TOUCHSCREEN_SILEAD + select EFI_EMBEDDED_FIRMWARE if EFI ---help--- Certain ACPI based tablets with e.g. Silead or Chipone touchscreens do not have enough data in ACPI tables for the touchscreen driver to diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell-smo8800.c index bfcc1d1b9b96..b531fe8ab7e0 100644 --- a/drivers/platform/x86/dell-smo8800.c +++ b/drivers/platform/x86/dell-smo8800.c @@ -16,6 +16,7 @@ #include <linux/interrupt.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> +#include <linux/fs.h> struct smo8800_device { u32 irq; /* acpi device irq */ diff --git a/drivers/platform/x86/intel-uncore-frequency.c b/drivers/platform/x86/intel-uncore-frequency.c index 2b1a0734c3f8..859272075a8f 100644 --- a/drivers/platform/x86/intel-uncore-frequency.c +++ b/drivers/platform/x86/intel-uncore-frequency.c @@ -358,15 +358,13 @@ static struct notifier_block uncore_pm_nb = { .notifier_call = uncore_pm_notify, }; -#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } - static const struct x86_cpu_id intel_uncore_cpu_ids[] = { - ICPU(INTEL_FAM6_BROADWELL_G), - ICPU(INTEL_FAM6_BROADWELL_X), - ICPU(INTEL_FAM6_BROADWELL_D), - ICPU(INTEL_FAM6_SKYLAKE_X), - ICPU(INTEL_FAM6_ICELAKE_X), - ICPU(INTEL_FAM6_ICELAKE_D), + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, NULL), + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL), + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, NULL), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL), {} }; diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c index f14e2c5f9da5..7b23efc46a43 100644 --- a/drivers/platform/x86/intel_int0002_vgpio.c +++ b/drivers/platform/x86/intel_int0002_vgpio.c @@ -148,8 +148,8 @@ static struct irq_chip int0002_cht_irqchip = { }; static const struct x86_cpu_id int0002_cpu_ids[] = { - INTEL_CPU_FAM6(ATOM_SILVERMONT, int0002_byt_irqchip), /* Valleyview, Bay Trail */ - INTEL_CPU_FAM6(ATOM_AIRMONT, int0002_cht_irqchip), /* Braswell, Cherry Trail */ + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &int0002_byt_irqchip), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &int0002_cht_irqchip), {} }; diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index 6f436836fe50..9c9f209c8a33 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -113,8 +113,8 @@ static const struct mid_pb_ddata mrfld_ddata = { }; static const struct x86_cpu_id mid_pb_cpu_ids[] = { - INTEL_CPU_FAM6(ATOM_SALTWELL_MID, mfld_ddata), - INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, mrfld_ddata), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, &mfld_ddata), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &mrfld_ddata), {} }; diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 144faa8bad3d..3df33ff50faa 100644 --- 
a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -871,18 +871,18 @@ static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) #endif /* CONFIG_DEBUG_FS */ static const struct x86_cpu_id intel_pmc_core_ids[] = { - INTEL_CPU_FAM6(SKYLAKE_L, spt_reg_map), - INTEL_CPU_FAM6(SKYLAKE, spt_reg_map), - INTEL_CPU_FAM6(KABYLAKE_L, spt_reg_map), - INTEL_CPU_FAM6(KABYLAKE, spt_reg_map), - INTEL_CPU_FAM6(CANNONLAKE_L, cnp_reg_map), - INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map), - INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), - INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map), - INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map), - INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map), - INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map), - INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &spt_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &spt_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &spt_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &spt_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &cnp_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &cnp_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &cnp_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &tgl_reg_map), {} }; diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c index e1266f5c6359..731281855cc8 100644 --- a/drivers/platform/x86/intel_pmc_core_pltdrv.c +++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c @@ -38,14 +38,14 @@ static struct platform_device pmc_core_device = { * other list may grow, but this list should not. 
*/ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = { - INTEL_CPU_FAM6(SKYLAKE_L, pmc_core_device), - INTEL_CPU_FAM6(SKYLAKE, pmc_core_device), - INTEL_CPU_FAM6(KABYLAKE_L, pmc_core_device), - INTEL_CPU_FAM6(KABYLAKE, pmc_core_device), - INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device), - INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device), - INTEL_CPU_FAM6(COMETLAKE, pmc_core_device), - INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &pmc_core_device), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &pmc_core_device), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &pmc_core_device), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &pmc_core_device), + X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &pmc_core_device), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &pmc_core_device), + X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &pmc_core_device), + X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &pmc_core_device), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids); diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c index 89b042aecef3..1b6eab071068 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c @@ -160,10 +160,8 @@ static struct notifier_block isst_pm_nb = { .notifier_call = isst_pm_notify, }; -#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } - static const struct x86_cpu_id isst_if_cpu_ids[] = { - ICPU(INTEL_FAM6_SKYLAKE_X), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, isst_if_cpu_ids); diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c index 8e3fb55ac1ae..8a53d3b485b3 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c @@ -308,11 +308,10 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = { }; static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = { - INTEL_CPU_FAM6(ATOM_GOLDMONT, telem_apl_debugfs_conf), - INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, telem_apl_debugfs_conf), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_debugfs_conf), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_apl_debugfs_conf), {} }; - MODULE_DEVICE_TABLE(x86cpu, telemetry_debugfs_cpu_ids); static int telemetry_debugfs_check_evts(void) diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c index c4c742bb23cf..987a24e3344e 100644 --- a/drivers/platform/x86/intel_telemetry_pltdrv.c +++ b/drivers/platform/x86/intel_telemetry_pltdrv.c @@ -67,9 +67,6 @@ #define TELEM_CLEAR_VERBOSITY_BITS(x) ((x) &= ~(BIT(27) | BIT(28))) #define TELEM_SET_VERBOSITY_BITS(x, y) ((x) |= ((y) << 27)) -#define TELEM_CPU(model, data) \ - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&data } - enum telemetry_action { TELEM_UPDATE = 0, TELEM_ADD, @@ -183,8 +180,8 @@ static struct telemetry_plt_config telem_glk_config = { }; static const struct x86_cpu_id telemetry_cpu_ids[] = { - TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_config), - TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, telem_glk_config), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_config), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_glk_config), {} }; diff --git a/drivers/platform/x86/intel_turbo_max_3.c b/drivers/platform/x86/intel_turbo_max_3.c index 7b9cc841ab65..892140b62898 100644 --- 
a/drivers/platform/x86/intel_turbo_max_3.c +++ b/drivers/platform/x86/intel_turbo_max_3.c @@ -113,11 +113,9 @@ static int itmt_legacy_cpu_online(unsigned int cpu) return 0; } -#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } - static const struct x86_cpu_id itmt_legacy_cpu_ids[] = { - ICPU(INTEL_FAM6_BROADWELL_X), - ICPU(INTEL_FAM6_SKYLAKE_X), + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL), {} }; diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 93177e6e5ecd..6ec8923dec1a 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -11,12 +11,15 @@ #include <linux/acpi.h> #include <linux/device.h> #include <linux/dmi.h> +#include <linux/efi_embedded_fw.h> #include <linux/i2c.h> #include <linux/notifier.h> #include <linux/property.h> #include <linux/string.h> struct ts_dmi_data { + /* The EFI embedded-fw code expects this to be the first member! */ + struct efi_embedded_fw_desc embedded_fw; const char *acpi_name; const struct property_entry *properties; }; @@ -64,6 +67,15 @@ static const struct property_entry chuwi_hi8_pro_props[] = { }; static const struct ts_dmi_data chuwi_hi8_pro_data = { + .embedded_fw = { + .name = "silead/gsl3680-chuwi-hi8-pro.fw", + .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 }, + .length = 39864, + .sha256 = { 0xc0, 0x88, 0xc5, 0xef, 0xd1, 0x70, 0x77, 0x59, + 0x4e, 0xe9, 0xc4, 0xd8, 0x2e, 0xcd, 0xbf, 0x95, + 0x32, 0xd9, 0x03, 0x28, 0x0d, 0x48, 0x9f, 0x92, + 0x35, 0x37, 0xf6, 0x8b, 0x2a, 0xe4, 0x73, 0xff }, + }, .acpi_name = "MSSL1680:00", .properties = chuwi_hi8_pro_props, }; @@ -120,6 +132,18 @@ static const struct ts_dmi_data chuwi_vi8_data = { .properties = chuwi_vi8_props, }; +static const struct ts_dmi_data chuwi_vi8_plus_data = { + .embedded_fw = { + .name = "chipone/icn8505-HAMP0002.fw", + .prefix = { 0xb0, 0x07, 0x00, 0x00, 0xe4, 0x07, 0x00, 0x00 }, + .length = 35012, + .sha256 = { 0x93, 0xe5, 0x49, 0xe0, 0xb6, 0xa2, 0xb4, 0xb3, + 0x88, 0x96, 0x34, 0x97, 0x5e, 0xa8, 0x13, 0x78, + 0x72, 0x98, 0xb8, 0x29, 0xeb, 0x5c, 0xa7, 0xf1, + 0x25, 0x13, 0x43, 0xf4, 0x30, 0x7c, 0xfc, 0x7c }, + }, +}; + static const struct property_entry chuwi_vi10_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 0), PROPERTY_ENTRY_U32("touchscreen-min-y", 4), @@ -181,6 +205,15 @@ static const struct property_entry cube_iwork8_air_props[] = { }; static const struct ts_dmi_data cube_iwork8_air_data = { + .embedded_fw = { + .name = "silead/gsl3670-cube-iwork8-air.fw", + .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 }, + .length = 38808, + .sha256 = { 0xff, 0x62, 0x2d, 0xd1, 0x8a, 0x78, 0x04, 0x7b, + 0x33, 0x06, 0xb0, 0x4f, 0x7f, 0x02, 0x08, 0x9c, + 0x96, 0xd4, 0x9f, 0x04, 0xe1, 0x47, 0x25, 0x25, + 0x60, 0x77, 0x41, 0x33, 0xeb, 0x12, 0x82, 0xfc }, + }, .acpi_name = "MSSL1680:00", .properties = cube_iwork8_air_props, }; @@ -387,6 +420,15 @@ static const struct property_entry onda_v80_plus_v3_props[] = { }; static const struct ts_dmi_data onda_v80_plus_v3_data = { + .embedded_fw = { + .name = "silead/gsl3676-onda-v80-plus-v3.fw", + .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 }, + .length = 37224, + .sha256 = { 0x8f, 0xbd, 0x8f, 0x0c, 0x6b, 0xba, 0x5b, 0xf5, + 0xa3, 0xc7, 0xa3, 0xc0, 0x4f, 0xcd, 0xdf, 0x32, + 0xcc, 0xe4, 0x70, 0xd6, 0x46, 0x9c, 0xd7, 0xa7, + 0x4b, 0x82, 0x3f, 0xab, 0xc7, 0x90, 0xea, 0x23 }, + }, .acpi_name = "MSSL1680:00", .properties = onda_v80_plus_v3_props, }; @@ -449,6 +491,15 
@@ static const struct property_entry pipo_w2s_props[] = { }; static const struct ts_dmi_data pipo_w2s_data = { + .embedded_fw = { + .name = "silead/gsl1680-pipo-w2s.fw", + .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 }, + .length = 39072, + .sha256 = { 0xd0, 0x58, 0xc4, 0x7d, 0x55, 0x2d, 0x62, 0x18, + 0xd1, 0x6a, 0x71, 0x73, 0x0b, 0x3f, 0xbe, 0x60, + 0xbb, 0x45, 0x8c, 0x52, 0x27, 0xb7, 0x18, 0xf4, + 0x31, 0x00, 0x6a, 0x49, 0x76, 0xd8, 0x7c, 0xd3 }, + }, .acpi_name = "MSSL1680:00", .properties = pipo_w2s_props, }; @@ -641,7 +692,7 @@ static const struct ts_dmi_data trekstor_surftab_wintron70_data = { }; /* NOTE: Please keep this table sorted alphabetically */ -static const struct dmi_system_id touchscreen_dmi_table[] = { +const struct dmi_system_id touchscreen_dmi_table[] = { { /* Chuwi Hi8 */ .driver_data = (void *)&chuwi_hi8_data, @@ -704,6 +755,15 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* Chuwi Vi8 Plus (CWI519) */ + .driver_data = (void *)&chuwi_vi8_plus_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"), + DMI_MATCH(DMI_PRODUCT_NAME, "D2D3_Vi8A1"), + DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), + }, + }, + { /* Chuwi Vi10 (CWI505) */ .driver_data = (void *)&chuwi_vi10_data, .matches = { @@ -1106,6 +1166,9 @@ static int __init ts_dmi_init(void) return 0; /* Not an error */ ts_data = dmi_id->driver_data; + /* Some dmi table entries only provide an efi_embedded_fw_desc */ + if (!ts_data->properties) + return 0; error = bus_register_notifier(&i2c_bus_type, &ts_dmi_notifier); if (error) diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index dc2e966a5c25..941739db7199 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -29,6 +29,7 @@ #include <linux/uaccess.h> #include <linux/uuid.h> #include <linux/wmi.h> +#include <linux/fs.h> #include <uapi/linux/wmi.h> ACPI_MODULE_NAME("wmi"); diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c index cd1270614cc6..e9bbd3c42eef 100644 --- a/drivers/powercap/idle_inject.c +++ b/drivers/powercap/idle_inject.c @@ -67,7 +67,7 @@ struct idle_inject_device { struct hrtimer timer; unsigned int idle_duration_us; unsigned int run_duration_us; - unsigned long int cpumask[0]; + unsigned long cpumask[]; }; static DEFINE_PER_CPU(struct idle_inject_thread, idle_inject_thread); diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 73257cf107d9..eb328655bc01 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -951,52 +951,51 @@ static const struct rapl_defaults rapl_defaults_cht = { }; static const struct x86_cpu_id rapl_ids[] __initconst = { - INTEL_CPU_FAM6(SANDYBRIDGE, rapl_defaults_core), - INTEL_CPU_FAM6(SANDYBRIDGE_X, rapl_defaults_core), - - INTEL_CPU_FAM6(IVYBRIDGE, rapl_defaults_core), - INTEL_CPU_FAM6(IVYBRIDGE_X, rapl_defaults_core), - - INTEL_CPU_FAM6(HASWELL, rapl_defaults_core), - INTEL_CPU_FAM6(HASWELL_L, rapl_defaults_core), - INTEL_CPU_FAM6(HASWELL_G, rapl_defaults_core), - INTEL_CPU_FAM6(HASWELL_X, rapl_defaults_hsw_server), - - INTEL_CPU_FAM6(BROADWELL, rapl_defaults_core), - INTEL_CPU_FAM6(BROADWELL_G, rapl_defaults_core), - INTEL_CPU_FAM6(BROADWELL_D, rapl_defaults_core), - INTEL_CPU_FAM6(BROADWELL_X, rapl_defaults_hsw_server), - - INTEL_CPU_FAM6(SKYLAKE, rapl_defaults_core), - INTEL_CPU_FAM6(SKYLAKE_L, rapl_defaults_core), - INTEL_CPU_FAM6(SKYLAKE_X, rapl_defaults_hsw_server), - INTEL_CPU_FAM6(KABYLAKE_L, rapl_defaults_core), - 
INTEL_CPU_FAM6(KABYLAKE, rapl_defaults_core), - INTEL_CPU_FAM6(CANNONLAKE_L, rapl_defaults_core), - INTEL_CPU_FAM6(ICELAKE_L, rapl_defaults_core), - INTEL_CPU_FAM6(ICELAKE, rapl_defaults_core), - INTEL_CPU_FAM6(ICELAKE_NNPI, rapl_defaults_core), - INTEL_CPU_FAM6(ICELAKE_X, rapl_defaults_hsw_server), - INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server), - INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core), - INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core), - INTEL_CPU_FAM6(TIGERLAKE_L, rapl_defaults_core), - - INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt), - INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht), - INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, rapl_defaults_tng), - INTEL_CPU_FAM6(ATOM_AIRMONT_MID, rapl_defaults_ann), - INTEL_CPU_FAM6(ATOM_GOLDMONT, rapl_defaults_core), - INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core), - INTEL_CPU_FAM6(ATOM_GOLDMONT_D, rapl_defaults_core), - INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core), - INTEL_CPU_FAM6(ATOM_TREMONT_L, rapl_defaults_core), - - INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server), - INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server), + X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &rapl_defaults_core), + + X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &rapl_defaults_core), + + X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &rapl_defaults_hsw_server), + + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &rapl_defaults_hsw_server), + + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &rapl_defaults_hsw_server), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &rapl_defaults_hsw_server), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &rapl_defaults_hsw_server), + X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &rapl_defaults_core), + + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &rapl_defaults_cht), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &rapl_defaults_tng), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_MID, &rapl_defaults_ann), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &rapl_defaults_core), + + X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &rapl_defaults_hsw_server), + X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &rapl_defaults_hsw_server), {} }; - 
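Most of the platform/x86, powercap and telemetry conversions above follow one pattern: the local ICPU()/INTEL_CPU_FAM6()/TELEM_CPU() wrappers are replaced by the generic X86_MATCH_INTEL_FAM6_MODEL() helper, whose second argument is stored in x86_cpu_id.driver_data (hence the added '&' on the data arguments, or NULL where no per-model data is needed). A minimal consumer of such a table could look like the sketch below; the example_* names are made up for illustration and do not appear in any of the drivers above.

#include <linux/module.h>
#include <linux/errno.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

struct example_defaults {
	unsigned int quirk;
};

static const struct example_defaults example_core_defaults = { .quirk = 1 };

static const struct x86_cpu_id example_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,   &example_core_defaults),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &example_core_defaults),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, example_cpu_ids);

static int __init example_init(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(example_cpu_ids);
	const struct example_defaults *defaults;

	if (!id)
		return -ENODEV;

	/* driver_data carries the pointer placed in the match table */
	defaults = (const struct example_defaults *)id->driver_data;

	return defaults->quirk ? 0 : -EINVAL;
}
module_init(example_init);

MODULE_LICENSE("GPL");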
MODULE_DEVICE_TABLE(x86cpu, rapl_ids); /* Read once for all raw primitive data for domains */ diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 074a2ef55943..f4b72cb098ef 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -107,6 +107,7 @@ config REGULATOR_AD5398 config REGULATOR_ANATOP tristate "Freescale i.MX on-chip ANATOP LDO regulators" + depends on ARCH_MXC || COMPILE_TEST depends on MFD_SYSCON help Say y here to support Freescale i.MX on-chip ANATOP LDOs @@ -613,6 +614,16 @@ config REGULATOR_MCP16502 through the regulator interface. In addition it enables suspend-to-ram/standby transition. +config REGULATOR_MP5416 + tristate "Monolithic MP5416 PMIC" + depends on I2C && OF + select REGMAP_I2C + help + Say y here to support the MP5416 PMIC. This will enable supports + the software controllable 4 buck and 4 LDO regulators. + Say M here if you want to include support for the regulator as a + module. + config REGULATOR_MP8859 tristate "MPS MP8859 regulator driver" depends on I2C @@ -624,6 +635,13 @@ config REGULATOR_MP8859 Say M here if you want to include support for the regulator as a module. The module will be named "mp8859". +config REGULATOR_MP886X + tristate "MPS MP8869 regulator driver" + depends on I2C && (OF || COMPILE_TEST) + select REGMAP_I2C + help + This driver supports the MP8869 voltage regulator. + config REGULATOR_MPQ7920 tristate "Monolithic MPQ7920 PMIC" depends on I2C && OF diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index c0d6b96ebd78..6610ee001d9a 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -78,7 +78,9 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o +obj-$(CONFIG_REGULATOR_MP5416) += mp5416.o obj-$(CONFIG_REGULATOR_MP8859) += mp8859.o +obj-$(CONFIG_REGULATOR_MP886X) += mp886x.o obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index 754739d004e5..ca92b3de0e9c 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c @@ -305,9 +305,13 @@ static int anatop_regulator_probe(struct platform_device *pdev) /* register regulator */ rdev = devm_regulator_register(dev, rdesc, &config); if (IS_ERR(rdev)) { - dev_err(dev, "failed to register %s\n", - rdesc->name); - return PTR_ERR(rdev); + ret = PTR_ERR(rdev); + if (ret == -EPROBE_DEFER) + dev_dbg(dev, "failed to register %s, deferring...\n", + rdesc->name); + else + dev_err(dev, "failed to register %s\n", rdesc->name); + return ret; } platform_set_drvdata(pdev, rdev); diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 16f0c8570036..1e6eb5b1f8d8 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -381,8 +381,7 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp) mask = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE_MASK | AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN_MASK; enable = (ramp > 0) ? 
- AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN : - !AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN; + AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN : 0; break; } @@ -393,8 +392,7 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp) mask = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE_MASK | AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN_MASK; enable = (ramp > 0) ? - AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN : - !AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN; + AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN : 0; break; } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index d015d99cb59d..c340505150b6 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1849,7 +1849,6 @@ struct regulator *_regulator_get(struct device *dev, const char *id, { struct regulator_dev *rdev; struct regulator *regulator; - const char *devname = dev ? dev_name(dev) : "deviceless"; struct device_link *link; int ret; @@ -1887,9 +1886,7 @@ struct regulator *_regulator_get(struct device *dev, const char *id, * enabled, even if it isn't hooked up, and just * provide a dummy. */ - dev_warn(dev, - "%s supply %s not found, using dummy regulator\n", - devname, id); + dev_warn(dev, "supply %s not found, using dummy regulator\n", id); rdev = dummy_regulator_rdev; get_device(&rdev->dev); break; @@ -5757,6 +5754,10 @@ static DECLARE_DELAYED_WORK(regulator_init_complete_work, static int __init regulator_init_complete(void) { + int delay = driver_deferred_probe_timeout; + + if (delay < 0) + delay = 0; /* * Since DT doesn't provide an idiomatic mechanism for * enabling full constraints and since it's much more natural @@ -5767,18 +5768,17 @@ static int __init regulator_init_complete(void) has_full_constraints = true; /* - * We punt completion for an arbitrary amount of time since - * systems like distros will load many drivers from userspace - * so consumers might not always be ready yet, this is - * particularly an issue with laptops where this might bounce - * the display off then on. Ideally we'd get a notification - * from userspace when this happens but we don't so just wait - * a bit and hope we waited long enough. It'd be better if - * we'd only do this on systems that need it, and a kernel - * command line option might be useful. + * If driver_deferred_probe_timeout is set, we punt + * completion for that many seconds since systems like + * distros will load many drivers from userspace so consumers + * might not always be ready yet, this is particularly an + * issue with laptops where this might bounce the display off + * then on. Ideally we'd get a notification from userspace + * when this happens but we don't so just wait a bit and hope + * we waited long enough. It'd be better if we'd only do + * this on systems that need it. */ - schedule_delayed_work(®ulator_init_complete_work, - msecs_to_jiffies(30000)); + schedule_delayed_work(®ulator_init_complete_work, delay * HZ); return 0; } diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c index d3ce0278bfbe..d8112f56e94e 100644 --- a/drivers/regulator/da9062-regulator.c +++ b/drivers/regulator/da9062-regulator.c @@ -73,7 +73,7 @@ struct da9062_regulators { int irq_ldo_lim; unsigned n_regulators; /* Array size to be defined during init. Keep at end. 
*/ - struct da9062_regulator regulator[0]; + struct da9062_regulator regulator[]; }; /* Regulator operations */ diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c index 2aceb3b7afc2..e1d6c8f6d40b 100644 --- a/drivers/regulator/da9063-regulator.c +++ b/drivers/regulator/da9063-regulator.c @@ -66,7 +66,7 @@ struct da9063_regulator_data { }; struct da9063_regulators_pdata { - unsigned n_regulators; + unsigned int n_regulators; struct da9063_regulator_data *regulator_data; }; @@ -100,6 +100,7 @@ struct da9063_regulator_info { .desc.vsel_mask = DA9063_V##regl_name##_MASK, \ .desc.linear_min_sel = DA9063_V##regl_name##_BIAS, \ .sleep = BFIELD(DA9063_REG_V##regl_name##_A, DA9063_LDO_SL), \ + .suspend = BFIELD(DA9063_REG_##regl_name##_CONT, DA9063_LDO_CONF), \ .suspend_sleep = BFIELD(DA9063_REG_V##regl_name##_B, DA9063_LDO_SL), \ .suspend_vsel_reg = DA9063_REG_V##regl_name##_B @@ -124,6 +125,7 @@ struct da9063_regulator_info { .desc.vsel_mask = DA9063_VBUCK_MASK, \ .desc.linear_min_sel = DA9063_VBUCK_BIAS, \ .sleep = BFIELD(DA9063_REG_V##regl_name##_A, DA9063_BUCK_SL), \ + .suspend = BFIELD(DA9063_REG_##regl_name##_CONT, DA9063_BUCK_CONF), \ .suspend_sleep = BFIELD(DA9063_REG_V##regl_name##_B, DA9063_BUCK_SL), \ .suspend_vsel_reg = DA9063_REG_V##regl_name##_B, \ .mode = BFIELD(DA9063_REG_##regl_name##_CFG, DA9063_BUCK_MODE_MASK) @@ -131,7 +133,7 @@ struct da9063_regulator_info { /* Defines asignment of regulators info table to chip model */ struct da9063_dev_model { const struct da9063_regulator_info *regulator_info; - unsigned n_regulators; + unsigned int n_regulators; enum da9063_type type; }; @@ -150,9 +152,9 @@ struct da9063_regulator { /* Encapsulates all information for the regulators driver */ struct da9063_regulators { - unsigned n_regulators; + unsigned int n_regulators; /* Array size to be defined during init. Keep at end. */ - struct da9063_regulator regulator[0]; + struct da9063_regulator regulator[]; }; /* BUCK modes for DA9063 */ @@ -165,38 +167,46 @@ enum { /* Regulator operations */ -/* Current limits array (in uA) for BCORE1, BCORE2, BPRO. - Entry indexes corresponds to register values. */ +/* + * Current limits array (in uA) for BCORE1, BCORE2, BPRO. + * Entry indexes corresponds to register values. + */ static const unsigned int da9063_buck_a_limits[] = { 500000, 600000, 700000, 800000, 900000, 1000000, 1100000, 1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1900000, 2000000 }; -/* Current limits array (in uA) for BMEM, BIO, BPERI. - Entry indexes corresponds to register values. */ +/* + * Current limits array (in uA) for BMEM, BIO, BPERI. + * Entry indexes corresponds to register values. + */ static const unsigned int da9063_buck_b_limits[] = { 1500000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000 }; -/* Current limits array (in uA) for merged BCORE1 and BCORE2. - Entry indexes corresponds to register values. */ +/* + * Current limits array (in uA) for merged BCORE1 and BCORE2. + * Entry indexes corresponds to register values. + */ static const unsigned int da9063_bcores_merged_limits[] = { 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2200000, 2400000, 2600000, 2800000, 3000000, 3200000, 3400000, 3600000, 3800000, 4000000 }; -/* Current limits array (in uA) for merged BMEM and BIO. - Entry indexes corresponds to register values. */ +/* + * Current limits array (in uA) for merged BMEM and BIO. 
+ * Entry indexes corresponds to register values. + */ static const unsigned int da9063_bmem_bio_merged_limits[] = { 3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000, 4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000 }; -static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned mode) +static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); - unsigned val; + unsigned int val; switch (mode) { case REGULATOR_MODE_FAST: @@ -221,10 +231,9 @@ static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned mode) * There are 3 modes to map to: FAST, NORMAL, and STANDBY. */ -static unsigned da9063_buck_get_mode(struct regulator_dev *rdev) +static unsigned int da9063_buck_get_mode(struct regulator_dev *rdev) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); - struct regmap_field *field; unsigned int val; int ret; @@ -245,18 +254,7 @@ static unsigned da9063_buck_get_mode(struct regulator_dev *rdev) return REGULATOR_MODE_NORMAL; } - /* Detect current regulator state */ - ret = regmap_field_read(regl->suspend, &val); - if (ret < 0) - return 0; - - /* Read regulator mode from proper register, depending on state */ - if (val) - field = regl->suspend_sleep; - else - field = regl->sleep; - - ret = regmap_field_read(field, &val); + ret = regmap_field_read(regl->sleep, &val); if (ret < 0) return 0; @@ -271,10 +269,10 @@ static unsigned da9063_buck_get_mode(struct regulator_dev *rdev) * There are 2 modes to map to: NORMAL and STANDBY (sleep) for each state. */ -static int da9063_ldo_set_mode(struct regulator_dev *rdev, unsigned mode) +static int da9063_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); - unsigned val; + unsigned int val; switch (mode) { case REGULATOR_MODE_NORMAL: @@ -290,24 +288,12 @@ static int da9063_ldo_set_mode(struct regulator_dev *rdev, unsigned mode) return regmap_field_write(regl->sleep, val); } -static unsigned da9063_ldo_get_mode(struct regulator_dev *rdev) +static unsigned int da9063_ldo_get_mode(struct regulator_dev *rdev) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); - struct regmap_field *field; int ret, val; - /* Detect current regulator state */ - ret = regmap_field_read(regl->suspend, &val); - if (ret < 0) - return 0; - - /* Read regulator mode from proper register, depending on state */ - if (val) - field = regl->suspend_sleep; - else - field = regl->sleep; - - ret = regmap_field_read(field, &val); + ret = regmap_field_read(regl->sleep, &val); if (ret < 0) return 0; @@ -383,7 +369,8 @@ static int da9063_suspend_disable(struct regulator_dev *rdev) return regmap_field_write(regl->suspend, 0); } -static int da9063_buck_set_suspend_mode(struct regulator_dev *rdev, unsigned mode) +static int da9063_buck_set_suspend_mode(struct regulator_dev *rdev, + unsigned int mode) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); int val; @@ -405,10 +392,11 @@ static int da9063_buck_set_suspend_mode(struct regulator_dev *rdev, unsigned mod return regmap_field_write(regl->mode, val); } -static int da9063_ldo_set_suspend_mode(struct regulator_dev *rdev, unsigned mode) +static int da9063_ldo_set_suspend_mode(struct regulator_dev *rdev, + unsigned int mode) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); - unsigned val; + unsigned int val; switch (mode) { case REGULATOR_MODE_NORMAL: @@ -465,42 +453,36 @@ static const struct da9063_regulator_info 
da9063_regulator_info[] = { da9063_buck_a_limits, DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK), DA9063_BUCK_COMMON_FIELDS(BCORE1), - .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE1_SEL), }, { DA9063_BUCK(DA9063, BCORE2, 300, 10, 1570, da9063_buck_a_limits, DA9063_REG_BUCK_ILIM_C, DA9063_BCORE2_ILIM_MASK), DA9063_BUCK_COMMON_FIELDS(BCORE2), - .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE2_SEL), }, { DA9063_BUCK(DA9063, BPRO, 530, 10, 1800, da9063_buck_a_limits, DA9063_REG_BUCK_ILIM_B, DA9063_BPRO_ILIM_MASK), DA9063_BUCK_COMMON_FIELDS(BPRO), - .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPRO_SEL), }, { DA9063_BUCK(DA9063, BMEM, 800, 20, 3340, da9063_buck_b_limits, DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK), DA9063_BUCK_COMMON_FIELDS(BMEM), - .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL), }, { DA9063_BUCK(DA9063, BIO, 800, 20, 3340, da9063_buck_b_limits, DA9063_REG_BUCK_ILIM_A, DA9063_BIO_ILIM_MASK), DA9063_BUCK_COMMON_FIELDS(BIO), - .suspend = BFIELD(DA9063_REG_DVC_2, DA9063_VBIO_SEL), }, { DA9063_BUCK(DA9063, BPERI, 800, 20, 3340, da9063_buck_b_limits, DA9063_REG_BUCK_ILIM_B, DA9063_BPERI_ILIM_MASK), DA9063_BUCK_COMMON_FIELDS(BPERI), - .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPERI_SEL), }, { DA9063_BUCK(DA9063, BCORES_MERGED, 300, 10, 1570, @@ -508,7 +490,6 @@ static const struct da9063_regulator_info da9063_regulator_info[] = { DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK), /* BCORES_MERGED uses the same register fields as BCORE1 */ DA9063_BUCK_COMMON_FIELDS(BCORE1), - .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE1_SEL), }, { DA9063_BUCK(DA9063, BMEM_BIO_MERGED, 800, 20, 3340, @@ -516,21 +497,17 @@ static const struct da9063_regulator_info da9063_regulator_info[] = { DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK), /* BMEM_BIO_MERGED uses the same register fields as BMEM */ DA9063_BUCK_COMMON_FIELDS(BMEM), - .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL), }, { DA9063_LDO(DA9063, LDO3, 900, 20, 3440), - .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO3_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO3_LIM), }, { DA9063_LDO(DA9063, LDO7, 900, 50, 3600), - .suspend = BFIELD(DA9063_REG_LDO7_CONT, DA9063_VLDO7_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO7_LIM), }, { DA9063_LDO(DA9063, LDO8, 900, 50, 3600), - .suspend = BFIELD(DA9063_REG_LDO8_CONT, DA9063_VLDO8_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO8_LIM), }, { @@ -539,36 +516,29 @@ static const struct da9063_regulator_info da9063_regulator_info[] = { }, { DA9063_LDO(DA9063, LDO11, 900, 50, 3600), - .suspend = BFIELD(DA9063_REG_LDO11_CONT, DA9063_VLDO11_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO11_LIM), }, /* The following LDOs are present only on DA9063, not on DA9063L */ { DA9063_LDO(DA9063, LDO1, 600, 20, 1860), - .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO1_SEL), }, { DA9063_LDO(DA9063, LDO2, 600, 20, 1860), - .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO2_SEL), }, { DA9063_LDO(DA9063, LDO4, 900, 20, 3440), - .suspend = BFIELD(DA9063_REG_DVC_2, DA9063_VLDO4_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO4_LIM), }, { DA9063_LDO(DA9063, LDO5, 900, 50, 3600), - .suspend = BFIELD(DA9063_REG_LDO5_CONT, DA9063_VLDO5_SEL), }, { DA9063_LDO(DA9063, LDO6, 900, 50, 3600), - .suspend = BFIELD(DA9063_REG_LDO6_CONT, DA9063_VLDO6_SEL), }, { DA9063_LDO(DA9063, LDO10, 900, 50, 3600), - .suspend = BFIELD(DA9063_REG_LDO10_CONT, DA9063_VLDO10_SEL), }, }; @@ -593,7 +563,7 @@ static irqreturn_t da9063_ldo_lim_event(int irq, void *data) struct 
da9063_regulators *regulators = data; struct da9063 *hw = regulators->regulator[0].hw; struct da9063_regulator *regl; - int bits, i , ret; + int bits, i, ret; ret = regmap_read(hw->regmap, DA9063_REG_STATUS_D, &bits); if (ret < 0) @@ -605,10 +575,10 @@ static irqreturn_t da9063_ldo_lim_event(int irq, void *data) continue; if (BIT(regl->info->oc_event.lsb) & bits) { - regulator_lock(regl->rdev); + regulator_lock(regl->rdev); regulator_notifier_call_chain(regl->rdev, REGULATOR_EVENT_OVER_CURRENT, NULL); - regulator_unlock(regl->rdev); + regulator_unlock(regl->rdev); } } @@ -833,7 +803,7 @@ static int da9063_regulator_probe(struct platform_device *pdev) if (regl->info->suspend_sleep.reg) { regl->suspend_sleep = devm_regmap_field_alloc(&pdev->dev, - da9063->regmap, regl->info->suspend_sleep); + da9063->regmap, regl->info->suspend_sleep); if (IS_ERR(regl->suspend_sleep)) return PTR_ERR(regl->suspend_sleep); } @@ -867,12 +837,10 @@ static int da9063_regulator_probe(struct platform_device *pdev) NULL, da9063_ldo_lim_event, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "LDO_LIM", regulators); - if (ret) { + if (ret) dev_err(&pdev->dev, "Failed to request LDO_LIM IRQ.\n"); - return ret; - } - return 0; + return ret; } static struct platform_driver da9063_regulator_driver = { diff --git a/drivers/regulator/mp5416.c b/drivers/regulator/mp5416.c new file mode 100644 index 000000000000..67ce1b52a1a1 --- /dev/null +++ b/drivers/regulator/mp5416.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// mp5416.c - regulator driver for mps mp5416 +// +// Copyright 2020 Monolithic Power Systems, Inc +// +// Author: Saravanan Sekar <sravanhome@gmail.com> + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/i2c.h> + +#define MP5416_REG_CTL0 0x00 +#define MP5416_REG_CTL1 0x01 +#define MP5416_REG_CTL2 0x02 +#define MP5416_REG_ILIM 0x03 +#define MP5416_REG_BUCK1 0x04 +#define MP5416_REG_BUCK2 0x05 +#define MP5416_REG_BUCK3 0x06 +#define MP5416_REG_BUCK4 0x07 +#define MP5416_REG_LDO1 0x08 +#define MP5416_REG_LDO2 0x09 +#define MP5416_REG_LDO3 0x0a +#define MP5416_REG_LDO4 0x0b + +#define MP5416_REGULATOR_EN BIT(7) +#define MP5416_MASK_VSET 0x7f +#define MP5416_MASK_BUCK1_ILIM 0xc0 +#define MP5416_MASK_BUCK2_ILIM 0x0c +#define MP5416_MASK_BUCK3_ILIM 0x30 +#define MP5416_MASK_BUCK4_ILIM 0x03 +#define MP5416_MASK_DVS_SLEWRATE 0xc0 + +/* values in uV */ +#define MP5416_VOLT1_MIN 600000 +#define MP5416_VOLT1_MAX 2187500 +#define MP5416_VOLT1_STEP 12500 +#define MP5416_VOLT2_MIN 800000 +#define MP5416_VOLT2_MAX 3975000 +#define MP5416_VOLT2_STEP 25000 + +#define MP5416_VOLT1_RANGE \ + ((MP5416_VOLT1_MAX - MP5416_VOLT1_MIN)/MP5416_VOLT1_STEP + 1) +#define MP5416_VOLT2_RANGE \ + ((MP5416_VOLT2_MAX - MP5416_VOLT2_MIN)/MP5416_VOLT2_STEP + 1) + +#define MP5416BUCK(_name, _id, _ilim, _dreg, _dval, _vsel) \ + [MP5416_BUCK ## _id] = { \ + .id = MP5416_BUCK ## _id, \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .ops = &mp5416_buck_ops, \ + .min_uV = MP5416_VOLT ##_vsel## _MIN, \ + .uV_step = MP5416_VOLT ##_vsel## _STEP, \ + .n_voltages = MP5416_VOLT ##_vsel## _RANGE, \ + .curr_table = _ilim, \ + .n_current_limits = ARRAY_SIZE(_ilim), \ + .csel_reg = MP5416_REG_ILIM, \ + .csel_mask = MP5416_MASK_BUCK ## _id ##_ILIM, \ + .vsel_reg = MP5416_REG_BUCK ## _id, \ + .vsel_mask = MP5416_MASK_VSET, \ + .enable_reg = MP5416_REG_BUCK ## _id, \ 
+ .enable_mask = MP5416_REGULATOR_EN, \ + .active_discharge_on = _dval, \ + .active_discharge_reg = _dreg, \ + .active_discharge_mask = _dval, \ + .owner = THIS_MODULE, \ + } + +#define MP5416LDO(_name, _id, _dval) \ + [MP5416_LDO ## _id] = { \ + .id = MP5416_LDO ## _id, \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .ops = &mp5416_ldo_ops, \ + .min_uV = MP5416_VOLT2_MIN, \ + .uV_step = MP5416_VOLT2_STEP, \ + .n_voltages = MP5416_VOLT2_RANGE, \ + .vsel_reg = MP5416_REG_LDO ##_id, \ + .vsel_mask = MP5416_MASK_VSET, \ + .enable_reg = MP5416_REG_LDO ##_id, \ + .enable_mask = MP5416_REGULATOR_EN, \ + .active_discharge_on = _dval, \ + .active_discharge_reg = MP5416_REG_CTL2, \ + .active_discharge_mask = _dval, \ + .owner = THIS_MODULE, \ + } + +enum mp5416_regulators { + MP5416_BUCK1, + MP5416_BUCK2, + MP5416_BUCK3, + MP5416_BUCK4, + MP5416_LDO1, + MP5416_LDO2, + MP5416_LDO3, + MP5416_LDO4, + MP5416_MAX_REGULATORS, +}; + +static const struct regmap_config mp5416_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x0d, +}; + +/* Current limits array (in uA) + * ILIM1 & ILIM3 + */ +static const unsigned int mp5416_I_limits1[] = { + 3800000, 4600000, 5600000, 6800000 +}; + +/* ILIM2 & ILIM4 */ +static const unsigned int mp5416_I_limits2[] = { + 2200000, 3200000, 4200000, 5200000 +}; + +static int mp5416_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay); + +static const struct regulator_ops mp5416_ldo_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, +}; + +static const struct regulator_ops mp5416_buck_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .get_current_limit = regulator_get_current_limit_regmap, + .set_current_limit = regulator_set_current_limit_regmap, + .set_ramp_delay = mp5416_set_ramp_delay, +}; + +static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = { + MP5416BUCK("buck1", 1, mp5416_I_limits1, MP5416_REG_CTL1, BIT(0), 1), + MP5416BUCK("buck2", 2, mp5416_I_limits2, MP5416_REG_CTL1, BIT(1), 2), + MP5416BUCK("buck3", 3, mp5416_I_limits1, MP5416_REG_CTL1, BIT(2), 1), + MP5416BUCK("buck4", 4, mp5416_I_limits2, MP5416_REG_CTL2, BIT(5), 2), + MP5416LDO("ldo1", 1, BIT(4)), + MP5416LDO("ldo2", 2, BIT(3)), + MP5416LDO("ldo3", 3, BIT(2)), + MP5416LDO("ldo4", 4, BIT(1)), +}; + +/* + * DVS ramp rate BUCK1 to BUCK4 + * 00: 32mV/us + * 01: 16mV/us + * 10: 8mV/us + * 11: 4mV/us + */ +static int mp5416_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) +{ + unsigned int ramp_val; + + if (ramp_delay > 32000 || ramp_delay < 0) + return -EINVAL; + + if (ramp_delay <= 4000) + ramp_val = 3; + else if (ramp_delay <= 8000) + ramp_val = 2; + else if (ramp_delay <= 16000) + ramp_val = 1; + else + ramp_val = 0; + + return regmap_update_bits(rdev->regmap, MP5416_REG_CTL2, + MP5416_MASK_DVS_SLEWRATE, ramp_val << 6); +} + 
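/*
 * The MP5416BUCK()/MP5416LDO() macros above generate one struct
 * regulator_desc per rail.  For reference, an approximate hand expansion
 * of MP5416BUCK("buck1", 1, mp5416_I_limits1, MP5416_REG_CTL1, BIT(0), 1)
 * is shown below (illustrative only, not generated code and not part of
 * the patch):
 */
	[MP5416_BUCK1] = {
		.id			= MP5416_BUCK1,
		.name			= "buck1",
		.of_match		= "buck1",
		.regulators_node	= "regulators",
		.ops			= &mp5416_buck_ops,
		.min_uV			= MP5416_VOLT1_MIN,	/* 600000 uV */
		.uV_step		= MP5416_VOLT1_STEP,	/* 12500 uV */
		.n_voltages		= MP5416_VOLT1_RANGE,	/* 128 */
		.curr_table		= mp5416_I_limits1,
		.n_current_limits	= ARRAY_SIZE(mp5416_I_limits1),
		.csel_reg		= MP5416_REG_ILIM,
		.csel_mask		= MP5416_MASK_BUCK1_ILIM,
		.vsel_reg		= MP5416_REG_BUCK1,
		.vsel_mask		= MP5416_MASK_VSET,
		.enable_reg		= MP5416_REG_BUCK1,
		.enable_mask		= MP5416_REGULATOR_EN,
		.active_discharge_on	= BIT(0),
		.active_discharge_reg	= MP5416_REG_CTL1,
		.active_discharge_mask	= BIT(0),
		.owner			= THIS_MODULE,
	},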
+static int mp5416_i2c_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct regulator_config config = { NULL, }; + struct regulator_dev *rdev; + struct regmap *regmap; + int i; + + regmap = devm_regmap_init_i2c(client, &mp5416_regmap_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to allocate regmap!\n"); + return PTR_ERR(regmap); + } + + config.dev = dev; + config.regmap = regmap; + + for (i = 0; i < MP5416_MAX_REGULATORS; i++) { + rdev = devm_regulator_register(dev, + &mp5416_regulators_desc[i], + &config); + if (IS_ERR(rdev)) { + dev_err(dev, "Failed to register regulator!\n"); + return PTR_ERR(rdev); + } + } + + return 0; +} + +static const struct of_device_id mp5416_of_match[] = { + { .compatible = "mps,mp5416" }, + {}, +}; +MODULE_DEVICE_TABLE(of, mp5416_of_match); + +static const struct i2c_device_id mp5416_id[] = { + { "mp5416", }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, mp5416_id); + +static struct i2c_driver mp5416_regulator_driver = { + .driver = { + .name = "mp5416", + .of_match_table = of_match_ptr(mp5416_of_match), + }, + .probe_new = mp5416_i2c_probe, + .id_table = mp5416_id, +}; +module_i2c_driver(mp5416_regulator_driver); + +MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>"); +MODULE_DESCRIPTION("MP5416 PMIC regulator driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c index 1d26b506ee5b..6ed987648188 100644 --- a/drivers/regulator/mp8859.c +++ b/drivers/regulator/mp8859.c @@ -95,6 +95,7 @@ static const struct regulator_desc mp8859_regulators[] = { .id = 0, .type = REGULATOR_VOLTAGE, .name = "mp8859_dcdc", + .supply_name = "vin", .of_match = of_match_ptr("mp8859_dcdc"), .n_voltages = VOL_MAX_IDX + 1, .linear_ranges = mp8859_dcdc_ranges, diff --git a/drivers/regulator/mp886x.c b/drivers/regulator/mp886x.c new file mode 100644 index 000000000000..1786f7162019 --- /dev/null +++ b/drivers/regulator/mp886x.c @@ -0,0 +1,290 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// MP8867/MP8869 regulator driver +// +// Copyright (C) 2020 Synaptics Incorporated +// +// Author: Jisheng Zhang <jszhang@kernel.org> + +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/of_regulator.h> + +#define MP886X_VSEL 0x00 +#define MP886X_V_BOOT (1 << 7) +#define MP886X_SYSCNTLREG1 0x01 +#define MP886X_MODE (1 << 0) +#define MP886X_GO (1 << 6) +#define MP886X_EN (1 << 7) + +struct mp886x_device_info { + struct device *dev; + struct regulator_desc desc; + struct regulator_init_data *regulator; + struct gpio_desc *en_gpio; + u32 r[2]; + unsigned int sel; +}; + +static int mp886x_set_mode(struct regulator_dev *rdev, unsigned int mode) +{ + switch (mode) { + case REGULATOR_MODE_FAST: + regmap_update_bits(rdev->regmap, MP886X_SYSCNTLREG1, + MP886X_MODE, MP886X_MODE); + break; + case REGULATOR_MODE_NORMAL: + regmap_update_bits(rdev->regmap, MP886X_SYSCNTLREG1, + MP886X_MODE, 0); + break; + default: + return -EINVAL; + } + return 0; +} + +static unsigned int mp886x_get_mode(struct regulator_dev *rdev) +{ + u32 val; + int ret; + + ret = regmap_read(rdev->regmap, MP886X_SYSCNTLREG1, &val); + if (ret < 0) + return ret; + if (val & MP886X_MODE) + return REGULATOR_MODE_FAST; + else + return REGULATOR_MODE_NORMAL; +} + +static int mp8869_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel) +{ + int ret; + + ret = regmap_update_bits(rdev->regmap, MP886X_SYSCNTLREG1, + 
MP886X_GO, MP886X_GO); + if (ret < 0) + return ret; + + sel <<= ffs(rdev->desc->vsel_mask) - 1; + return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg, + MP886X_V_BOOT | rdev->desc->vsel_mask, sel); +} + +static inline unsigned int mp8869_scale(unsigned int uv, u32 r1, u32 r2) +{ + u32 tmp = uv * r1 / r2; + + return uv + tmp; +} + +static int mp8869_get_voltage_sel(struct regulator_dev *rdev) +{ + struct mp886x_device_info *di = rdev_get_drvdata(rdev); + int ret, uv; + unsigned int val; + bool fbloop; + + ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val); + if (ret) + return ret; + + fbloop = val & MP886X_V_BOOT; + if (fbloop) { + uv = rdev->desc->min_uV; + uv = mp8869_scale(uv, di->r[0], di->r[1]); + return regulator_map_voltage_linear(rdev, uv, uv); + } + + val &= rdev->desc->vsel_mask; + val >>= ffs(rdev->desc->vsel_mask) - 1; + + return val; +} + +static const struct regulator_ops mp8869_regulator_ops = { + .set_voltage_sel = mp8869_set_voltage_sel, + .get_voltage_sel = mp8869_get_voltage_sel, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .map_voltage = regulator_map_voltage_linear, + .list_voltage = regulator_list_voltage_linear, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_mode = mp886x_set_mode, + .get_mode = mp886x_get_mode, +}; + +static int mp8867_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel) +{ + struct mp886x_device_info *di = rdev_get_drvdata(rdev); + int ret, delta; + + ret = mp8869_set_voltage_sel(rdev, sel); + if (ret < 0) + return ret; + + delta = di->sel - sel; + if (abs(delta) <= 5) + ret = regmap_update_bits(rdev->regmap, MP886X_SYSCNTLREG1, + MP886X_GO, 0); + di->sel = sel; + + return ret; +} + +static int mp8867_get_voltage_sel(struct regulator_dev *rdev) +{ + struct mp886x_device_info *di = rdev_get_drvdata(rdev); + int ret, uv; + unsigned int val; + bool fbloop; + + ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val); + if (ret) + return ret; + + fbloop = val & MP886X_V_BOOT; + + val &= rdev->desc->vsel_mask; + val >>= ffs(rdev->desc->vsel_mask) - 1; + + if (fbloop) { + uv = regulator_list_voltage_linear(rdev, val); + uv = mp8869_scale(uv, di->r[0], di->r[1]); + return regulator_map_voltage_linear(rdev, uv, uv); + } + + return val; +} + +static const struct regulator_ops mp8867_regulator_ops = { + .set_voltage_sel = mp8867_set_voltage_sel, + .get_voltage_sel = mp8867_get_voltage_sel, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .map_voltage = regulator_map_voltage_linear, + .list_voltage = regulator_list_voltage_linear, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_mode = mp886x_set_mode, + .get_mode = mp886x_get_mode, +}; + +static int mp886x_regulator_register(struct mp886x_device_info *di, + struct regulator_config *config) +{ + struct regulator_desc *rdesc = &di->desc; + struct regulator_dev *rdev; + + rdesc->name = "mp886x-reg"; + rdesc->supply_name = "vin"; + rdesc->ops = of_device_get_match_data(di->dev); + rdesc->type = REGULATOR_VOLTAGE; + rdesc->n_voltages = 128; + rdesc->enable_reg = MP886X_SYSCNTLREG1; + rdesc->enable_mask = MP886X_EN; + rdesc->min_uV = 600000; + rdesc->uV_step = 10000; + rdesc->vsel_reg = MP886X_VSEL; + rdesc->vsel_mask = 0x3f; + rdesc->owner = THIS_MODULE; + + rdev = devm_regulator_register(di->dev, &di->desc, config); + if (IS_ERR(rdev)) + return PTR_ERR(rdev); + di->sel = 
rdesc->ops->get_voltage_sel(rdev); + return 0; +} + +static const struct regmap_config mp886x_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +static int mp886x_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device_node *np = dev->of_node; + struct mp886x_device_info *di; + struct regulator_config config = { }; + struct regmap *regmap; + int ret; + + di = devm_kzalloc(dev, sizeof(struct mp886x_device_info), GFP_KERNEL); + if (!di) + return -ENOMEM; + + di->regulator = of_get_regulator_init_data(dev, np, &di->desc); + if (!di->regulator) { + dev_err(dev, "Platform data not found!\n"); + return -EINVAL; + } + + ret = of_property_read_u32_array(np, "mps,fb-voltage-divider", + di->r, 2); + if (ret) + return ret; + + di->en_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH); + if (IS_ERR(di->en_gpio)) + return PTR_ERR(di->en_gpio); + + di->dev = dev; + + regmap = devm_regmap_init_i2c(client, &mp886x_regmap_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to allocate regmap!\n"); + return PTR_ERR(regmap); + } + i2c_set_clientdata(client, di); + + config.dev = di->dev; + config.init_data = di->regulator; + config.regmap = regmap; + config.driver_data = di; + config.of_node = np; + + ret = mp886x_regulator_register(di, &config); + if (ret < 0) + dev_err(dev, "Failed to register regulator!\n"); + return ret; +} + +static const struct of_device_id mp886x_dt_ids[] = { + { + .compatible = "mps,mp8867", + .data = &mp8867_regulator_ops + }, + { + .compatible = "mps,mp8869", + .data = &mp8869_regulator_ops + }, + { } +}; +MODULE_DEVICE_TABLE(of, mp886x_dt_ids); + +static const struct i2c_device_id mp886x_id[] = { + { "mp886x", }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, mp886x_id); + +static struct i2c_driver mp886x_regulator_driver = { + .driver = { + .name = "mp886x-regulator", + .of_match_table = of_match_ptr(mp886x_dt_ids), + }, + .probe = mp886x_i2c_probe, + .id_table = mp886x_id, +}; +module_i2c_driver(mp886x_regulator_driver); + +MODULE_AUTHOR("Jisheng Zhang <jszhang@kernel.org>"); +MODULE_DESCRIPTION("MP886x regulator driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c index e74e11101fc1..638329bd0745 100644 --- a/drivers/regulator/pwm-regulator.c +++ b/drivers/regulator/pwm-regulator.c @@ -354,7 +354,11 @@ static int pwm_regulator_probe(struct platform_device *pdev) drvdata->pwm = devm_pwm_get(&pdev->dev, NULL); if (IS_ERR(drvdata->pwm)) { ret = PTR_ERR(drvdata->pwm); - dev_err(&pdev->dev, "Failed to get PWM: %d\n", ret); + if (ret == -EPROBE_DEFER) + dev_dbg(&pdev->dev, + "Failed to get PWM, deferring probe\n"); + else + dev_err(&pdev->dev, "Failed to get PWM: %d\n", ret); return ret; } diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c index 7407cd5a1b74..7fc97f23fcf4 100644 --- a/drivers/regulator/qcom_rpm-regulator.c +++ b/drivers/regulator/qcom_rpm-regulator.c @@ -925,12 +925,21 @@ static const struct rpm_regulator_data rpm_pm8921_regulators[] = { { } }; +static const struct rpm_regulator_data rpm_smb208_regulators[] = { + { "s1a", QCOM_RPM_SMB208_S1a, &smb208_smps, "vin_s1a" }, + { "s1b", QCOM_RPM_SMB208_S1b, &smb208_smps, "vin_s1b" }, + { "s2a", QCOM_RPM_SMB208_S2a, &smb208_smps, "vin_s2a" }, + { "s2b", QCOM_RPM_SMB208_S2b, &smb208_smps, "vin_s2b" }, + { } +}; + static const struct of_device_id rpm_of_match[] = { { .compatible = "qcom,rpm-pm8018-regulators", .data = &rpm_pm8018_regulators }, { 
.compatible = "qcom,rpm-pm8058-regulators", .data = &rpm_pm8058_regulators }, { .compatible = "qcom,rpm-pm8901-regulators", .data = &rpm_pm8901_regulators }, { .compatible = "qcom,rpm-pm8921-regulators", .data = &rpm_pm8921_regulators }, + { .compatible = "qcom,rpm-smb208-regulators", .data = &rpm_smb208_regulators }, { } }; MODULE_DEVICE_TABLE(of, rpm_of_match); diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index fff8d5fdef6a..fdde4195cefb 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -445,6 +445,44 @@ static const struct regulator_desc pm8994_lnldo = { .ops = &rpm_smps_ldo_ops_fixed, }; +static const struct regulator_desc pmi8994_ftsmps = { + .linear_ranges = (struct regulator_linear_range[]) { + REGULATOR_LINEAR_RANGE(350000, 0, 199, 5000), + REGULATOR_LINEAR_RANGE(700000, 200, 349, 10000), + }, + .n_linear_ranges = 2, + .n_voltages = 350, + .ops = &rpm_smps_ldo_ops, +}; + +static const struct regulator_desc pmi8994_hfsmps = { + .linear_ranges = (struct regulator_linear_range[]) { + REGULATOR_LINEAR_RANGE(350000, 0, 80, 12500), + REGULATOR_LINEAR_RANGE(700000, 81, 141, 25000), + }, + .n_linear_ranges = 2, + .n_voltages = 142, + .ops = &rpm_smps_ldo_ops, +}; + +static const struct regulator_desc pmi8994_bby = { + .linear_ranges = (struct regulator_linear_range[]) { + REGULATOR_LINEAR_RANGE(3000000, 0, 44, 50000), + }, + .n_linear_ranges = 1, + .n_voltages = 45, + .ops = &rpm_bob_ops, +}; + +static const struct regulator_desc pmi8994_boost = { + .linear_ranges = (struct regulator_linear_range[]) { + REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000), + }, + .n_linear_ranges = 1, + .n_voltages = 31, + .ops = &rpm_smps_ldo_ops, +}; + static const struct regulator_desc pm8998_ftsmps = { .linear_ranges = (struct regulator_linear_range[]) { REGULATOR_LINEAR_RANGE(320000, 0, 258, 4000), @@ -780,6 +818,14 @@ static const struct rpm_regulator_data rpm_pm8994_regulators[] = { {} }; +static const struct rpm_regulator_data rpm_pmi8994_regulators[] = { + { "s1", QCOM_SMD_RPM_SMPB, 1, &pmi8994_ftsmps, "vdd_s1" }, + { "s2", QCOM_SMD_RPM_SMPB, 2, &pmi8994_hfsmps, "vdd_s2" }, + { "s2", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" }, + { "boost-bypass", QCOM_SMD_RPM_BBYB, 1, &pmi8994_bby, "vdd_bst_byp" }, + {} +}; + static const struct rpm_regulator_data rpm_pm8998_regulators[] = { { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8998_ftsmps, "vdd_s1" }, { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8998_ftsmps, "vdd_s2" }, @@ -862,6 +908,7 @@ static const struct of_device_id rpm_of_match[] = { { .compatible = "qcom,rpm-pm8994-regulators", .data = &rpm_pm8994_regulators }, { .compatible = "qcom,rpm-pm8998-regulators", .data = &rpm_pm8998_regulators }, { .compatible = "qcom,rpm-pma8084-regulators", .data = &rpm_pma8084_regulators }, + { .compatible = "qcom,rpm-pmi8994-regulators", .data = &rpm_pmi8994_regulators }, { .compatible = "qcom,rpm-pmi8998-regulators", .data = &rpm_pmi8998_regulators }, { .compatible = "qcom,rpm-pms405-regulators", .data = &rpm_pms405_regulators }, {} diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c index bdfaf7edb75a..992bc18101ef 100644 --- a/drivers/regulator/stm32-vrefbuf.c +++ b/drivers/regulator/stm32-vrefbuf.c @@ -88,7 +88,7 @@ static int stm32_vrefbuf_disable(struct regulator_dev *rdev) } val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); - val = (val & ~STM32_ENVR) | STM32_HIZ; + val &= ~STM32_ENVR; writel_relaxed(val, priv->base + STM32_VREFBUF_CSR); 
pm_runtime_mark_last_busy(priv->dev); @@ -175,6 +175,7 @@ static const struct regulator_desc stm32_vrefbuf_regu = { .volt_table = stm32_vrefbuf_voltages, .n_voltages = ARRAY_SIZE(stm32_vrefbuf_voltages), .ops = &stm32_vrefbuf_volt_ops, + .off_on_delay = 1000, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }; diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 461b0e506a26..d9efbfd29646 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -51,6 +51,7 @@ config RESET_BRCMSTB config RESET_BRCMSTB_RESCAL bool "Broadcom STB RESCAL reset controller" + depends on HAS_IOMEM default ARCH_BRCMSTB || COMPILE_TEST help This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on @@ -73,7 +74,7 @@ config RESET_IMX7 config RESET_INTEL_GW bool "Intel Reset Controller Driver" - depends on OF + depends on OF && HAS_IOMEM select REGMAP_MMIO help This enables the reset controller driver for Intel Gateway SoCs. diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 34c8b6c7e095..8e503881d9d6 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -327,6 +327,7 @@ config RTC_DRV_MAX6900 config RTC_DRV_MAX8907 tristate "Maxim MAX8907" depends on MFD_MAX8907 || COMPILE_TEST + select REGMAP_IRQ help If you say yes here you will get support for the RTC of Maxim MAX8907 PMIC. diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 4ac8f19fb631..24c7dfa1bd7d 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -12,10 +12,6 @@ obj-$(CONFIG_RTC_CLASS) += rtc-core.o obj-$(CONFIG_RTC_MC146818_LIB) += rtc-mc146818-lib.o rtc-core-y := class.o interface.o -ifdef CONFIG_RTC_DRV_EFI -rtc-core-y += rtc-efi-platform.o -endif - rtc-core-$(CONFIG_RTC_NVMEM) += nvmem.o rtc-core-$(CONFIG_RTC_INTF_DEV) += dev.o rtc-core-$(CONFIG_RTC_INTF_PROC) += proc.o diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index b795fe4cbd2e..82bfe009a50f 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -1345,7 +1345,7 @@ static const struct pnp_device_id rtc_ids[] = { MODULE_DEVICE_TABLE(pnp, rtc_ids); static struct pnp_driver cmos_pnp_driver = { - .name = (char *) driver_name, + .name = driver_name, .id_table = rtc_ids, .probe = cmos_pnp_probe, .remove = cmos_pnp_remove, diff --git a/drivers/rtc/rtc-efi-platform.c b/drivers/rtc/rtc-efi-platform.c deleted file mode 100644 index 6c037dc4e3dc..000000000000 --- a/drivers/rtc/rtc-efi-platform.c +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Moved from arch/ia64/kernel/time.c - * - * Copyright (C) 1998-2003 Hewlett-Packard Co - * Stephane Eranian <eranian@hpl.hp.com> - * David Mosberger <davidm@hpl.hp.com> - * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> - * Copyright (C) 1999-2000 VA Linux Systems - * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com> - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/efi.h> -#include <linux/platform_device.h> - -static struct platform_device rtc_efi_dev = { - .name = "rtc-efi", - .id = -1, -}; - -static int __init rtc_init(void) -{ - if (efi_enabled(EFI_RUNTIME_SERVICES)) - if (platform_device_register(&rtc_efi_dev) < 0) - pr_err("unable to register rtc device...\n"); - - /* not necessarily an error */ - return 0; -} -module_init(rtc_init); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 6cca72782af6..cf87eb27879f 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -178,6 +178,8 @@ 
struct dasd_block *dasd_alloc_block(void) (unsigned long) block); INIT_LIST_HEAD(&block->ccw_queue); spin_lock_init(&block->queue_lock); + INIT_LIST_HEAD(&block->format_list); + spin_lock_init(&block->format_lock); timer_setup(&block->timer, dasd_block_timeout, 0); spin_lock_init(&block->profile.lock); @@ -1779,20 +1781,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, if (dasd_ese_needs_format(cqr->block, irb)) { if (rq_data_dir((struct request *)cqr->callback_data) == READ) { - device->discipline->ese_read(cqr); + device->discipline->ese_read(cqr, irb); cqr->status = DASD_CQR_SUCCESS; cqr->stopclk = now; dasd_device_clear_timer(device); dasd_schedule_device_bh(device); return; } - fcqr = device->discipline->ese_format(device, cqr); + fcqr = device->discipline->ese_format(device, cqr, irb); if (IS_ERR(fcqr)) { + if (PTR_ERR(fcqr) == -EINVAL) { + cqr->status = DASD_CQR_ERROR; + return; + } /* * If we can't format now, let the request go * one extra round. Maybe we can format later. */ cqr->status = DASD_CQR_QUEUED; + dasd_schedule_device_bh(device); + return; } else { fcqr->status = DASD_CQR_QUEUED; cqr->status = DASD_CQR_QUEUED; @@ -2748,11 +2756,13 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) { struct request *req; blk_status_t error = BLK_STS_OK; + unsigned int proc_bytes; int status; req = (struct request *) cqr->callback_data; dasd_profile_end(cqr->block, cqr, req); + proc_bytes = cqr->proc_bytes; status = cqr->block->base->discipline->free_cp(cqr, req); if (status < 0) error = errno_to_blk_status(status); @@ -2783,7 +2793,18 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) blk_mq_end_request(req, error); blk_mq_run_hw_queues(req->q, true); } else { - blk_mq_complete_request(req); + /* + * Partially completed requests can happen with ESE devices. + * During read we might have gotten an NRF error and have to + * complete a request partially.
+ */ + if (proc_bytes) { + blk_update_request(req, BLK_STS_OK, + blk_rq_bytes(req) - proc_bytes); + blk_mq_requeue_request(req, true); + } else { + blk_mq_complete_request(req); + } } } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index a28b9ff82378..ad44d22e8859 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -207,6 +207,45 @@ static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head) geo->head |= head; } +/* + * calculate failing track from sense data depending if + * it is an EAV device or not + */ +static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device, + sector_t *track) +{ + struct dasd_eckd_private *private = device->private; + u8 *sense = NULL; + u32 cyl; + u8 head; + + sense = dasd_get_sense(irb); + if (!sense) { + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "ESE error no sense data\n"); + return -EINVAL; + } + if (!(sense[27] & DASD_SENSE_BIT_2)) { + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "ESE error no valid track data\n"); + return -EINVAL; + } + + if (sense[27] & DASD_SENSE_BIT_3) { + /* enhanced addressing */ + cyl = sense[30] << 20; + cyl |= (sense[31] & 0xF0) << 12; + cyl |= sense[28] << 8; + cyl |= sense[29]; + } else { + cyl = sense[29] << 8; + cyl |= sense[30]; + } + head = sense[31] & 0x0F; + *track = cyl * private->rdc_data.trk_per_cyl + head; + return 0; +} + static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data, struct dasd_device *device) { @@ -2986,6 +3025,37 @@ static int dasd_eckd_format_device(struct dasd_device *base, 0, NULL); } +static bool test_and_set_format_track(struct dasd_format_entry *to_format, + struct dasd_block *block) +{ + struct dasd_format_entry *format; + unsigned long flags; + bool rc = false; + + spin_lock_irqsave(&block->format_lock, flags); + list_for_each_entry(format, &block->format_list, list) { + if (format->track == to_format->track) { + rc = true; + goto out; + } + } + list_add_tail(&to_format->list, &block->format_list); + +out: + spin_unlock_irqrestore(&block->format_lock, flags); + return rc; +} + +static void clear_format_track(struct dasd_format_entry *format, + struct dasd_block *block) +{ + unsigned long flags; + + spin_lock_irqsave(&block->format_lock, flags); + list_del_init(&format->list); + spin_unlock_irqrestore(&block->format_lock, flags); +} + /* * Callback function to free ESE format requests. 
*/ @@ -2993,15 +3063,19 @@ static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data) { struct dasd_device *device = cqr->startdev; struct dasd_eckd_private *private = device->private; + struct dasd_format_entry *format = data; + clear_format_track(format, cqr->basedev->block); private->count--; dasd_ffree_request(cqr, device); } static struct dasd_ccw_req * -dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) +dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr, + struct irb *irb) { struct dasd_eckd_private *private; + struct dasd_format_entry *format; struct format_data_t fdata; unsigned int recs_per_trk; struct dasd_ccw_req *fcqr; @@ -3011,23 +3085,39 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) struct request *req; sector_t first_trk; sector_t last_trk; + sector_t curr_trk; int rc; req = cqr->callback_data; - base = cqr->block->base; + block = cqr->block; + base = block->base; private = base->private; - block = base->block; blksize = block->bp_block; recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); + format = &startdev->format_entry; first_trk = blk_rq_pos(req) >> block->s2b_shift; sector_div(first_trk, recs_per_trk); last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; sector_div(last_trk, recs_per_trk); + rc = dasd_eckd_track_from_irb(irb, base, &curr_trk); + if (rc) + return ERR_PTR(rc); - fdata.start_unit = first_trk; - fdata.stop_unit = last_trk; + if (curr_trk < first_trk || curr_trk > last_trk) { + DBF_DEV_EVENT(DBF_WARNING, startdev, + "ESE error track %llu not within range %llu - %llu\n", + curr_trk, first_trk, last_trk); + return ERR_PTR(-EINVAL); + } + format->track = curr_trk; + /* test if track is already in formatting by another thread */ + if (test_and_set_format_track(format, block)) + return ERR_PTR(-EEXIST); + + fdata.start_unit = curr_trk; + fdata.stop_unit = curr_trk; fdata.blksize = blksize; fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0; @@ -3044,6 +3134,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) return fcqr; fcqr->callback = dasd_eckd_ese_format_cb; + fcqr->callback_data = (void *) format; return fcqr; } @@ -3051,29 +3142,87 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) /* * When data is read from an unformatted area of an ESE volume, this function * returns zeroed data and thereby mimics a read of zero data. + * + * The first unformatted track is the one that got the NRF error, the address is + * encoded in the sense data. + * + * All tracks before have returned valid data and should not be touched. + * All tracks after the unformatted track might be formatted or not. This is + * currently not known, remember the processed data and return the remainder of + * the request to the blocklayer in __dasd_cleanup_cqr(). 
*/ -static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr) +static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb) { + struct dasd_eckd_private *private; + sector_t first_trk, last_trk; + sector_t first_blk, last_blk; unsigned int blksize, off; + unsigned int recs_per_trk; struct dasd_device *base; struct req_iterator iter; + struct dasd_block *block; + unsigned int skip_block; + unsigned int blk_count; struct request *req; struct bio_vec bv; + sector_t curr_trk; + sector_t end_blk; char *dst; + int rc; req = (struct request *) cqr->callback_data; base = cqr->block->base; blksize = base->block->bp_block; + block = cqr->block; + private = base->private; + skip_block = 0; + blk_count = 0; + + recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); + first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift; + sector_div(first_trk, recs_per_trk); + last_trk = last_blk = + (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; + sector_div(last_trk, recs_per_trk); + rc = dasd_eckd_track_from_irb(irb, base, &curr_trk); + if (rc) + return rc; + + /* sanity check if the current track from sense data is valid */ + if (curr_trk < first_trk || curr_trk > last_trk) { + DBF_DEV_EVENT(DBF_WARNING, base, + "ESE error track %llu not within range %llu - %llu\n", + curr_trk, first_trk, last_trk); + return -EINVAL; + } + + /* + * if not the first track got the NRF error we have to skip over valid + * blocks + */ + if (curr_trk != first_trk) + skip_block = curr_trk * recs_per_trk - first_blk; + + /* we have no information beyond the current track */ + end_blk = (curr_trk + 1) * recs_per_trk; rq_for_each_segment(bv, req, iter) { dst = page_address(bv.bv_page) + bv.bv_offset; for (off = 0; off < bv.bv_len; off += blksize) { - if (dst && rq_data_dir(req) == READ) { + if (first_blk + blk_count >= end_blk) { + cqr->proc_bytes = blk_count * blksize; + return 0; + } + if (dst && !skip_block) { dst += off; memset(dst, 0, blksize); + } else { + skip_block--; } + blk_count++; } } + return 0; } /* diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 91c9f9586e0f..fa552f9f1666 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -187,6 +187,7 @@ struct dasd_ccw_req { void (*callback)(struct dasd_ccw_req *, void *data); void *callback_data; + unsigned int proc_bytes; /* bytes for partial completion */ }; /* @@ -387,8 +388,9 @@ struct dasd_discipline { int (*ext_pool_warn_thrshld)(struct dasd_device *); int (*ext_pool_oos)(struct dasd_device *); int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *); - struct dasd_ccw_req *(*ese_format)(struct dasd_device *, struct dasd_ccw_req *); - void (*ese_read)(struct dasd_ccw_req *); + struct dasd_ccw_req *(*ese_format)(struct dasd_device *, + struct dasd_ccw_req *, struct irb *); + int (*ese_read)(struct dasd_ccw_req *, struct irb *); }; extern struct dasd_discipline *dasd_diag_discipline_pointer; @@ -474,6 +476,11 @@ struct dasd_profile { spinlock_t lock; }; +struct dasd_format_entry { + struct list_head list; + sector_t track; +}; + struct dasd_device { /* Block device stuff. 
*/ struct dasd_block *block; @@ -539,6 +546,7 @@ struct dasd_device { struct dentry *debugfs_dentry; struct dentry *hosts_dentry; struct dasd_profile profile; + struct dasd_format_entry format_entry; }; struct dasd_block { @@ -564,6 +572,9 @@ struct dasd_block { struct dentry *debugfs_dentry; struct dasd_profile profile; + + struct list_head format_list; + spinlock_t format_lock; }; struct dasd_attention_data { diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 63502ca537eb..80d22290f268 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -636,10 +636,10 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char } dev_info->gd->major = dcssblk_major; dev_info->gd->fops = &dcssblk_devops; - dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL); + dev_info->dcssblk_queue = + blk_alloc_queue(dcssblk_make_request, NUMA_NO_NODE); dev_info->gd->queue = dev_info->dcssblk_queue; dev_info->gd->private_data = dev_info; - blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue); diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 3df5d68d09f0..45a04daec89e 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -343,14 +343,14 @@ static int __init xpram_setup_blkdev(void) xpram_disks[i] = alloc_disk(1); if (!xpram_disks[i]) goto out; - xpram_queues[i] = blk_alloc_queue(GFP_KERNEL); + xpram_queues[i] = blk_alloc_queue(xpram_make_request, + NUMA_NO_NODE); if (!xpram_queues[i]) { put_disk(xpram_disks[i]); goto out; } blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]); - blk_queue_make_request(xpram_queues[i], xpram_make_request); blk_queue_logical_block_size(xpram_queues[i], 4096); } diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index da642e811f7f..4dd2eb634856 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -303,8 +303,10 @@ static void * cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) { struct ccwdev_iter *iter; + loff_t p = *offset; - if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) + (*offset)++; + if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) return NULL; iter = it; if (iter->devno == __MAX_SUBCHANNEL) { @@ -314,7 +316,6 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) return NULL; } else iter->devno++; - (*offset)++; return iter; } diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 51038ec309c1..dfcbe54591fb 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -135,7 +135,7 @@ static ssize_t chp_measurement_chars_read(struct file *filp, struct channel_path *chp; struct device *device; - device = container_of(kobj, struct device, kobj); + device = kobj_to_dev(kobj); chp = to_channelpath(device); if (chp->cmg == -1) return 0; @@ -184,7 +184,7 @@ static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj, struct device *device; unsigned int size; - device = container_of(kobj, struct device, kobj); + device = kobj_to_dev(kobj); chp = to_channelpath(device); css = to_css(chp->dev.parent); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 3ab8e80d7bbc..e115623b86b2 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> 
#include <linux/slab.h> #include <linux/export.h> +#include <linux/io.h> #include <asm/qdio.h> #include "cio.h" @@ -205,7 +206,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, /* fill in sl */ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) - q->sl->element[j].sbal = (unsigned long)q->sbal[j]; + q->sl->element[j].sbal = virt_to_phys(q->sbal[j]); } static void setup_queues(struct qdio_irq *irq_ptr, diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c index d4caf46ff9df..2afe2153b34e 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.c +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -887,7 +887,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, /* empty pin tag */ *p++ = 0x04; *p++ = 0; - /* encrytped key value tag and bytes */ + /* encrypted key value tag and bytes */ p += asn1tag_write(p, 0x04, enckey, enckeysize); /* reply cprb and payload */ @@ -1095,7 +1095,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, /* Step 1: generate AES 256 bit random kek key */ rc = ep11_genaeskey(card, domain, 256, - 0x00006c00, /* EN/DECRYTP, WRAP/UNWRAP */ + 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */ kek, &keklen); if (rc) { DEBUG_ERR( diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 9575a627a1e1..468cada49e72 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -369,7 +369,7 @@ enum qeth_qdio_info_states { struct qeth_buffer_pool_entry { struct list_head list; struct list_head init_list; - void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER]; + struct page *elements[QDIO_MAX_ELEMENTS_PER_BUFFER]; }; struct qeth_qdio_buffer_pool { @@ -983,7 +983,7 @@ extern const struct attribute_group qeth_device_blkt_group; extern const struct device_type qeth_generic_devtype; const char *qeth_get_cardname_short(struct qeth_card *); -int qeth_realloc_buffer_pool(struct qeth_card *, int); +int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count); int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); void qeth_core_free_discipline(struct qeth_card *); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9639938581f5..6d3f2f14b414 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -65,7 +65,6 @@ static struct lock_class_key qdio_out_skb_queue_key; static void qeth_issue_next_read_cb(struct qeth_card *card, struct qeth_cmd_buffer *iob, unsigned int data_length); -static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); static void qeth_free_qdio_queues(struct qeth_card *card); static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, @@ -212,49 +211,121 @@ void qeth_clear_working_pool_list(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list); +static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(entry->elements); i++) { + if (entry->elements[i]) + __free_page(entry->elements[i]); + } + + kfree(entry); +} + +static void qeth_free_buffer_pool(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list, + init_list) { + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + } +} + +static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages) +{ + struct qeth_buffer_pool_entry *entry; + unsigned int i; + + entry = 
kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return NULL; + + for (i = 0; i < pages; i++) { + entry->elements[i] = alloc_page(GFP_KERNEL); + + if (!entry->elements[i]) { + qeth_free_pool_entry(entry); + return NULL; + } + } + + return entry; +} + static int qeth_alloc_buffer_pool(struct qeth_card *card) { - struct qeth_buffer_pool_entry *pool_entry; - void *ptr; - int i, j; + unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); + unsigned int i; QETH_CARD_TEXT(card, 5, "alocpool"); for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { - pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL); - if (!pool_entry) { + struct qeth_buffer_pool_entry *entry; + + entry = qeth_alloc_pool_entry(buf_elements); + if (!entry) { qeth_free_buffer_pool(card); return -ENOMEM; } - for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) { - ptr = (void *) __get_free_page(GFP_KERNEL); - if (!ptr) { - while (j > 0) - free_page((unsigned long) - pool_entry->elements[--j]); - kfree(pool_entry); - qeth_free_buffer_pool(card); - return -ENOMEM; - } - pool_entry->elements[j] = ptr; - } - list_add(&pool_entry->init_list, - &card->qdio.init_pool.entry_list); + + list_add(&entry->init_list, &card->qdio.init_pool.entry_list); } return 0; } -int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) +int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count) { + unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); + struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool; + struct qeth_buffer_pool_entry *entry, *tmp; + int delta = count - pool->buf_count; + LIST_HEAD(entries); + QETH_CARD_TEXT(card, 2, "realcbp"); - /* TODO: steel/add buffers from/to a running card's buffer pool (?) */ - qeth_clear_working_pool_list(card); - qeth_free_buffer_pool(card); - card->qdio.in_buf_pool.buf_count = bufcnt; - card->qdio.init_pool.buf_count = bufcnt; - return qeth_alloc_buffer_pool(card); + /* Defer until queue is allocated: */ + if (!card->qdio.in_q) + goto out; + + /* Remove entries from the pool: */ + while (delta < 0) { + entry = list_first_entry(&pool->entry_list, + struct qeth_buffer_pool_entry, + init_list); + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + + delta++; + } + + /* Allocate additional entries: */ + while (delta > 0) { + entry = qeth_alloc_pool_entry(buf_elements); + if (!entry) { + list_for_each_entry_safe(entry, tmp, &entries, + init_list) { + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + } + + return -ENOMEM; + } + + list_add(&entry->init_list, &entries); + + delta--; + } + + list_splice(&entries, &pool->entry_list); + +out: + card->qdio.in_buf_pool.buf_count = count; + pool->buf_count = count; + return 0; } -EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool); +EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool); static void qeth_free_qdio_queue(struct qeth_qdio_q *q) { @@ -1128,9 +1199,10 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, qeth_tx_complete_buf(buf, error, budget); for (i = 0; i < queue->max_elements; ++i) { - if (buf->buffer->element[i].addr && buf->is_header[i]) - kmem_cache_free(qeth_core_header_cache, - buf->buffer->element[i].addr); + void *data = phys_to_virt(buf->buffer->element[i].addr); + + if (data && buf->is_header[i]) + kmem_cache_free(qeth_core_header_cache, data); buf->is_header[i] = 0; } @@ -1169,19 +1241,6 @@ void qeth_drain_output_queues(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_drain_output_queues); -static void qeth_free_buffer_pool(struct qeth_card *card) -{ - struct qeth_buffer_pool_entry 
*pool_entry, *tmp; - int i = 0; - list_for_each_entry_safe(pool_entry, tmp, - &card->qdio.init_pool.entry_list, init_list){ - for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) - free_page((unsigned long)pool_entry->elements[i]); - list_del(&pool_entry->init_list); - kfree(pool_entry); - } -} - static int qeth_osa_set_output_queues(struct qeth_card *card, bool single) { unsigned int count = single ? 1 : card->dev->num_tx_queues; @@ -1203,7 +1262,6 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single) if (count == 1) dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); - card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE; card->qdio.no_out_queues = count; return 0; } @@ -2392,7 +2450,6 @@ static void qeth_free_qdio_queues(struct qeth_card *card) return; qeth_free_cq(card); - cancel_delayed_work_sync(&card->buffer_reclaim_work); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (card->qdio.in_q->bufs[j].rx_skb) dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); @@ -2574,7 +2631,6 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( struct list_head *plh; struct qeth_buffer_pool_entry *entry; int i, free; - struct page *page; if (list_empty(&card->qdio.in_buf_pool.entry_list)) return NULL; @@ -2583,7 +2639,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( entry = list_entry(plh, struct qeth_buffer_pool_entry, list); free = 1; for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { - if (page_count(virt_to_page(entry->elements[i])) > 1) { + if (page_count(entry->elements[i]) > 1) { free = 0; break; } @@ -2598,15 +2654,15 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( entry = list_entry(card->qdio.in_buf_pool.entry_list.next, struct qeth_buffer_pool_entry, list); for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { - if (page_count(virt_to_page(entry->elements[i])) > 1) { - page = alloc_page(GFP_ATOMIC); - if (!page) { + if (page_count(entry->elements[i]) > 1) { + struct page *page = alloc_page(GFP_ATOMIC); + + if (!page) return NULL; - } else { - free_page((unsigned long)entry->elements[i]); - entry->elements[i] = page_address(page); - QETH_CARD_STAT_INC(card, rx_sg_alloc_page); - } + + __free_page(entry->elements[i]); + entry->elements[i] = page; + QETH_CARD_STAT_INC(card, rx_sg_alloc_page); } } list_del_init(&entry->list); @@ -2624,12 +2680,12 @@ static int qeth_init_input_buffer(struct qeth_card *card, ETH_HLEN + sizeof(struct ipv6hdr)); if (!buf->rx_skb) - return 1; + return -ENOMEM; } pool_entry = qeth_find_free_buffer_pool_entry(card); if (!pool_entry) - return 1; + return -ENOBUFS; /* * since the buffer is accessed only from the input_tasklet @@ -2641,7 +2697,8 @@ static int qeth_init_input_buffer(struct qeth_card *card, buf->pool_entry = pool_entry; for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { buf->buffer->element[i].length = PAGE_SIZE; - buf->buffer->element[i].addr = pool_entry->elements[i]; + buf->buffer->element[i].addr = + page_to_phys(pool_entry->elements[i]); if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; else @@ -2673,10 +2730,15 @@ static int qeth_init_qdio_queues(struct qeth_card *card) /* inbound queue */ qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); memset(&card->rx, 0, sizeof(struct qeth_rx)); + qeth_initialize_working_pool_list(card); /*give only as many buffers to hardware as we have buffer pool entries*/ - for (i = 0; i < card->qdio.in_buf_pool.buf_count 
- 1; ++i) - qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); + for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) { + rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); + if (rc) + return rc; + } + card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1; rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, @@ -3459,9 +3521,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && buffer->element[e].addr) { - unsigned long phys_aob_addr; + unsigned long phys_aob_addr = buffer->element[e].addr; - phys_aob_addr = (unsigned long) buffer->element[e].addr; qeth_qdio_handle_aob(card, phys_aob_addr); ++e; } @@ -3750,7 +3811,7 @@ static unsigned int __qeth_fill_buffer(struct sk_buff *skb, elem_length = min_t(unsigned int, length, PAGE_SIZE - offset_in_page(data)); - buffer->element[element].addr = data; + buffer->element[element].addr = virt_to_phys(data); buffer->element[element].length = elem_length; length -= elem_length; if (is_first_elem) { @@ -3780,7 +3841,7 @@ static unsigned int __qeth_fill_buffer(struct sk_buff *skb, elem_length = min_t(unsigned int, length, PAGE_SIZE - offset_in_page(data)); - buffer->element[element].addr = data; + buffer->element[element].addr = virt_to_phys(data); buffer->element[element].length = elem_length; buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG; @@ -3820,7 +3881,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, int element = buf->next_element_to_fill; is_first_elem = false; - buffer->element[element].addr = hdr; + buffer->element[element].addr = virt_to_phys(hdr); buffer->element[element].length = hd_len; buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; /* remember to free cache-allocated qeth_hdr: */ @@ -4746,10 +4807,10 @@ static void qeth_qdio_establish_cq(struct qeth_card *card, if (card->options.cq == QETH_CQ_ENABLED) { int offset = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1); - for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { - in_sbal_ptrs[offset + i] = (struct qdio_buffer *) - virt_to_phys(card->qdio.c_q->bufs[i].buffer); - } + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) + in_sbal_ptrs[offset + i] = + card->qdio.c_q->bufs[i].buffer; queue_start_poll[card->qdio.no_in_queues - 1] = NULL; } @@ -4783,10 +4844,9 @@ static int qeth_qdio_establish(struct qeth_card *card) rc = -ENOMEM; goto out_free_qib_param; } - for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { - in_sbal_ptrs[i] = (struct qdio_buffer *) - virt_to_phys(card->qdio.in_q->bufs[i].buffer); - } + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) + in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer; queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *), GFP_KERNEL); @@ -4807,11 +4867,11 @@ static int qeth_qdio_establish(struct qeth_card *card) rc = -ENOMEM; goto out_free_queue_start_poll; } + for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i) - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) { - out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys( - card->qdio.out_qs[i]->bufs[j]->buffer); - } + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++) + out_sbal_ptrs[k] = + card->qdio.out_qs[i]->bufs[j]->buffer; memset(&init_data, 0, sizeof(struct qdio_initialize)); init_data.cdev = CARD_DDEV(card); @@ -5289,7 +5349,7 @@ next_packet: offset = 0; } - hdr = element->addr + offset; + hdr = phys_to_virt(element->addr) + offset; offset += sizeof(*hdr); skb = NULL; @@ -5344,7 +5404,7 @@ next_packet: } use_rx_sg = 
(card->options.cq == QETH_CQ_ENABLED) || - ((skb_len >= card->options.rx_sg_cb) && + (skb_len > card->options.rx_sg_cb && !atomic_read(&card->force_alloc_skb) && !IS_OSN(card)); @@ -5388,7 +5448,7 @@ use_skb: walk_packet: while (skb_len) { int data_len = min(skb_len, (int)(element->length - offset)); - char *data = element->addr + offset; + char *data = phys_to_virt(element->addr) + offset; skb_len -= data_len; offset += data_len; @@ -5447,7 +5507,6 @@ static int qeth_extract_skbs(struct qeth_card *card, int budget, { int work_done = 0; - WARN_ON_ONCE(!budget); *done = false; while (budget) { diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 2bd9993aa60b..78cae61bc924 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -247,8 +247,8 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + unsigned int cnt; char *tmp; - int cnt, old_cnt; int rc = 0; mutex_lock(&card->conf_mutex); @@ -257,13 +257,12 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev, goto out; } - old_cnt = card->qdio.in_buf_pool.buf_count; cnt = simple_strtoul(buf, &tmp, 10); cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN : ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt); - if (old_cnt != cnt) { - rc = qeth_realloc_buffer_pool(card, cnt); - } + + rc = qeth_resize_buffer_pool(card, cnt); + out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 692bd2623401..8fb29371788b 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -284,6 +284,7 @@ static void qeth_l2_stop_card(struct qeth_card *card) if (card->state == CARD_STATE_SOFTSETUP) { qeth_clear_ipacmd_list(card); qeth_drain_output_queues(card); + cancel_delayed_work_sync(&card->buffer_reclaim_work); card->state = CARD_STATE_DOWN; } @@ -1707,15 +1708,14 @@ int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state) QETH_CARD_TEXT(card, 2, "vniccsch"); - /* do not change anything if BridgePort is enabled */ - if (qeth_bridgeport_is_in_use(card)) - return -EBUSY; - /* check if characteristic and enable/disable are supported */ if (!(card->options.vnicc.sup_chars & vnicc) || !(card->options.vnicc.set_char_sup & vnicc)) return -EOPNOTSUPP; + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + /* set enable/disable command and store wanted characteristic */ if (state) { cmd = IPA_VNICC_ENABLE; @@ -1761,14 +1761,13 @@ int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state) QETH_CARD_TEXT(card, 2, "vniccgch"); - /* do not get anything if BridgePort is enabled */ - if (qeth_bridgeport_is_in_use(card)) - return -EBUSY; - /* check if characteristic is supported */ if (!(card->options.vnicc.sup_chars & vnicc)) return -EOPNOTSUPP; + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + /* if card is ready, query current VNICC state */ if (qeth_card_hw_is_reachable(card)) rc = qeth_l2_vnicc_query_chars(card); @@ -1786,15 +1785,14 @@ int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout) QETH_CARD_TEXT(card, 2, "vniccsto"); - /* do not change anything if BridgePort is enabled */ - if (qeth_bridgeport_is_in_use(card)) - return -EBUSY; - /* check if characteristic and set_timeout are supported */ if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) || !(card->options.vnicc.getset_timeout_sup 
& QETH_VNICC_LEARNING)) return -EOPNOTSUPP; + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + /* do we need to do anything? */ if (card->options.vnicc.learning_timeout == timeout) return rc; @@ -1823,14 +1821,14 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout) QETH_CARD_TEXT(card, 2, "vniccgto"); - /* do not get anything if BridgePort is enabled */ - if (qeth_bridgeport_is_in_use(card)) - return -EBUSY; - /* check if characteristic and get_timeout are supported */ if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) || !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING)) return -EOPNOTSUPP; + + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + /* if card is ready, get timeout. Otherwise, just return stored value */ *timeout = card->options.vnicc.learning_timeout; if (qeth_card_hw_is_reachable(card)) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 317d56647a4a..82f800d1d7b3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1178,6 +1178,7 @@ static void qeth_l3_stop_card(struct qeth_card *card) qeth_l3_clear_ip_htable(card, 1); qeth_clear_ipacmd_list(card); qeth_drain_output_queues(card); + cancel_delayed_work_sync(&card->buffer_reclaim_work); card->state = CARD_STATE_DOWN; } diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 29f2517d2a31..a3d1c3bdfadb 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -206,12 +206,11 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd); if (card->ssqd.qdioac2 & CHSC_AC2_SNIFFER_AVAILABLE) { card->options.sniffer = i; - if (card->qdio.init_pool.buf_count != - QETH_IN_BUF_COUNT_MAX) - qeth_realloc_buffer_pool(card, - QETH_IN_BUF_COUNT_MAX); - } else + qeth_resize_buffer_pool(card, QETH_IN_BUF_COUNT_MAX); + } else { rc = -EPERM; + } + break; default: rc = -EINVAL; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 223a805f0b0b..cae9b7ff79b0 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -2510,7 +2510,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { sbale = &sbal->element[idx]; - req_id = (unsigned long) sbale->addr; + req_id = sbale->addr; fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id); if (!fsf_req) { diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index 2b1e4da1944f..4bfb79f20588 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -410,7 +410,7 @@ struct fsf_qtcb_bottom_port { u8 cb_util; u8 a_util; u8 res2; - u16 temperature; + s16 temperature; u16 vcc; u16 tx_bias; u16 tx_power; diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 661436a92f8e..f0d6296e673b 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -98,7 +98,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *)); sbale = qdio->res_q[idx]->element; - req_id = (u64) sbale->addr; + req_id = sbale->addr; scount = min(sbale->scount + 1, ZFCP_QDIO_MAX_SBALS_PER_REQ + 1); /* incl. 
signaling SBAL */ @@ -199,7 +199,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, q_req->sbal_number); return -EINVAL; } - sbale->addr = sg_virt(sg); + sbale->addr = sg_phys(sg); sbale->length = sg->length; } return 0; @@ -418,7 +418,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) sbale->length = 0; sbale->eflags = SBAL_EFLAGS_LAST_ENTRY; sbale->sflags = 0; - sbale->addr = NULL; + sbale->addr = 0; } if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q)) diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h index 2a816a37b3c0..6b43d6b254be 100644 --- a/drivers/s390/scsi/zfcp_qdio.h +++ b/drivers/s390/scsi/zfcp_qdio.h @@ -122,14 +122,14 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, % QDIO_MAX_BUFFERS_PER_Q; sbale = zfcp_qdio_sbale_req(qdio, q_req); - sbale->addr = (void *) req_id; + sbale->addr = req_id; sbale->eflags = 0; sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype; if (unlikely(!data)) return; sbale++; - sbale->addr = data; + sbale->addr = virt_to_phys(data); sbale->length = len; } @@ -152,7 +152,7 @@ void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1); q_req->sbale_curr++; sbale = zfcp_qdio_sbale_curr(qdio, q_req); - sbale->addr = data; + sbale->addr = virt_to_phys(data); sbale->length = len; } diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 494b9fe9cc94..a711a0d15100 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -800,7 +800,7 @@ static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400, static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400, \ zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL) -ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 5, "%hu"); +ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 6, "%hd"); ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu"); ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu"); ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu"); diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index 3170b295a5da..186259417449 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -36,6 +36,7 @@ #include <linux/jiffies.h> #include <linux/dma-mapping.h> #include <linux/slab.h> +#include <linux/msdos_partition.h> #include <scsi/scsicam.h> #include <asm/dma.h> @@ -3410,9 +3411,10 @@ static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev, a partition table entry whose end_head matches one of the standard BusLogic geometry translations (64/32, 128/32, or 255/63). 
*/ - if (*(unsigned short *) (buf + 64) == 0xAA55) { - struct partition *part1_entry = (struct partition *) buf; - struct partition *part_entry = part1_entry; + if (*(unsigned short *) (buf + 64) == MSDOS_LABEL_MAGIC) { + struct msdos_partition *part1_entry = + (struct msdos_partition *)buf; + struct msdos_partition *part_entry = part1_entry; int saved_cyl = diskparam->cylinders, part_no; unsigned char part_end_head = 0, part_end_sector = 0; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index a7881f8eb05e..1b6eaf8da5fa 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -989,6 +989,7 @@ config SCSI_SYM53C8XX_MMIO config SCSI_IPR tristate "IBM Power Linux RAID adapter support" depends on PCI && SCSI && ATA + select SATA_HOST select FW_LOADER select IRQ_POLL select SGL_ALLOC diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index ee6bc2f9b80a..0443b74390cf 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -33,6 +33,7 @@ #include <linux/syscalls.h> #include <linux/delay.h> #include <linux/kthread.h> +#include <linux/msdos_partition.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -328,9 +329,9 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev, buf = scsi_bios_ptable(bdev); if (!buf) return 0; - if(*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) { - struct partition *first = (struct partition * )buf; - struct partition *entry = first; + if (*(__le16 *)(buf + 0x40) == cpu_to_le16(MSDOS_LABEL_MAGIC)) { + struct msdos_partition *first = (struct msdos_partition *)buf; + struct msdos_partition *entry = first; int saved_cylinders = param->cylinders; int num; unsigned char end_head, end_sec; diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 57992519384e..dc4fe334efd0 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -723,24 +723,17 @@ static int ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { - uint8_t *bh; int heads; int sectors; int cylinders; - int ret; int extended; struct ahd_softc *ahd; ahd = *((struct ahd_softc **)sdev->host->hostdata); - bh = scsi_bios_ptable(bdev); - if (bh) { - ret = scsi_partsize(bh, capacity, - &geom[2], &geom[0], &geom[1]); - kfree(bh); - if (ret != -1) - return (ret); - } + if (scsi_partsize(bdev, capacity, geom)) + return 0; + heads = 64; sectors = 32; cylinders = aic_sector_div(capacity, heads, sectors); diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index d5c4a0d23706..2edfa0594f18 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -695,11 +695,9 @@ static int ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { - uint8_t *bh; int heads; int sectors; int cylinders; - int ret; int extended; struct ahc_softc *ahc; u_int channel; @@ -707,14 +705,9 @@ ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev, ahc = *((struct ahc_softc **)sdev->host->hostdata); channel = sdev_channel(sdev); - bh = scsi_bios_ptable(bdev); - if (bh) { - ret = scsi_partsize(bh, capacity, - &geom[2], &geom[0], &geom[1]); - kfree(bh); - if (ret != -1) - return (ret); - } + if (scsi_partsize(bdev, capacity, geom)) + return 0; + heads = 64; sectors = 32; cylinders = aic_sector_div(capacity, heads, sectors); diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 
40dc8eac0e3a..c2c79a37a9ef 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -353,16 +353,11 @@ static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) static int arcmsr_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *geom) { - int ret, heads, sectors, cylinders, total_capacity; - unsigned char *buffer;/* return copy of block device's partition table */ + int heads, sectors, cylinders, total_capacity; + + if (scsi_partsize(bdev, capacity, geom)) + return 0; - buffer = scsi_bios_ptable(bdev); - if (buffer) { - ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]); - kfree(buffer); - if (ret != -1) - return ret; - } total_capacity = capacity; heads = 64; sectors = 32; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index ae45cbe98ae2..cd8db1349871 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -9950,6 +9950,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, ioa_cfg->max_devs_supported = ipr_max_devs; if (ioa_cfg->sis64) { + host->max_channel = IPR_MAX_SIS64_BUSES; host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; if (ipr_max_devs > IPR_MAX_SIS64_DEVS) @@ -9958,6 +9959,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, + ((sizeof(struct ipr_config_table_entry64) * ioa_cfg->max_devs_supported))); } else { + host->max_channel = IPR_VSET_BUS; host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS) @@ -9967,7 +9969,6 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, * ioa_cfg->max_devs_supported))); } - host->max_channel = IPR_VSET_BUS; host->unique_id = host->host_no; host->max_cmd_len = IPR_MAX_CDB_LEN; host->can_queue = ioa_cfg->max_cmds; diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index a67baeb36d1f..b97aa9ac2ffe 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1300,6 +1300,7 @@ struct ipr_resource_entry { #define IPR_ARRAY_VIRTUAL_BUS 0x1 #define IPR_VSET_VIRTUAL_BUS 0x2 #define IPR_IOAFP_VIRTUAL_BUS 0x3 +#define IPR_MAX_SIS64_BUSES 0x4 #define IPR_GET_RES_PHYS_LOC(res) \ (((res)->bus << 24) | ((res)->target << 8) | (res)->lun) diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index b48aac8dfcb8..974c3b9116d5 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -621,7 +621,7 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; pci_set_drvdata(pdev, pci_info); - if (efi_enabled(EFI_RUNTIME_SERVICES)) + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) orom = isci_get_efi_var(pdev); if (!orom) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 9c5f7c9178c6..2b865c6423e2 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -628,6 +628,8 @@ redisc: } out: kref_put(&rdata->kref, fc_rport_destroy); + if (!IS_ERR(fp)) + fc_frame_free(fp); } /** diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig index 5c6a5eff2f8e..052ee3a26f6e 100644 --- a/drivers/scsi/libsas/Kconfig +++ b/drivers/scsi/libsas/Kconfig @@ -19,6 +19,7 @@ config SCSI_SAS_ATA bool "ATA support for libsas (requires libata)" depends on SCSI_SAS_LIBSAS depends on ATA = y || ATA = SCSI_SAS_LIBSAS + select SATA_HOST help Builds in ATA support into libsas. Will necessitate the loading of libata along with libsas. 
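A side note on the scsi_partsize() conversions in the aic79xx, aic7xxx and arcmsr hunks above: when no geometry can be inferred from the partition table, those biosparam callbacks fall back to a fixed 64-head, 32-sector translation and derive the cylinder count from the capacity (the extended translation for large disks is not shown in these hunks). A standalone sketch of that fallback arithmetic, assuming 512-byte sectors; the function name fallback_geometry is illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Fallback CHS translation used when no partition-table geometry is found:
 * 64 heads, 32 sectors per track, cylinders = capacity / (heads * sectors). */
static void fallback_geometry(uint64_t capacity_sectors, int geom[3])
{
	geom[0] = 64;                                  /* heads */
	geom[1] = 32;                                  /* sectors per track */
	geom[2] = (int)(capacity_sectors / (64 * 32)); /* cylinders */
}

int main(void)
{
	int geom[3];

	/* a 2 GiB disk expressed in 512-byte sectors */
	fallback_geometry(4 * 1024 * 1024, geom);
	printf("heads=%d sectors=%d cylinders=%d\n", geom[0], geom[1], geom[2]);
	return 0;
}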
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index ff6d4aa92421..f27ffd088c8a 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -2795,11 +2795,9 @@ megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { adapter_t *adapter; - unsigned char *bh; int heads; int sectors; int cylinders; - int rval; /* Get pointer to host config structure */ adapter = (adapter_t *)sdev->host->hostdata; @@ -2826,15 +2824,8 @@ megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev, geom[2] = cylinders; } else { - bh = scsi_bios_ptable(bdev); - - if( bh ) { - rval = scsi_partsize(bh, capacity, - &geom[2], &geom[0], &geom[1]); - kfree(bh); - if( rval != -1 ) - return rval; - } + if (scsi_partsize(bdev, capacity, geom)) + return 0; dev_info(&adapter->dev->dev, "invalid partition on this disk on channel %d\n", diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index f3b36fd0a0eb..b2ad96564484 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -623,7 +623,8 @@ retry_alloc: fusion->io_request_frames = dma_pool_alloc(fusion->io_request_frames_pool, - GFP_KERNEL, &fusion->io_request_frames_phys); + GFP_KERNEL | __GFP_NOWARN, + &fusion->io_request_frames_phys); if (!fusion->io_request_frames) { if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) { instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT; @@ -661,7 +662,7 @@ retry_alloc: fusion->io_request_frames = dma_pool_alloc(fusion->io_request_frames_pool, - GFP_KERNEL, + GFP_KERNEL | __GFP_NOWARN, &fusion->io_request_frames_phys); if (!fusion->io_request_frames) { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index b520a980d1dc..7a94e1171c72 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -864,7 +864,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto qc24_fail_command; } - if (atomic_read(&fcport->state) != FCS_ONLINE) { + if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) { if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || atomic_read(&base_vha->loop_state) == LOOP_DEAD) { ql_dbg(ql_dbg_io, vha, 0x3005, @@ -946,7 +946,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, goto qc24_fail_command; } - if (atomic_read(&fcport->state) != FCS_ONLINE) { + if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) { if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || atomic_read(&base_vha->loop_state) == LOOP_DEAD) { ql_dbg(ql_dbg_io, vha, 0x3077, diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 44cb054d5e66..4c6c448dc2df 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -38,6 +38,7 @@ #include <linux/hrtimer.h> #include <linux/uuid.h> #include <linux/t10-pi.h> +#include <linux/msdos_partition.h> #include <net/checksum.h> @@ -4146,7 +4147,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt) static void __init sdebug_build_parts(unsigned char *ramp, unsigned long store_size) { - struct partition *pp; + struct msdos_partition *pp; int starts[SDEBUG_MAX_PARTS + 2]; int sectors_per_part, num_sectors, k; int heads_by_sects, start_sec, end_sec; @@ -4171,7 +4172,7 @@ static void __init sdebug_build_parts(unsigned char *ramp, ramp[510] = 0x55; /* magic partition markings */ ramp[511] = 0xAA; - pp = (struct partition *)(ramp + 0x1be); + pp = (struct msdos_partition *)(ramp + 
0x1be); for (k = 0; starts[k + 1]; ++k, ++pp) { start_sec = starts[k]; end_sec = starts[k + 1] - 1; diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c index e969138051c7..682cf08ab041 100644 --- a/drivers/scsi/scsicam.c +++ b/drivers/scsi/scsicam.c @@ -17,14 +17,11 @@ #include <linux/genhd.h> #include <linux/kernel.h> #include <linux/blkdev.h> +#include <linux/msdos_partition.h> #include <asm/unaligned.h> #include <scsi/scsicam.h> - -static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds, - unsigned int *secs); - /** * scsi_bios_ptable - Read PC partition table out of first sector of device. * @dev: from this device @@ -35,105 +32,48 @@ */ unsigned char *scsi_bios_ptable(struct block_device *dev) { - unsigned char *res = kmalloc(66, GFP_KERNEL); - if (res) { - struct block_device *bdev = dev->bd_contains; - Sector sect; - void *data = read_dev_sector(bdev, 0, &sect); - if (data) { - memcpy(res, data + 0x1be, 66); - put_dev_sector(sect); - } else { - kfree(res); - res = NULL; - } - } - return res; -} -EXPORT_SYMBOL(scsi_bios_ptable); - -/** - * scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors. - * @bdev: which device - * @capacity: size of the disk in sectors - * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders - * - * Description : determine the BIOS mapping/geometry used for a drive in a - * SCSI-CAM system, storing the results in ip as required - * by the HDIO_GETGEO ioctl(). - * - * Returns : -1 on failure, 0 on success. - */ - -int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip) -{ - unsigned char *p; - u64 capacity64 = capacity; /* Suppress gcc warning */ - int ret; - - p = scsi_bios_ptable(bdev); - if (!p) - return -1; - - /* try to infer mapping from partition table */ - ret = scsi_partsize(p, (unsigned long)capacity, (unsigned int *)ip + 2, - (unsigned int *)ip + 0, (unsigned int *)ip + 1); - kfree(p); - - if (ret == -1 && capacity64 < (1ULL << 32)) { - /* pick some standard mapping with at most 1024 cylinders, - and at most 62 sectors per track - this works up to - 7905 MB */ - ret = setsize((unsigned long)capacity, (unsigned int *)ip + 2, - (unsigned int *)ip + 0, (unsigned int *)ip + 1); - } - - /* if something went wrong, then apparently we have to return - a geometry with more than 1024 cylinders */ - if (ret || ip[0] > 255 || ip[1] > 63) { - if ((capacity >> 11) > 65534) { - ip[0] = 255; - ip[1] = 63; - } else { - ip[0] = 64; - ip[1] = 32; - } + struct address_space *mapping = dev->bd_contains->bd_inode->i_mapping; + unsigned char *res = NULL; + struct page *page; - if (capacity > 65535*63*255) - ip[2] = 65535; - else - ip[2] = (unsigned long)capacity / (ip[0] * ip[1]); - } + page = read_mapping_page(mapping, 0, NULL); + if (IS_ERR(page)) + return NULL; - return 0; + if (!PageError(page)) + res = kmemdup(page_address(page) + 0x1be, 66, GFP_KERNEL); + put_page(page); + return res; } -EXPORT_SYMBOL(scsicam_bios_param); +EXPORT_SYMBOL(scsi_bios_ptable); /** * scsi_partsize - Parse cylinders/heads/sectors from PC partition table - * @buf: partition table, see scsi_bios_ptable() + * @bdev: block device to parse * @capacity: size of the disk in sectors - * @cyls: put cylinders here - * @hds: put heads here - * @secs: put sectors here + * @geom: output in form of [hds, cylinders, sectors] * * Determine the BIOS mapping/geometry used to create the partition - * table, storing the results in @cyls, @hds,
and @secs + * table, storing the results in @geom. * - * Returns: -1 on failure, 0 on success. + * Returns: %false on failure, %true on success. */ - -int scsi_partsize(unsigned char *buf, unsigned long capacity, - unsigned int *cyls, unsigned int *hds, unsigned int *secs) +bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3]) { - struct partition *p = (struct partition *)buf, *largest = NULL; - int i, largest_cyl; int cyl, ext_cyl, end_head, end_cyl, end_sector; unsigned int logical_end, physical_end, ext_physical_end; + struct msdos_partition *p, *largest = NULL; + void *buf; + int ret = false; + buf = scsi_bios_ptable(bdev); + if (!buf) + return false; if (*(unsigned short *) (buf + 64) == 0xAA55) { - for (largest_cyl = -1, i = 0; i < 4; ++i, ++p) { + int largest_cyl = -1, i; + + for (i = 0, p = buf; i < 4; i++, p++) { if (!p->sys_ind) continue; #ifdef DEBUG @@ -153,7 +93,7 @@ int scsi_partsize(unsigned char *buf, unsigned long capacity, end_sector = largest->end_sector & 0x3f; if (end_head + 1 == 0 || end_sector == 0) - return -1; + goto out_free_buf; #ifdef DEBUG printk("scsicam_bios_param : end at h = %d, c = %d, s = %d\n", @@ -178,19 +118,24 @@ int scsi_partsize(unsigned char *buf, unsigned long capacity, ,logical_end, physical_end, ext_physical_end, ext_cyl); #endif - if ((logical_end == physical_end) || - (end_cyl == 1023 && ext_physical_end == logical_end)) { - *secs = end_sector; - *hds = end_head + 1; - *cyls = capacity / ((end_head + 1) * end_sector); - return 0; + if (logical_end == physical_end || + (end_cyl == 1023 && ext_physical_end == logical_end)) { + geom[0] = end_head + 1; + geom[1] = end_sector; + geom[2] = (unsigned long)capacity / + ((end_head + 1) * end_sector); + ret = true; + goto out_free_buf; } #ifdef DEBUG printk("scsicam_bios_param : logical (%u) != physical (%u)\n", logical_end, physical_end); #endif } - return -1; + +out_free_buf: + kfree(buf); + return ret; } EXPORT_SYMBOL(scsi_partsize); @@ -258,3 +203,56 @@ static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds *hds = (unsigned int) heads; return (rv); } + +/** + * scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors. + * @bdev: which device + * @capacity: size of the disk in sectors + * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders + * + * Description : determine the BIOS mapping/geometry used for a drive in a + * SCSI-CAM system, storing the results in ip as required + * by the HDIO_GETGEO ioctl(). + * + * Returns : -1 on failure, 0 on success. + */ +int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip) +{ + u64 capacity64 = capacity; /* Suppress gcc warning */ + int ret = 0; + + /* try to infer mapping from partition table */ + if (scsi_partsize(bdev, capacity, ip)) + return 0; + + if (capacity64 < (1ULL << 32)) { + /* + * Pick some standard mapping with at most 1024 cylinders, and + * at most 62 sectors per track - this works up to 7905 MB. + */ + ret = setsize((unsigned long)capacity, (unsigned int *)ip + 2, + (unsigned int *)ip + 0, (unsigned int *)ip + 1); + } + + /* + * If something went wrong, then apparently we have to return a geometry + * with more than 1024 cylinders. 
+ */ + if (ret || ip[0] > 255 || ip[1] > 63) { + if ((capacity >> 11) > 65534) { + ip[0] = 255; + ip[1] = 63; + } else { + ip[0] = 64; + ip[1] = 32; + } + + if (capacity > 65535*63*255) + ip[2] = 65535; + else + ip[2] = (unsigned long)capacity / (ip[0] * ip[1]); + } + + return 0; +} +EXPORT_SYMBOL(scsicam_bios_param); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 8ca9299ffd36..a793cb08d025 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3169,9 +3169,11 @@ static int sd_revalidate_disk(struct gendisk *disk) if (sd_validate_opt_xfer_size(sdkp, dev_max)) { q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); - } else + } else { + q->limits.io_opt = 0; rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), (sector_t)BLK_DEF_MAX_SECTORS); + } /* Do not exceed controller limit */ rw_max = min(rw_max, queue_max_hw_sectors(q)); @@ -3187,7 +3189,8 @@ static int sd_revalidate_disk(struct gendisk *disk) sdkp->first_scan = 0; - set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity)); + set_capacity_revalidate_and_notify(disk, + logical_to_sectors(sdp, sdkp->capacity), false); sd_config_write_same(sdkp); kfree(buffer); diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index e4282bce5834..f45c22b09726 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -161,6 +161,7 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data) { struct scsi_disk *sdkp = scsi_disk(disk); + sector_t capacity = logical_to_sectors(sdkp->device, sdkp->capacity); unsigned int nr, i; unsigned char *buf; size_t offset, buflen = 0; @@ -171,11 +172,15 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, /* Not a zoned device */ return -EOPNOTSUPP; + if (!capacity) + /* Device gone or invalid */ + return -ENODEV; + buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen); if (!buf) return -ENOMEM; - while (zone_idx < nr_zones && sector < get_capacity(disk)) { + while (zone_idx < nr_zones && sector < capacity) { ret = sd_zbc_do_report_zones(sdkp, buf, buflen, sectors_to_logical(sdkp->device, sector), true); if (ret) diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 0fbb8fe6e521..e4240e4ae8bb 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -688,7 +688,7 @@ static const struct block_device_operations sr_bdops = .release = sr_block_release, .ioctl = sr_block_ioctl, #ifdef CONFIG_COMPAT - .ioctl = sr_block_compat_ioctl, + .compat_ioctl = sr_block_compat_ioctl, #endif .check_events = sr_block_check_events, .revalidate_disk = sr_block_revalidate_disk, diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index abd0e6b05f79..2d705694636c 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3884,18 +3884,25 @@ EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit); void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) { unsigned long flags; + bool update = false; - if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT)) + if (!ufshcd_is_auto_hibern8_supported(hba)) return; spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->ahit == ahit) - goto out_unlock; - hba->ahit = ahit; - if (!pm_runtime_suspended(hba->dev)) - ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); -out_unlock: + if (hba->ahit != ahit) { + hba->ahit = ahit; + update = true; + } spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (update && !pm_runtime_suspended(hba->dev)) { + pm_runtime_get_sync(hba->dev); + 
ufshcd_hold(hba, false); + ufshcd_auto_hibern8_enable(hba); + ufshcd_release(hba); + pm_runtime_put(hba->dev); + } } EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update); diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index e3f5ebc0c05e..fc2575fef51b 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -1320,6 +1320,9 @@ static const struct of_device_id qcom_slim_ngd_dt_match[] = { { .compatible = "qcom,slim-ngd-v1.5.0", .data = &ngd_v1_5_offset_info, + },{ + .compatible = "qcom,slim-ngd-v2.1.0", + .data = &ngd_v1_5_offset_info, }, {} }; diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c index 70014ecce2a7..7b642c330977 100644 --- a/drivers/soc/fsl/dpio/dpio-driver.c +++ b/drivers/soc/fsl/dpio/dpio-driver.c @@ -233,10 +233,6 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) goto err_allocate_irqs; } - err = register_dpio_irq_handlers(dpio_dev, desc.cpu); - if (err) - goto err_register_dpio_irq; - priv->io = dpaa2_io_create(&desc, dev); if (!priv->io) { dev_err(dev, "dpaa2_io_create failed\n"); @@ -244,6 +240,10 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) goto err_dpaa2_io_create; } + err = register_dpio_irq_handlers(dpio_dev, desc.cpu); + if (err) + goto err_register_dpio_irq; + dev_info(dev, "probed\n"); dev_dbg(dev, " receives_notifications = %d\n", desc.receives_notifications); diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c index fb70b8a3f7c5..20d37eaeb5f2 100644 --- a/drivers/soc/imx/soc-imx-scu.c +++ b/drivers/soc/imx/soc-imx-scu.c @@ -25,7 +25,7 @@ struct imx_sc_msg_misc_get_soc_id { u32 id; } resp; } data; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_misc_get_soc_uid { struct imx_sc_rpc_msg hdr; diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c index 2dad4961a80b..8d4d05086906 100644 --- a/drivers/soc/samsung/exynos-chipid.c +++ b/drivers/soc/samsung/exynos-chipid.c @@ -59,7 +59,7 @@ static int __init exynos_chipid_early_init(void) syscon = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-chipid"); if (!syscon) - return ENODEV; + return -ENODEV; regmap = device_node_to_regmap(syscon); of_node_put(syscon); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index d6ed0c355954..efce98e9844e 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -62,6 +62,13 @@ config SPI_ALTERA help This is the driver for the Altera SPI Controller. +config SPI_AR934X + tristate "Qualcomm Atheros AR934X/QCA95XX SPI controller driver" + depends on ATH79 || COMPILE_TEST + help + This enables support for the SPI controller present on the + Qualcomm Atheros AR934X/QCA95XX SoCs. + config SPI_ATH79 tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver" depends on ATH79 || COMPILE_TEST @@ -264,6 +271,13 @@ config SPI_FALCON has only been tested with m25p80 type chips. The hardware has no support for other types of SPI peripherals. +config SPI_FSI + tristate "FSI SPI driver" + depends on FSI + help + This enables support for the driver for FSI bus attached SPI + controllers. + config SPI_FSL_LPSPI tristate "Freescale i.MX LPSPI controller" depends on ARCH_MXC || COMPILE_TEST @@ -285,7 +299,6 @@ config SPI_HISI_SFC_V3XX tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets" depends on (ARM64 && ACPI) || COMPILE_TEST depends on HAS_IOMEM - select CONFIG_MTD_SPI_NOR help This enables support for HiSilicon v3xx SPI-NOR flash controller found in hi16xx chipsets. 
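One note on the SPI_HISI_SFC_V3XX hunk just above: Kconfig select and depends statements take bare symbol names, while the CONFIG_ prefix belongs to C code and Makefiles, so the dropped "select CONFIG_MTD_SPI_NOR" line referenced a symbol that does not exist and effectively selected nothing. A hypothetical Kconfig entry illustrating the convention (not part of this patch):

config SPI_EXAMPLE_NOR
	tristate "Example SPI-NOR flash controller"
	depends on HAS_IOMEM
	select MTD_SPI_NOR
	help
	  Illustrative only: the select line names the bare MTD_SPI_NOR
	  symbol; C sources and Makefiles test CONFIG_MTD_SPI_NOR instead.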
@@ -415,6 +428,7 @@ config SPI_FSL_ESPI config SPI_MESON_SPICC tristate "Amlogic Meson SPICC controller" + depends on COMMON_CLK depends on ARCH_MESON || COMPILE_TEST help This enables master mode support for the SPICC (SPI communication @@ -443,6 +457,16 @@ config SPI_MT7621 help This selects a driver for the MediaTek MT7621 SPI Controller. +config SPI_MTK_NOR + tristate "MediaTek SPI NOR controller" + depends on ARCH_MEDIATEK || COMPILE_TEST + help + This enables support for SPI NOR controller found on MediaTek + ARM SoCs. This is a controller specifically for SPI-NOR flash. + It can perform generic SPI transfers up to 6 bytes via generic + SPI interface as well as several SPI-NOR specific instructions + via SPI MEM interface. + config SPI_NPCM_FIU tristate "Nuvoton NPCM FLASH Interface Unit" depends on ARCH_NPCM || COMPILE_TEST @@ -890,6 +914,17 @@ config SPI_ZYNQMP_GQSPI # Add new SPI master controllers in alphabetical order above this line # +comment "SPI Multiplexer support" + +config SPI_MUX + tristate "SPI multiplexer support" + select MULTIPLEXER + help + This adds support for SPI multiplexers. Each SPI mux will be + accessible as a SPI controller, the devices behind the mux will appear + to be chip selects on this controller. It is still necessary to + select one or more specific mux-controller drivers. + # # There are lots of SPI device types, with sensors and memory # being probably the most widely used ones. diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 9b65ec5afc5e..28f601327f8c 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -9,11 +9,13 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG # config declarations into driver model code obj-$(CONFIG_SPI_MASTER) += spi.o obj-$(CONFIG_SPI_MEM) += spi-mem.o +obj-$(CONFIG_SPI_MUX) += spi-mux.o obj-$(CONFIG_SPI_SPIDEV) += spidev.o obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o # SPI master controller drivers (bus) obj-$(CONFIG_SPI_ALTERA) += spi-altera.o +obj-$(CONFIG_SPI_AR934X) += spi-ar934x.o obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o @@ -40,6 +42,7 @@ spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o obj-$(CONFIG_SPI_EFM32) += spi-efm32.o obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o obj-$(CONFIG_SPI_FALCON) += spi-falcon.o +obj-$(CONFIG_SPI_FSI) += spi-fsi.o obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o @@ -62,6 +65,7 @@ obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o +obj-$(CONFIG_SPI_MTK_NOR) += spi-mtk-nor.o obj-$(CONFIG_SPI_MXIC) += spi-mxic.o obj-$(CONFIG_SPI_MXS) += spi-mxs.o obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index fd8007ebb145..cb44d1e169aa 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -149,6 +149,7 @@ struct atmel_qspi { struct clk *qspick; struct platform_device *pdev; const struct atmel_qspi_caps *caps; + resource_size_t mmap_size; u32 pending; u32 mr; u32 scr; @@ -172,6 +173,81 @@ static const struct atmel_qspi_mode atmel_qspi_modes[] = { { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD }, }; +#ifdef VERBOSE_DEBUG +static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz) +{ + switch (offset) { + case QSPI_CR: + return "CR"; + case QSPI_MR: + return "MR"; + case QSPI_RD: + 
return "RD"; + case QSPI_TD: + return "TD"; + case QSPI_SR: + return "SR"; + case QSPI_IER: + return "IER"; + case QSPI_IDR: + return "IDR"; + case QSPI_IMR: + return "IMR"; + case QSPI_SCR: + return "SCR"; + case QSPI_IAR: + return "IAR"; + case QSPI_ICR: + return "ICR/WICR"; + case QSPI_IFR: + return "IFR"; + case QSPI_RICR: + return "RICR"; + case QSPI_SMR: + return "SMR"; + case QSPI_SKR: + return "SKR"; + case QSPI_WPMR: + return "WPMR"; + case QSPI_WPSR: + return "WPSR"; + case QSPI_VERSION: + return "VERSION"; + default: + snprintf(tmp, sz, "0x%02x", offset); + break; + } + + return tmp; +} +#endif /* VERBOSE_DEBUG */ + +static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset) +{ + u32 value = readl_relaxed(aq->regs + offset); + +#ifdef VERBOSE_DEBUG + char tmp[8]; + + dev_vdbg(&aq->pdev->dev, "read 0x%08x from %s\n", value, + atmel_qspi_reg_name(offset, tmp, sizeof(tmp))); +#endif /* VERBOSE_DEBUG */ + + return value; +} + +static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset) +{ +#ifdef VERBOSE_DEBUG + char tmp[8]; + + dev_vdbg(&aq->pdev->dev, "write 0x%08x into %s\n", value, + atmel_qspi_reg_name(offset, tmp, sizeof(tmp))); +#endif /* VERBOSE_DEBUG */ + + writel_relaxed(value, aq->regs + offset); +} + static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op, const struct atmel_qspi_mode *mode) { @@ -292,32 +368,32 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq, * Serial Memory Mode (SMM). */ if (aq->mr != QSPI_MR_SMM) { - writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR); + atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); aq->mr = QSPI_MR_SMM; } /* Clear pending interrupts */ - (void)readl_relaxed(aq->regs + QSPI_SR); + (void)atmel_qspi_read(aq, QSPI_SR); if (aq->caps->has_ricr) { if (!op->addr.nbytes && op->data.dir == SPI_MEM_DATA_IN) ifr |= QSPI_IFR_APBTFRTYP_READ; /* Set QSPI Instruction Frame registers */ - writel_relaxed(iar, aq->regs + QSPI_IAR); + atmel_qspi_write(iar, aq, QSPI_IAR); if (op->data.dir == SPI_MEM_DATA_IN) - writel_relaxed(icr, aq->regs + QSPI_RICR); + atmel_qspi_write(icr, aq, QSPI_RICR); else - writel_relaxed(icr, aq->regs + QSPI_WICR); - writel_relaxed(ifr, aq->regs + QSPI_IFR); + atmel_qspi_write(icr, aq, QSPI_WICR); + atmel_qspi_write(ifr, aq, QSPI_IFR); } else { if (op->data.dir == SPI_MEM_DATA_OUT) ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR; /* Set QSPI Instruction Frame registers */ - writel_relaxed(iar, aq->regs + QSPI_IAR); - writel_relaxed(icr, aq->regs + QSPI_ICR); - writel_relaxed(ifr, aq->regs + QSPI_IFR); + atmel_qspi_write(iar, aq, QSPI_IAR); + atmel_qspi_write(icr, aq, QSPI_ICR); + atmel_qspi_write(ifr, aq, QSPI_IFR); } return 0; @@ -329,6 +405,14 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) u32 sr, offset; int err; + /* + * Check if the address exceeds the MMIO window size. An improvement + * would be to add support for regular SPI mode and fall back to it + * when the flash memories overrun the controller's memory space.
+ */ + if (op->addr.val + op->data.nbytes > aq->mmap_size) + return -ENOTSUPP; + err = atmel_qspi_set_cfg(aq, op, &offset); if (err) return err; @@ -336,7 +420,7 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) /* Skip to the final steps if there is no data */ if (op->data.nbytes) { /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */ - (void)readl_relaxed(aq->regs + QSPI_IFR); + (void)atmel_qspi_read(aq, QSPI_IFR); /* Send/Receive data */ if (op->data.dir == SPI_MEM_DATA_IN) @@ -347,22 +431,22 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) op->data.nbytes); /* Release the chip-select */ - writel_relaxed(QSPI_CR_LASTXFER, aq->regs + QSPI_CR); + atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR); } /* Poll INSTRuction End status */ - sr = readl_relaxed(aq->regs + QSPI_SR); + sr = atmel_qspi_read(aq, QSPI_SR); if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) return err; /* Wait for INSTRuction End interrupt */ reinit_completion(&aq->cmd_completion); aq->pending = sr & QSPI_SR_CMD_COMPLETED; - writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IER); + atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER); if (!wait_for_completion_timeout(&aq->cmd_completion, msecs_to_jiffies(1000))) err = -ETIMEDOUT; - writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IDR); + atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR); return err; } @@ -401,7 +485,7 @@ static int atmel_qspi_setup(struct spi_device *spi) scbr--; aq->scr = QSPI_SCR_SCBR(scbr); - writel_relaxed(aq->scr, aq->regs + QSPI_SCR); + atmel_qspi_write(aq->scr, aq, QSPI_SCR); return 0; } @@ -409,14 +493,14 @@ static int atmel_qspi_setup(struct spi_device *spi) static void atmel_qspi_init(struct atmel_qspi *aq) { /* Reset the QSPI controller */ - writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR); + atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR); /* Set the QSPI controller by default in Serial Memory Mode */ - writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR); + atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); aq->mr = QSPI_MR_SMM; /* Enable the QSPI controller */ - writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR); + atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR); } static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) @@ -424,8 +508,8 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) struct atmel_qspi *aq = dev_id; u32 status, mask, pending; - status = readl_relaxed(aq->regs + QSPI_SR); - mask = readl_relaxed(aq->regs + QSPI_IMR); + status = atmel_qspi_read(aq, QSPI_SR); + mask = atmel_qspi_read(aq, QSPI_IMR); pending = status & mask; if (!pending) @@ -480,6 +564,8 @@ static int atmel_qspi_probe(struct platform_device *pdev) goto exit; } + aq->mmap_size = resource_size(res); + /* Get the peripheral clock */ aq->pclk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(aq->pclk)) @@ -558,7 +644,7 @@ static int atmel_qspi_remove(struct platform_device *pdev) struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); spi_unregister_controller(ctrl); - writel_relaxed(QSPI_CR_QSPIDIS, aq->regs + QSPI_CR); + atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR); clk_disable_unprepare(aq->qspick); clk_disable_unprepare(aq->pclk); return 0; @@ -585,7 +671,7 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) atmel_qspi_init(aq); - writel_relaxed(aq->scr, aq->regs + QSPI_SCR); + atmel_qspi_write(aq->scr, aq, QSPI_SCR); return 0; } diff --git a/drivers/spi/spi-ar934x.c b/drivers/spi/spi-ar934x.c new file mode 100644 index 
000000000000..d08dec09d423 --- /dev/null +++ b/drivers/spi/spi-ar934x.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// SPI controller driver for Qualcomm Atheros AR934x/QCA95xx SoCs +// +// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com> +// +// Based on spi-mt7621.c: +// Copyright (C) 2011 Sergiy <piratfm@gmail.com> +// Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org> +// Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name> + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/spi/spi.h> + +#define DRIVER_NAME "spi-ar934x" + +#define AR934X_SPI_REG_FS 0x00 +#define AR934X_SPI_ENABLE BIT(0) + +#define AR934X_SPI_REG_IOC 0x08 +#define AR934X_SPI_IOC_INITVAL 0x70000 + +#define AR934X_SPI_REG_CTRL 0x04 +#define AR934X_SPI_CLK_MASK GENMASK(5, 0) + +#define AR934X_SPI_DATAOUT 0x10 + +#define AR934X_SPI_REG_SHIFT_CTRL 0x14 +#define AR934X_SPI_SHIFT_EN BIT(31) +#define AR934X_SPI_SHIFT_CS(n) BIT(28 + (n)) +#define AR934X_SPI_SHIFT_TERM 26 +#define AR934X_SPI_SHIFT_VAL(cs, term, count) \ + (AR934X_SPI_SHIFT_EN | AR934X_SPI_SHIFT_CS(cs) | \ + (term) << AR934X_SPI_SHIFT_TERM | (count)) + +#define AR934X_SPI_DATAIN 0x18 + +struct ar934x_spi { + struct spi_controller *ctlr; + void __iomem *base; + struct clk *clk; + unsigned int clk_freq; +}; + +static inline int ar934x_spi_clk_div(struct ar934x_spi *sp, unsigned int freq) +{ + int div = DIV_ROUND_UP(sp->clk_freq, freq * 2) - 1; + + if (div < 0) + return 0; + else if (div > AR934X_SPI_CLK_MASK) + return -EINVAL; + else + return div; +} + +static int ar934x_spi_setup(struct spi_device *spi) +{ + struct ar934x_spi *sp = spi_controller_get_devdata(spi->master); + + if ((spi->max_speed_hz == 0) || + (spi->max_speed_hz > (sp->clk_freq / 2))) { + spi->max_speed_hz = sp->clk_freq / 2; + } else if (spi->max_speed_hz < (sp->clk_freq / 128)) { + dev_err(&spi->dev, "spi clock is too low\n"); + return -EINVAL; + } + + return 0; +} + +static int ar934x_spi_transfer_one_message(struct spi_controller *master, + struct spi_message *m) +{ + struct ar934x_spi *sp = spi_controller_get_devdata(master); + struct spi_transfer *t = NULL; + struct spi_device *spi = m->spi; + unsigned long trx_done, trx_cur; + int stat = 0; + u8 term = 0; + int div, i; + u32 reg; + const u8 *tx_buf; + u8 *buf; + + m->actual_length = 0; + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->speed_hz) + div = ar934x_spi_clk_div(sp, t->speed_hz); + else + div = ar934x_spi_clk_div(sp, spi->max_speed_hz); + if (div < 0) { + stat = -EIO; + goto msg_done; + } + + reg = ioread32(sp->base + AR934X_SPI_REG_CTRL); + reg &= ~AR934X_SPI_CLK_MASK; + reg |= div; + iowrite32(reg, sp->base + AR934X_SPI_REG_CTRL); + iowrite32(0, sp->base + AR934X_SPI_DATAOUT); + + for (trx_done = 0; trx_done < t->len; trx_done += 4) { + trx_cur = t->len - trx_done; + if (trx_cur > 4) + trx_cur = 4; + else if (list_is_last(&t->transfer_list, &m->transfers)) + term = 1; + + if (t->tx_buf) { + tx_buf = t->tx_buf + trx_done; + reg = tx_buf[0]; + for (i = 1; i < trx_cur; i++) + reg = reg << 8 | tx_buf[i]; + iowrite32(reg, sp->base + AR934X_SPI_DATAOUT); + } + + reg = AR934X_SPI_SHIFT_VAL(spi->chip_select, term, + trx_cur * 8); + iowrite32(reg, sp->base + AR934X_SPI_REG_SHIFT_CTRL); + stat = readl_poll_timeout( + sp->base + AR934X_SPI_REG_SHIFT_CTRL, reg, + !(reg & AR934X_SPI_SHIFT_EN), 0, 5); + if (stat < 0) + goto msg_done; + + if (t->rx_buf) { + reg = 
ioread32(sp->base + AR934X_SPI_DATAIN); + buf = t->rx_buf + trx_done; + for (i = 0; i < trx_cur; i++) { + buf[trx_cur - i - 1] = reg & 0xff; + reg >>= 8; + } + } + } + m->actual_length += t->len; + } + +msg_done: + m->status = stat; + spi_finalize_current_message(master); + + return 0; +} + +static const struct of_device_id ar934x_spi_match[] = { + { .compatible = "qca,ar934x-spi" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ar934x_spi_match); + +static int ar934x_spi_probe(struct platform_device *pdev) +{ + struct spi_controller *ctlr; + struct ar934x_spi *sp; + void __iomem *base; + struct clk *clk; + int ret; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "failed to get clock\n"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) + return ret; + + ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp)); + if (!ctlr) { + dev_info(&pdev->dev, "failed to allocate spi controller\n"); + return -ENOMEM; + } + + /* disable flash mapping and expose spi controller registers */ + iowrite32(AR934X_SPI_ENABLE, base + AR934X_SPI_REG_FS); + /* restore pins to default state: CSn=1 DO=CLK=0 */ + iowrite32(AR934X_SPI_IOC_INITVAL, base + AR934X_SPI_REG_IOC); + + ctlr->mode_bits = SPI_LSB_FIRST; + ctlr->setup = ar934x_spi_setup; + ctlr->transfer_one_message = ar934x_spi_transfer_one_message; + ctlr->bits_per_word_mask = SPI_BPW_MASK(8); + ctlr->dev.of_node = pdev->dev.of_node; + ctlr->num_chipselect = 3; + + dev_set_drvdata(&pdev->dev, ctlr); + + sp = spi_controller_get_devdata(ctlr); + sp->base = base; + sp->clk = clk; + sp->clk_freq = clk_get_rate(clk); + sp->ctlr = ctlr; + + return devm_spi_register_controller(&pdev->dev, ctlr); +} + +static int ar934x_spi_remove(struct platform_device *pdev) +{ + struct spi_controller *ctlr; + struct ar934x_spi *sp; + + ctlr = dev_get_drvdata(&pdev->dev); + sp = spi_controller_get_devdata(ctlr); + + clk_disable_unprepare(sp->clk); + + return 0; +} + +static struct platform_driver ar934x_spi_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = ar934x_spi_match, + }, + .probe = ar934x_spi_probe, + .remove = ar934x_spi_remove, +}; + +module_platform_driver(ar934x_spi_driver); + +MODULE_DESCRIPTION("SPI controller driver for Qualcomm Atheros AR934x/QCA95xx"); +MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c index 7327309ea3d5..6c235306c0e4 100644 --- a/drivers/spi/spi-bcm63xx-hsspi.c +++ b/drivers/spi/spi-bcm63xx-hsspi.c @@ -366,7 +366,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev) goto out_disable_clk; rate = clk_get_rate(pll_clk); - clk_disable_unprepare(pll_clk); if (!rate) { ret = -EINVAL; goto out_disable_pll_clk; diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c index 64d4c441b641..ea6e4a7b3feb 100644 --- a/drivers/spi/spi-efm32.c +++ b/drivers/spi/spi-efm32.c @@ -6,14 +6,13 @@ #include <linux/io.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> -#include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/module.h> -#include <linux/of_gpio.h> #include <linux/platform_data/efm32-spi.h> +#include <linux/of.h> #define DRIVER_NAME "efm32-spi" @@ -82,9 +81,6 @@ struct efm32_spi_ddata { const u8 *tx_buf; u8 *rx_buf; unsigned tx_len, 
rx_len; - - /* chip selects */ - unsigned csgpio[]; }; #define ddata_to_dev(ddata) (&(ddata->bitbang.master->dev)) @@ -102,14 +98,6 @@ static u32 efm32_spi_read32(struct efm32_spi_ddata *ddata, unsigned offset) return readl_relaxed(ddata->base + offset); } -static void efm32_spi_chipselect(struct spi_device *spi, int is_on) -{ - struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master); - int value = !(spi->mode & SPI_CS_HIGH) == !(is_on == BITBANG_CS_ACTIVE); - - gpio_set_value(ddata->csgpio[spi->chip_select], value); -} - static int efm32_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { @@ -320,17 +308,11 @@ static int efm32_spi_probe(struct platform_device *pdev) int ret; struct spi_master *master; struct device_node *np = pdev->dev.of_node; - int num_cs, i; if (!np) return -EINVAL; - num_cs = of_gpio_named_count(np, "cs-gpios"); - if (num_cs < 0) - return num_cs; - - master = spi_alloc_master(&pdev->dev, - sizeof(*ddata) + num_cs * sizeof(unsigned)); + master = spi_alloc_master(&pdev->dev, sizeof(*ddata)); if (!master) { dev_dbg(&pdev->dev, "failed to allocate spi master controller\n"); @@ -340,14 +322,13 @@ static int efm32_spi_probe(struct platform_device *pdev) master->dev.of_node = pdev->dev.of_node; - master->num_chipselect = num_cs; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); + master->use_gpio_descriptors = true; ddata = spi_master_get_devdata(master); ddata->bitbang.master = master; - ddata->bitbang.chipselect = efm32_spi_chipselect; ddata->bitbang.setup_transfer = efm32_spi_setup_transfer; ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs; @@ -361,25 +342,6 @@ static int efm32_spi_probe(struct platform_device *pdev) goto err; } - for (i = 0; i < num_cs; ++i) { - ret = of_get_named_gpio(np, "cs-gpios", i); - if (ret < 0) { - dev_err(&pdev->dev, "failed to get csgpio#%u (%d)\n", - i, ret); - goto err; - } - ddata->csgpio[i] = ret; - dev_dbg(&pdev->dev, "csgpio#%u = %u\n", i, ddata->csgpio[i]); - ret = devm_gpio_request_one(&pdev->dev, ddata->csgpio[i], - GPIOF_OUT_INIT_LOW, DRIVER_NAME); - if (ret < 0) { - dev_err(&pdev->dev, - "failed to configure csgpio#%u (%d)\n", - i, ret); - goto err; - } - } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENODEV; diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c new file mode 100644 index 000000000000..37a3e0f8e752 --- /dev/null +++ b/drivers/spi/spi-fsi.c @@ -0,0 +1,558 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright (C) IBM Corporation 2020 + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/fsi.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/spi/spi.h> + +#define FSI_ENGID_SPI 0x23 +#define FSI_MBOX_ROOT_CTRL_8 0x2860 + +#define FSI2SPI_DATA0 0x00 +#define FSI2SPI_DATA1 0x04 +#define FSI2SPI_CMD 0x08 +#define FSI2SPI_CMD_WRITE BIT(31) +#define FSI2SPI_RESET 0x18 +#define FSI2SPI_STATUS 0x1c +#define FSI2SPI_STATUS_ANY_ERROR BIT(31) +#define FSI2SPI_IRQ 0x20 + +#define SPI_FSI_BASE 0x70000 +#define SPI_FSI_INIT_TIMEOUT_MS 1000 +#define SPI_FSI_MAX_TRANSFER_SIZE 2048 + +#define SPI_FSI_ERROR 0x0 +#define SPI_FSI_COUNTER_CFG 0x1 +#define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32) +#define SPI_FSI_CFG1 0x2 +#define SPI_FSI_CLOCK_CFG 0x3 +#define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32) +#define SPI_FSI_CLOCK_CFG_ECC_DISABLE (BIT_ULL(35) | BIT_ULL(33)) +#define SPI_FSI_CLOCK_CFG_RESET1 
(BIT_ULL(36) | BIT_ULL(38)) +#define SPI_FSI_CLOCK_CFG_RESET2 (BIT_ULL(37) | BIT_ULL(39)) +#define SPI_FSI_CLOCK_CFG_MODE (BIT_ULL(41) | BIT_ULL(42)) +#define SPI_FSI_CLOCK_CFG_SCK_RECV_DEL GENMASK_ULL(51, 44) +#define SPI_FSI_CLOCK_CFG_SCK_NO_DEL BIT_ULL(51) +#define SPI_FSI_CLOCK_CFG_SCK_DIV GENMASK_ULL(63, 52) +#define SPI_FSI_MMAP 0x4 +#define SPI_FSI_DATA_TX 0x5 +#define SPI_FSI_DATA_RX 0x6 +#define SPI_FSI_SEQUENCE 0x7 +#define SPI_FSI_SEQUENCE_STOP 0x00 +#define SPI_FSI_SEQUENCE_SEL_SLAVE(x) (0x10 | ((x) & 0xf)) +#define SPI_FSI_SEQUENCE_SHIFT_OUT(x) (0x30 | ((x) & 0xf)) +#define SPI_FSI_SEQUENCE_SHIFT_IN(x) (0x40 | ((x) & 0xf)) +#define SPI_FSI_SEQUENCE_COPY_DATA_TX 0xc0 +#define SPI_FSI_SEQUENCE_BRANCH(x) (0xe0 | ((x) & 0xf)) +#define SPI_FSI_STATUS 0x8 +#define SPI_FSI_STATUS_ERROR \ + (GENMASK_ULL(31, 21) | GENMASK_ULL(15, 12)) +#define SPI_FSI_STATUS_SEQ_STATE GENMASK_ULL(55, 48) +#define SPI_FSI_STATUS_SEQ_STATE_IDLE BIT_ULL(48) +#define SPI_FSI_STATUS_TDR_UNDERRUN BIT_ULL(57) +#define SPI_FSI_STATUS_TDR_OVERRUN BIT_ULL(58) +#define SPI_FSI_STATUS_TDR_FULL BIT_ULL(59) +#define SPI_FSI_STATUS_RDR_UNDERRUN BIT_ULL(61) +#define SPI_FSI_STATUS_RDR_OVERRUN BIT_ULL(62) +#define SPI_FSI_STATUS_RDR_FULL BIT_ULL(63) +#define SPI_FSI_STATUS_ANY_ERROR \ + (SPI_FSI_STATUS_ERROR | SPI_FSI_STATUS_TDR_UNDERRUN | \ + SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \ + SPI_FSI_STATUS_RDR_OVERRUN) +#define SPI_FSI_PORT_CTRL 0x9 + +struct fsi_spi { + struct device *dev; /* SPI controller device */ + struct fsi_device *fsi; /* FSI2SPI CFAM engine device */ + u32 base; +}; + +struct fsi_spi_sequence { + int bit; + u64 data; +}; + +static int fsi_spi_check_status(struct fsi_spi *ctx) +{ + int rc; + u32 sts; + __be32 sts_be; + + rc = fsi_device_read(ctx->fsi, FSI2SPI_STATUS, &sts_be, + sizeof(sts_be)); + if (rc) + return rc; + + sts = be32_to_cpu(sts_be); + if (sts & FSI2SPI_STATUS_ANY_ERROR) { + dev_err(ctx->dev, "Error with FSI2SPI interface: %08x.\n", sts); + return -EIO; + } + + return 0; +} + +static int fsi_spi_read_reg(struct fsi_spi *ctx, u32 offset, u64 *value) +{ + int rc; + __be32 cmd_be; + __be32 data_be; + u32 cmd = offset + ctx->base; + + *value = 0ULL; + + if (cmd & FSI2SPI_CMD_WRITE) + return -EINVAL; + + cmd_be = cpu_to_be32(cmd); + rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be)); + if (rc) + return rc; + + rc = fsi_spi_check_status(ctx); + if (rc) + return rc; + + rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA0, &data_be, + sizeof(data_be)); + if (rc) + return rc; + + *value |= (u64)be32_to_cpu(data_be) << 32; + + rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA1, &data_be, + sizeof(data_be)); + if (rc) + return rc; + + *value |= (u64)be32_to_cpu(data_be); + dev_dbg(ctx->dev, "Read %02x[%016llx].\n", offset, *value); + + return 0; +} + +static int fsi_spi_write_reg(struct fsi_spi *ctx, u32 offset, u64 value) +{ + int rc; + __be32 cmd_be; + __be32 data_be; + u32 cmd = offset + ctx->base; + + if (cmd & FSI2SPI_CMD_WRITE) + return -EINVAL; + + dev_dbg(ctx->dev, "Write %02x[%016llx].\n", offset, value); + + data_be = cpu_to_be32(upper_32_bits(value)); + rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA0, &data_be, + sizeof(data_be)); + if (rc) + return rc; + + data_be = cpu_to_be32(lower_32_bits(value)); + rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA1, &data_be, + sizeof(data_be)); + if (rc) + return rc; + + cmd_be = cpu_to_be32(cmd | FSI2SPI_CMD_WRITE); + rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be)); + if (rc) + return rc; + + return 
fsi_spi_check_status(ctx); +} + +static int fsi_spi_data_in(u64 in, u8 *rx, int len) +{ + int i; + int num_bytes = min(len, 8); + + for (i = 0; i < num_bytes; ++i) + rx[i] = (u8)(in >> (8 * ((num_bytes - 1) - i))); + + return num_bytes; +} + +static int fsi_spi_data_out(u64 *out, const u8 *tx, int len) +{ + int i; + int num_bytes = min(len, 8); + u8 *out_bytes = (u8 *)out; + + /* Unused bytes of the tx data should be 0. */ + *out = 0ULL; + + for (i = 0; i < num_bytes; ++i) + out_bytes[8 - (i + 1)] = tx[i]; + + return num_bytes; +} + +static int fsi_spi_reset(struct fsi_spi *ctx) +{ + int rc; + + dev_dbg(ctx->dev, "Resetting SPI controller.\n"); + + rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG, + SPI_FSI_CLOCK_CFG_RESET1); + if (rc) + return rc; + + return fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG, + SPI_FSI_CLOCK_CFG_RESET2); +} + +static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val) +{ + /* + * Add the next byte of instruction to the 8-byte sequence register. + * Then decrement the counter so that the next instruction will go in + * the right place. Return the number of "slots" left in the sequence + * register. + */ + seq->data |= (u64)val << seq->bit; + seq->bit -= 8; + + return ((64 - seq->bit) / 8) - 2; +} + +static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq) +{ + seq->bit = 56; + seq->data = 0ULL; +} + +static int fsi_spi_sequence_transfer(struct fsi_spi *ctx, + struct fsi_spi_sequence *seq, + struct spi_transfer *transfer) +{ + int loops; + int idx; + int rc; + u8 len = min(transfer->len, 8U); + u8 rem = transfer->len % len; + + loops = transfer->len / len; + + if (transfer->tx_buf) { + idx = fsi_spi_sequence_add(seq, + SPI_FSI_SEQUENCE_SHIFT_OUT(len)); + if (rem) + rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem); + } else if (transfer->rx_buf) { + idx = fsi_spi_sequence_add(seq, + SPI_FSI_SEQUENCE_SHIFT_IN(len)); + if (rem) + rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem); + } else { + return -EINVAL; + } + + if (loops > 1) { + fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx)); + + if (rem) + fsi_spi_sequence_add(seq, rem); + + rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, + SPI_FSI_COUNTER_CFG_LOOPS(loops - 1)); + if (rc) + return rc; + } + + return 0; +} + +static int fsi_spi_transfer_data(struct fsi_spi *ctx, + struct spi_transfer *transfer) +{ + int rc = 0; + u64 status = 0ULL; + + if (transfer->tx_buf) { + int nb; + int sent = 0; + u64 out = 0ULL; + const u8 *tx = transfer->tx_buf; + + while (transfer->len > sent) { + nb = fsi_spi_data_out(&out, &tx[sent], + (int)transfer->len - sent); + + rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, out); + if (rc) + return rc; + + do { + rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, + &status); + if (rc) + return rc; + + if (status & SPI_FSI_STATUS_ANY_ERROR) { + rc = fsi_spi_reset(ctx); + if (rc) + return rc; + + return -EREMOTEIO; + } + } while (status & SPI_FSI_STATUS_TDR_FULL); + + sent += nb; + } + } else if (transfer->rx_buf) { + int recv = 0; + u64 in = 0ULL; + u8 *rx = transfer->rx_buf; + + while (transfer->len > recv) { + do { + rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, + &status); + if (rc) + return rc; + + if (status & SPI_FSI_STATUS_ANY_ERROR) { + rc = fsi_spi_reset(ctx); + if (rc) + return rc; + + return -EREMOTEIO; + } + } while (!(status & SPI_FSI_STATUS_RDR_FULL)); + + rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in); + if (rc) + return rc; + + recv += fsi_spi_data_in(in, &rx[recv], + (int)transfer->len - recv); + } + } + + return 0; +} + +static int fsi_spi_transfer_init(struct fsi_spi *ctx) +{ + int rc; + 
bool reset = false; + unsigned long end; + u64 seq_state; + u64 clock_cfg = 0ULL; + u64 status = 0ULL; + u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE | + SPI_FSI_CLOCK_CFG_SCK_NO_DEL | + FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 4); + + end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS); + do { + if (time_after(jiffies, end)) + return -ETIMEDOUT; + + rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, &status); + if (rc) + return rc; + + seq_state = status & SPI_FSI_STATUS_SEQ_STATE; + + if (status & (SPI_FSI_STATUS_ANY_ERROR | + SPI_FSI_STATUS_TDR_FULL | + SPI_FSI_STATUS_RDR_FULL)) { + if (reset) + return -EIO; + + rc = fsi_spi_reset(ctx); + if (rc) + return rc; + + reset = true; + continue; + } + } while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE)); + + rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg); + if (rc) + return rc; + + if ((clock_cfg & (SPI_FSI_CLOCK_CFG_MM_ENABLE | + SPI_FSI_CLOCK_CFG_ECC_DISABLE | + SPI_FSI_CLOCK_CFG_MODE | + SPI_FSI_CLOCK_CFG_SCK_RECV_DEL | + SPI_FSI_CLOCK_CFG_SCK_DIV)) != wanted_clock_cfg) + rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG, + wanted_clock_cfg); + + return rc; +} + +static int fsi_spi_transfer_one_message(struct spi_controller *ctlr, + struct spi_message *mesg) +{ + int rc = 0; + u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1); + struct spi_transfer *transfer; + struct fsi_spi *ctx = spi_controller_get_devdata(ctlr); + + list_for_each_entry(transfer, &mesg->transfers, transfer_list) { + struct fsi_spi_sequence seq; + struct spi_transfer *next = NULL; + + /* Sequencer must do shift out (tx) first. */ + if (!transfer->tx_buf || + transfer->len > SPI_FSI_MAX_TRANSFER_SIZE) { + rc = -EINVAL; + goto error; + } + + dev_dbg(ctx->dev, "Start tx of %d bytes.\n", transfer->len); + + rc = fsi_spi_transfer_init(ctx); + if (rc < 0) + goto error; + + fsi_spi_sequence_init(&seq); + fsi_spi_sequence_add(&seq, seq_slave); + + rc = fsi_spi_sequence_transfer(ctx, &seq, transfer); + if (rc) + goto error; + + if (!list_is_last(&transfer->transfer_list, + &mesg->transfers)) { + next = list_next_entry(transfer, transfer_list); + + /* Sequencer can only do shift in (rx) after tx. */ + if (next->rx_buf) { + if (next->len > SPI_FSI_MAX_TRANSFER_SIZE) { + rc = -EINVAL; + goto error; + } + + dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n", + next->len); + + rc = fsi_spi_sequence_transfer(ctx, &seq, + next); + if (rc) + goto error; + } else { + next = NULL; + } + } + + fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SEL_SLAVE(0)); + + rc = fsi_spi_write_reg(ctx, SPI_FSI_SEQUENCE, seq.data); + if (rc) + goto error; + + rc = fsi_spi_transfer_data(ctx, transfer); + if (rc) + goto error; + + if (next) { + rc = fsi_spi_transfer_data(ctx, next); + if (rc) + goto error; + + transfer = next; + } + } + +error: + mesg->status = rc; + spi_finalize_current_message(ctlr); + + return rc; +} + +static size_t fsi_spi_max_transfer_size(struct spi_device *spi) +{ + return SPI_FSI_MAX_TRANSFER_SIZE; +} + +static int fsi_spi_probe(struct device *dev) +{ + int rc; + u32 root_ctrl_8; + struct device_node *np; + int num_controllers_registered = 0; + struct fsi_device *fsi = to_fsi_dev(dev); + + /* + * Check the SPI mux before attempting to probe. If the mux isn't set + * then the SPI controllers can't access their slave devices. 
+ */ + rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8, + sizeof(root_ctrl_8)); + if (rc) + return rc; + + if (!root_ctrl_8) { + dev_dbg(dev, "SPI mux not set, aborting probe.\n"); + return -ENODEV; + } + + for_each_available_child_of_node(dev->of_node, np) { + u32 base; + struct fsi_spi *ctx; + struct spi_controller *ctlr; + + if (of_property_read_u32(np, "reg", &base)) + continue; + + ctlr = spi_alloc_master(dev, sizeof(*ctx)); + if (!ctlr) + break; + + ctlr->dev.of_node = np; + ctlr->num_chipselect = of_get_available_child_count(np) ?: 1; + ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX; + ctlr->max_transfer_size = fsi_spi_max_transfer_size; + ctlr->transfer_one_message = fsi_spi_transfer_one_message; + + ctx = spi_controller_get_devdata(ctlr); + ctx->dev = &ctlr->dev; + ctx->fsi = fsi; + ctx->base = base + SPI_FSI_BASE; + + rc = devm_spi_register_controller(dev, ctlr); + if (rc) + spi_controller_put(ctlr); + else + num_controllers_registered++; + } + + if (!num_controllers_registered) + return -ENODEV; + + return 0; +} + +static const struct fsi_device_id fsi_spi_ids[] = { + { FSI_ENGID_SPI, FSI_VERSION_ANY }, + { } +}; +MODULE_DEVICE_TABLE(fsi, fsi_spi_ids); + +static struct fsi_driver fsi_spi_driver = { + .id_table = fsi_spi_ids, + .drv = { + .name = "spi-fsi", + .bus = &fsi_bus_type, + .probe = fsi_spi_probe, + }, +}; +module_fsi_driver(fsi_spi_driver); + +MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>"); +MODULE_DESCRIPTION("FSI attached SPI controller"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 6ec2dcb8c57a..50e41f66a2d7 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -20,16 +20,9 @@ #define DRIVER_NAME "fsl-dspi" -#ifdef CONFIG_M5441x -#define DSPI_FIFO_SIZE 16 -#else -#define DSPI_FIFO_SIZE 4 -#endif -#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024) - #define SPI_MCR 0x00 #define SPI_MCR_MASTER BIT(31) -#define SPI_MCR_PCSIS (0x3F << 16) +#define SPI_MCR_PCSIS(x) ((x) << 16) #define SPI_MCR_CLR_TXF BIT(11) #define SPI_MCR_CLR_RXF BIT(10) #define SPI_MCR_XSPI BIT(3) @@ -79,6 +72,7 @@ #define SPI_RSER 0x30 #define SPI_RSER_TCFQE BIT(31) #define SPI_RSER_EOQFE BIT(28) +#define SPI_RSER_CMDTCFE BIT(23) #define SPI_PUSHR 0x34 #define SPI_PUSHR_CMD_CONT BIT(15) @@ -109,57 +103,95 @@ #define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1) #define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4) -/* Register offsets for regmap_pushr */ -#define PUSHR_CMD 0x0 -#define PUSHR_TX 0x2 - #define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000) struct chip_data { u32 ctar_val; - u16 void_write_data; }; enum dspi_trans_mode { DSPI_EOQ_MODE = 0, - DSPI_TCFQ_MODE, + DSPI_XSPI_MODE, DSPI_DMA_MODE, }; struct fsl_dspi_devtype_data { enum dspi_trans_mode trans_mode; u8 max_clock_factor; - bool ptp_sts_supported; - bool xspi_mode; -}; - -static const struct fsl_dspi_devtype_data vf610_data = { - .trans_mode = DSPI_DMA_MODE, - .max_clock_factor = 2, -}; - -static const struct fsl_dspi_devtype_data ls1021a_v1_data = { - .trans_mode = DSPI_TCFQ_MODE, - .max_clock_factor = 8, - .ptp_sts_supported = true, - .xspi_mode = true, + int fifo_size; }; -static const struct fsl_dspi_devtype_data ls2085a_data = { - .trans_mode = DSPI_TCFQ_MODE, - .max_clock_factor = 8, - .ptp_sts_supported = true, +enum { + LS1021A, + LS1012A, + LS1028A, + LS1043A, + LS1046A, + LS2080A, + LS2085A, + LX2160A, + MCF5441X, + VF610, }; -static const struct fsl_dspi_devtype_data coldfire_data = { - .trans_mode = DSPI_EOQ_MODE, - 
.max_clock_factor = 8, +static const struct fsl_dspi_devtype_data devtype_data[] = { + [VF610] = { + .trans_mode = DSPI_DMA_MODE, + .max_clock_factor = 2, + .fifo_size = 4, + }, + [LS1021A] = { + /* Has A-011218 DMA erratum */ + .trans_mode = DSPI_XSPI_MODE, + .max_clock_factor = 8, + .fifo_size = 4, + }, + [LS1012A] = { + /* Has A-011218 DMA erratum */ + .trans_mode = DSPI_XSPI_MODE, + .max_clock_factor = 8, + .fifo_size = 16, + }, + [LS1028A] = { + .trans_mode = DSPI_XSPI_MODE, + .max_clock_factor = 8, + .fifo_size = 4, + }, + [LS1043A] = { + /* Has A-011218 DMA erratum */ + .trans_mode = DSPI_XSPI_MODE, + .max_clock_factor = 8, + .fifo_size = 16, + }, + [LS1046A] = { + /* Has A-011218 DMA erratum */ + .trans_mode = DSPI_XSPI_MODE, + .max_clock_factor = 8, + .fifo_size = 16, + }, + [LS2080A] = { + .trans_mode = DSPI_DMA_MODE, + .max_clock_factor = 8, + .fifo_size = 4, + }, + [LS2085A] = { + .trans_mode = DSPI_DMA_MODE, + .max_clock_factor = 8, + .fifo_size = 4, + }, + [LX2160A] = { + .trans_mode = DSPI_DMA_MODE, + .max_clock_factor = 8, + .fifo_size = 4, + }, + [MCF5441X] = { + .trans_mode = DSPI_EOQ_MODE, + .max_clock_factor = 8, + .fifo_size = 16, + }, }; struct fsl_dspi_dma { - /* Length of transfer in words of DSPI_FIFO_SIZE */ - u32 curr_xfer_len; - u32 *tx_dma_buf; struct dma_chan *chan_tx; dma_addr_t tx_dma_phys; @@ -189,36 +221,99 @@ struct fsl_dspi { size_t len; const void *tx; void *rx; - void *rx_end; - u16 void_write_data; u16 tx_cmd; - u8 bits_per_word; - u8 bytes_per_word; const struct fsl_dspi_devtype_data *devtype_data; - wait_queue_head_t waitq; - u32 waitflags; + struct completion xfer_done; struct fsl_dspi_dma *dma; + + int oper_word_size; + int oper_bits_per_word; + + int words_in_flight; + + /* + * Offsets for CMD and TXDATA within SPI_PUSHR when accessed + * individually (in XSPI mode) + */ + int pushr_cmd; + int pushr_tx; + + void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata); + void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata); }; +static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) +{ + memcpy(txdata, dspi->tx, dspi->oper_word_size); + dspi->tx += dspi->oper_word_size; +} + +static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) +{ + memcpy(dspi->rx, &rxdata, dspi->oper_word_size); + dspi->rx += dspi->oper_word_size; +} + +static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) +{ + *txdata = cpu_to_be32(*(u32 *)dspi->tx); + dspi->tx += sizeof(u32); +} + +static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) +{ + *(u32 *)dspi->rx = be32_to_cpu(rxdata); + dspi->rx += sizeof(u32); +} + +static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) +{ + *txdata = cpu_to_be16(*(u16 *)dspi->tx); + dspi->tx += sizeof(u16); +} + +static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) +{ + *(u16 *)dspi->rx = be16_to_cpu(rxdata); + dspi->rx += sizeof(u16); +} + +static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) +{ + u16 hi = *(u16 *)dspi->tx; + u16 lo = *(u16 *)(dspi->tx + 2); + + *txdata = (u32)hi << 16 | lo; + dspi->tx += sizeof(u32); +} + +static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) +{ + u16 hi = rxdata & 0xffff; + u16 lo = rxdata >> 16; + + *(u16 *)dspi->rx = lo; + *(u16 *)(dspi->rx + 2) = hi; + dspi->rx += sizeof(u32); +} + +/* + * Pop one word from the TX buffer for pushing into the + * PUSHR register (TX FIFO) + */ static u32 dspi_pop_tx(struct fsl_dspi *dspi) { u32 txdata = 0; - if (dspi->tx) { - if 
(dspi->bytes_per_word == 1) - txdata = *(u8 *)dspi->tx; - else if (dspi->bytes_per_word == 2) - txdata = *(u16 *)dspi->tx; - else /* dspi->bytes_per_word == 4 */ - txdata = *(u32 *)dspi->tx; - dspi->tx += dspi->bytes_per_word; - } - dspi->len -= dspi->bytes_per_word; + if (dspi->tx) + dspi->host_to_dev(dspi, &txdata); + dspi->len -= dspi->oper_word_size; return txdata; } +/* Prepare one TX FIFO entry (txdata plus cmd) */ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi) { u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi); @@ -231,21 +326,12 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi) return cmd << 16 | data; } +/* Push one word to the RX buffer from the POPR register (RX FIFO) */ static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata) { if (!dspi->rx) return; - - /* Mask off undefined bits */ - rxdata &= (1 << dspi->bits_per_word) - 1; - - if (dspi->bytes_per_word == 1) - *(u8 *)dspi->rx = rxdata; - else if (dspi->bytes_per_word == 2) - *(u16 *)dspi->rx = rxdata; - else /* dspi->bytes_per_word == 4 */ - *(u32 *)dspi->rx = rxdata; - dspi->rx += dspi->bytes_per_word; + dspi->dev_to_host(dspi, rxdata); } static void dspi_tx_dma_callback(void *arg) @@ -263,7 +349,7 @@ static void dspi_rx_dma_callback(void *arg) int i; if (dspi->rx) { - for (i = 0; i < dma->curr_xfer_len; i++) + for (i = 0; i < dspi->words_in_flight; i++) dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]); } @@ -277,12 +363,12 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi) int time_left; int i; - for (i = 0; i < dma->curr_xfer_len; i++) + for (i = 0; i < dspi->words_in_flight; i++) dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi); dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx, dma->tx_dma_phys, - dma->curr_xfer_len * + dspi->words_in_flight * DMA_SLAVE_BUSWIDTH_4_BYTES, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); @@ -300,7 +386,7 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi) dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx, dma->rx_dma_phys, - dma->curr_xfer_len * + dspi->words_in_flight * DMA_SLAVE_BUSWIDTH_4_BYTES, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); @@ -348,45 +434,42 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi) return 0; } +static void dspi_setup_accel(struct fsl_dspi *dspi); + static int dspi_dma_xfer(struct fsl_dspi *dspi) { struct spi_message *message = dspi->cur_msg; struct device *dev = &dspi->pdev->dev; - struct fsl_dspi_dma *dma = dspi->dma; - int curr_remaining_bytes; - int bytes_per_buffer; int ret = 0; - curr_remaining_bytes = dspi->len; - bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE; - while (curr_remaining_bytes) { - /* Check if current transfer fits the DMA buffer */ - dma->curr_xfer_len = curr_remaining_bytes - / dspi->bytes_per_word; - if (dma->curr_xfer_len > bytes_per_buffer) - dma->curr_xfer_len = bytes_per_buffer; + /* + * dspi->len gets decremented by dspi_pop_tx_pushr in + * dspi_next_xfer_dma_submit + */ + while (dspi->len) { + /* Figure out operational bits-per-word for this chunk */ + dspi_setup_accel(dspi); + + dspi->words_in_flight = dspi->len / dspi->oper_word_size; + if (dspi->words_in_flight > dspi->devtype_data->fifo_size) + dspi->words_in_flight = dspi->devtype_data->fifo_size; + + message->actual_length += dspi->words_in_flight * + dspi->oper_word_size; ret = dspi_next_xfer_dma_submit(dspi); if (ret) { dev_err(dev, "DMA transfer failed\n"); - goto exit; - - } else { - const int len = - dma->curr_xfer_len * dspi->bytes_per_word; - curr_remaining_bytes -= len; - message->actual_length 
+= len; - if (curr_remaining_bytes < 0) - curr_remaining_bytes = 0; + break; } } -exit: return ret; } static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr) { + int dma_bufsize = dspi->devtype_data->fifo_size * 2; struct device *dev = &dspi->pdev->dev; struct dma_slave_config cfg; struct fsl_dspi_dma *dma; @@ -410,15 +493,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr) goto err_tx_channel; } - dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE, - &dma->tx_dma_phys, GFP_KERNEL); + dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev, + dma_bufsize, &dma->tx_dma_phys, + GFP_KERNEL); if (!dma->tx_dma_buf) { ret = -ENOMEM; goto err_tx_dma_buf; } - dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE, - &dma->rx_dma_phys, GFP_KERNEL); + dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev, + dma_bufsize, &dma->rx_dma_phys, + GFP_KERNEL); if (!dma->rx_dma_buf) { ret = -ENOMEM; goto err_rx_dma_buf; @@ -454,11 +539,11 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr) return 0; err_slave_config: - dma_free_coherent(dev, DSPI_DMA_BUFSIZE, - dma->rx_dma_buf, dma->rx_dma_phys); + dma_free_coherent(dma->chan_rx->device->dev, + dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys); err_rx_dma_buf: - dma_free_coherent(dev, DSPI_DMA_BUFSIZE, - dma->tx_dma_buf, dma->tx_dma_phys); + dma_free_coherent(dma->chan_tx->device->dev, + dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys); err_tx_dma_buf: dma_release_channel(dma->chan_tx); err_tx_channel: @@ -472,21 +557,21 @@ err_tx_channel: static void dspi_release_dma(struct fsl_dspi *dspi) { + int dma_bufsize = dspi->devtype_data->fifo_size * 2; struct fsl_dspi_dma *dma = dspi->dma; - struct device *dev = &dspi->pdev->dev; if (!dma) return; if (dma->chan_tx) { - dma_unmap_single(dev, dma->tx_dma_phys, - DSPI_DMA_BUFSIZE, DMA_TO_DEVICE); + dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys, + dma_bufsize, DMA_TO_DEVICE); dma_release_channel(dma->chan_tx); } if (dma->chan_rx) { - dma_unmap_single(dev, dma->rx_dma_phys, - DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE); + dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys, + dma_bufsize, DMA_FROM_DEVICE); dma_release_channel(dma->chan_rx); } } @@ -562,124 +647,220 @@ static void ns_delay_scale(char *psc, char *sc, int delay_ns, } } -static void fifo_write(struct fsl_dspi *dspi) +static void dspi_pushr_write(struct fsl_dspi *dspi) { regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi)); } -static void cmd_fifo_write(struct fsl_dspi *dspi) +static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd) { - u16 cmd = dspi->tx_cmd; - - if (dspi->len > 0) + /* + * The only time when the PCS doesn't need continuation after this word + * is when it's last. We need to look ahead, because we actually call + * dspi_pop_tx (the function that decrements dspi->len) _after_ + * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One + * word is enough. If there's more to transmit than that, + * dspi_xspi_write will know to split the FIFO writes in 2, and + * generate a new PUSHR command with the final word that will have PCS + * deasserted (not continued) here. 
+ */ + if (dspi->len > dspi->oper_word_size) cmd |= SPI_PUSHR_CMD_CONT; - regmap_write(dspi->regmap_pushr, PUSHR_CMD, cmd); + regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd); } -static void tx_fifo_write(struct fsl_dspi *dspi, u16 txdata) +static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata) { - regmap_write(dspi->regmap_pushr, PUSHR_TX, txdata); + regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata); } -static void dspi_tcfq_write(struct fsl_dspi *dspi) +static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words) { - /* Clear transfer count */ - dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT; + int num_bytes = num_words * dspi->oper_word_size; + u16 tx_cmd = dspi->tx_cmd; - if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) { - /* Write the CMD FIFO entry first, and then the two - * corresponding TX FIFO entries. - */ - u32 data = dspi_pop_tx(dspi); + /* + * If the PCS needs to de-assert (i.e. we're at the end of the buffer + * and cs_change does not want the PCS to stay on), then we need a new + * PUSHR command, since this one (for the body of the buffer) + * necessarily has the CONT bit set. + * So send one word less during this go, to force a split and a command + * with a single word next time, when CONT will be unset. + */ + if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len) + tx_cmd |= SPI_PUSHR_CMD_EOQ; - cmd_fifo_write(dspi); - tx_fifo_write(dspi, data & 0xFFFF); - tx_fifo_write(dspi, data >> 16); - } else { - /* Write one entry to both TX FIFO and CMD FIFO - * simultaneously. - */ - fifo_write(dspi); - } -} + /* Update CTARE */ + regmap_write(dspi->regmap, SPI_CTARE(0), + SPI_FRAME_EBITS(dspi->oper_bits_per_word) | + SPI_CTARE_DTCP(num_words)); -static u32 fifo_read(struct fsl_dspi *dspi) -{ - u32 rxdata = 0; + /* + * Write the CMD FIFO entry first, and then the two + * corresponding TX FIFO entries (or one...). 
+ */ + dspi_pushr_cmd_write(dspi, tx_cmd); - regmap_read(dspi->regmap, SPI_POPR, &rxdata); - return rxdata; -} + /* Fill TX FIFO with as many transfers as possible */ + while (num_words--) { + u32 data = dspi_pop_tx(dspi); -static void dspi_tcfq_read(struct fsl_dspi *dspi) -{ - dspi_push_rx(dspi, fifo_read(dspi)); + dspi_pushr_txdata_write(dspi, data & 0xFFFF); + if (dspi->oper_bits_per_word > 16) + dspi_pushr_txdata_write(dspi, data >> 16); + } } -static void dspi_eoq_write(struct fsl_dspi *dspi) +static void dspi_eoq_fifo_write(struct fsl_dspi *dspi, int num_words) { - int fifo_size = DSPI_FIFO_SIZE; u16 xfer_cmd = dspi->tx_cmd; /* Fill TX FIFO with as many transfers as possible */ - while (dspi->len && fifo_size--) { + while (num_words--) { dspi->tx_cmd = xfer_cmd; /* Request EOQF for last transfer in FIFO */ - if (dspi->len == dspi->bytes_per_word || fifo_size == 0) + if (num_words == 0) dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ; - /* Clear transfer count for first transfer in FIFO */ - if (fifo_size == (DSPI_FIFO_SIZE - 1)) - dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT; /* Write combined TX FIFO and CMD FIFO entry */ - fifo_write(dspi); + dspi_pushr_write(dspi); } } -static void dspi_eoq_read(struct fsl_dspi *dspi) +static u32 dspi_popr_read(struct fsl_dspi *dspi) { - int fifo_size = DSPI_FIFO_SIZE; + u32 rxdata = 0; + + regmap_read(dspi->regmap, SPI_POPR, &rxdata); + return rxdata; +} + +static void dspi_fifo_read(struct fsl_dspi *dspi) +{ + int num_fifo_entries = dspi->words_in_flight; /* Read one FIFO entry and push to rx buffer */ - while ((dspi->rx < dspi->rx_end) && fifo_size--) - dspi_push_rx(dspi, fifo_read(dspi)); + while (num_fifo_entries--) + dspi_push_rx(dspi, dspi_popr_read(dspi)); } -static int dspi_rxtx(struct fsl_dspi *dspi) +static void dspi_setup_accel(struct fsl_dspi *dspi) { + struct spi_transfer *xfer = dspi->cur_transfer; + bool odd = !!(dspi->len & 1); + + /* No accel for frames not multiple of 8 bits at the moment */ + if (xfer->bits_per_word % 8) + goto no_accel; + + if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) { + dspi->oper_bits_per_word = 16; + } else if (odd && dspi->len <= dspi->devtype_data->fifo_size) { + dspi->oper_bits_per_word = 8; + } else { + /* Start off with maximum supported by hardware */ + if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) + dspi->oper_bits_per_word = 32; + else + dspi->oper_bits_per_word = 16; + + /* + * And go down only if the buffer can't be sent with + * words this big + */ + do { + if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8)) + break; + + dspi->oper_bits_per_word /= 2; + } while (dspi->oper_bits_per_word > 8); + } + + if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) { + dspi->dev_to_host = dspi_8on32_dev_to_host; + dspi->host_to_dev = dspi_8on32_host_to_dev; + } else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) { + dspi->dev_to_host = dspi_8on16_dev_to_host; + dspi->host_to_dev = dspi_8on16_host_to_dev; + } else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) { + dspi->dev_to_host = dspi_16on32_dev_to_host; + dspi->host_to_dev = dspi_16on32_host_to_dev; + } else { +no_accel: + dspi->dev_to_host = dspi_native_dev_to_host; + dspi->host_to_dev = dspi_native_host_to_dev; + dspi->oper_bits_per_word = xfer->bits_per_word; + } + + dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8); + + /* + * Update CTAR here (code is common for EOQ, XSPI and DMA modes). 
+ * We will update CTARE in the portion specific to XSPI, when we + * also know the preload value (DTCP). + */ + regmap_write(dspi->regmap, SPI_CTAR(0), + dspi->cur_chip->ctar_val | + SPI_FRAME_BITS(dspi->oper_bits_per_word)); +} + +static void dspi_fifo_write(struct fsl_dspi *dspi) +{ + int num_fifo_entries = dspi->devtype_data->fifo_size; + struct spi_transfer *xfer = dspi->cur_transfer; struct spi_message *msg = dspi->cur_msg; - enum dspi_trans_mode trans_mode; - u16 spi_tcnt; - u32 spi_tcr; + int num_words, num_bytes; - spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer, - dspi->progress, !dspi->irq); + dspi_setup_accel(dspi); + + /* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */ + if (dspi->oper_word_size == 4) + num_fifo_entries /= 2; - /* Get transfer counter (in number of SPI transfers). It was - * reset to 0 when transfer(s) were started. + /* + * Integer division intentionally trims off odd (or non-multiple of 4) + * numbers of bytes at the end of the buffer, which will be sent next + * time using a smaller oper_word_size. */ - regmap_read(dspi->regmap, SPI_TCR, &spi_tcr); - spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr); + num_words = dspi->len / dspi->oper_word_size; + if (num_words > num_fifo_entries) + num_words = num_fifo_entries; + /* Update total number of bytes that were transferred */ - msg->actual_length += spi_tcnt * dspi->bytes_per_word; - dspi->progress += spi_tcnt; + num_bytes = num_words * dspi->oper_word_size; + msg->actual_length += num_bytes; + dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8); - trans_mode = dspi->devtype_data->trans_mode; - if (trans_mode == DSPI_EOQ_MODE) - dspi_eoq_read(dspi); - else if (trans_mode == DSPI_TCFQ_MODE) - dspi_tcfq_read(dspi); + /* + * Update shared variable for use in the next interrupt (both in + * dspi_fifo_read and in dspi_fifo_write). + */ + dspi->words_in_flight = num_words; + + spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq); + + if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE) + dspi_eoq_fifo_write(dspi, num_words); + else + dspi_xspi_fifo_write(dspi, num_words); + /* + * Everything after this point is in a potential race with the next + * interrupt, so we must never use dspi->words_in_flight again since it + * might already be modified by the next dspi_fifo_write. + */ + + spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer, + dspi->progress, !dspi->irq); +} + +static int dspi_rxtx(struct fsl_dspi *dspi) +{ + dspi_fifo_read(dspi); if (!dspi->len) /* Success! 
*/ return 0; - spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, - dspi->progress, !dspi->irq); - - if (trans_mode == DSPI_EOQ_MODE) - dspi_eoq_write(dspi); - else if (trans_mode == DSPI_TCFQ_MODE) - dspi_tcfq_write(dspi); + dspi_fifo_write(dspi); return -EINPROGRESS; } @@ -693,7 +874,7 @@ static int dspi_poll(struct fsl_dspi *dspi) regmap_read(dspi->regmap, SPI_SR, &spi_sr); regmap_write(dspi->regmap, SPI_SR, spi_sr); - if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) + if (spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF)) break; } while (--tries); @@ -711,13 +892,11 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id) regmap_read(dspi->regmap, SPI_SR, &spi_sr); regmap_write(dspi->regmap, SPI_SR, spi_sr); - if (!(spi_sr & SPI_SR_EOQF)) + if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF))) return IRQ_NONE; - if (dspi_rxtx(dspi) == 0) { - dspi->waitflags = 1; - wake_up_interruptible(&dspi->waitq); - } + if (dspi_rxtx(dspi) == 0) + complete(&dspi->xfer_done); return IRQ_HANDLED; } @@ -727,7 +906,6 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, { struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); struct spi_device *spi = message->spi; - enum dspi_trans_mode trans_mode; struct spi_transfer *transfer; int status = 0; @@ -757,76 +935,38 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi->tx_cmd |= SPI_PUSHR_CMD_CONT; } - dspi->void_write_data = dspi->cur_chip->void_write_data; - dspi->tx = transfer->tx_buf; dspi->rx = transfer->rx_buf; - dspi->rx_end = dspi->rx + transfer->len; dspi->len = transfer->len; dspi->progress = 0; - /* Validated transfer specific frame size (defaults applied) */ - dspi->bits_per_word = transfer->bits_per_word; - if (transfer->bits_per_word <= 8) - dspi->bytes_per_word = 1; - else if (transfer->bits_per_word <= 16) - dspi->bytes_per_word = 2; - else - dspi->bytes_per_word = 4; regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); - regmap_write(dspi->regmap, SPI_CTAR(0), - dspi->cur_chip->ctar_val | - SPI_FRAME_BITS(transfer->bits_per_word)); - if (dspi->devtype_data->xspi_mode) - regmap_write(dspi->regmap, SPI_CTARE(0), - SPI_FRAME_EBITS(transfer->bits_per_word) | - SPI_CTARE_DTCP(1)); spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, dspi->progress, !dspi->irq); - trans_mode = dspi->devtype_data->trans_mode; - switch (trans_mode) { - case DSPI_EOQ_MODE: - regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE); - dspi_eoq_write(dspi); - break; - case DSPI_TCFQ_MODE: - regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE); - dspi_tcfq_write(dspi); - break; - case DSPI_DMA_MODE: - regmap_write(dspi->regmap, SPI_RSER, - SPI_RSER_TFFFE | SPI_RSER_TFFFD | - SPI_RSER_RFDFE | SPI_RSER_RFDFD); + if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { status = dspi_dma_xfer(dspi); - break; - default: - dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n", - trans_mode); - status = -EINVAL; - goto out; - } - - if (!dspi->irq) { - do { - status = dspi_poll(dspi); - } while (status == -EINPROGRESS); - } else if (trans_mode != DSPI_DMA_MODE) { - status = wait_event_interruptible(dspi->waitq, - dspi->waitflags); - dspi->waitflags = 0; + } else { + dspi_fifo_write(dspi); + + if (dspi->irq) { + wait_for_completion(&dspi->xfer_done); + reinit_completion(&dspi->xfer_done); + } else { + do { + status = dspi_poll(dspi); + } while (status == -EINPROGRESS); + } } if (status) - dev_err(&dspi->pdev->dev, - "Waiting for transfer to complete failed!\n"); + break; 
spi_transfer_delay_exec(transfer); } -out: message->status = status; spi_finalize_current_message(ctlr); @@ -864,8 +1004,6 @@ static int dspi_setup(struct spi_device *spi) sck_cs_delay = pdata->sck_cs_delay; } - chip->void_write_data = 0; - clkrate = clk_get_rate(dspi->clk); hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate); @@ -909,9 +1047,34 @@ static void dspi_cleanup(struct spi_device *spi) } static const struct of_device_id fsl_dspi_dt_ids[] = { - { .compatible = "fsl,vf610-dspi", .data = &vf610_data, }, - { .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, }, - { .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, }, + { + .compatible = "fsl,vf610-dspi", + .data = &devtype_data[VF610], + }, { + .compatible = "fsl,ls1021a-v1.0-dspi", + .data = &devtype_data[LS1021A], + }, { + .compatible = "fsl,ls1012a-dspi", + .data = &devtype_data[LS1012A], + }, { + .compatible = "fsl,ls1028a-dspi", + .data = &devtype_data[LS1028A], + }, { + .compatible = "fsl,ls1043a-dspi", + .data = &devtype_data[LS1043A], + }, { + .compatible = "fsl,ls1046a-dspi", + .data = &devtype_data[LS1046A], + }, { + .compatible = "fsl,ls2080a-dspi", + .data = &devtype_data[LS2080A], + }, { + .compatible = "fsl,ls2085a-dspi", + .data = &devtype_data[LS2085A], + }, { + .compatible = "fsl,lx2160a-dspi", + .data = &devtype_data[LX2160A], + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids); @@ -997,20 +1160,40 @@ static const struct regmap_config dspi_xspi_regmap_config[] = { }, }; -static void dspi_init(struct fsl_dspi *dspi) +static int dspi_init(struct fsl_dspi *dspi) { - unsigned int mcr = SPI_MCR_PCSIS; + unsigned int mcr; + + /* Set idle states for all chip select signals to high */ + mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->num_chipselect - 1, 0)); - if (dspi->devtype_data->xspi_mode) + if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) mcr |= SPI_MCR_XSPI; if (!spi_controller_is_slave(dspi->ctlr)) mcr |= SPI_MCR_MASTER; regmap_write(dspi->regmap, SPI_MCR, mcr); regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); - if (dspi->devtype_data->xspi_mode) - regmap_write(dspi->regmap, SPI_CTARE(0), - SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1)); + + switch (dspi->devtype_data->trans_mode) { + case DSPI_EOQ_MODE: + regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE); + break; + case DSPI_XSPI_MODE: + regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE); + break; + case DSPI_DMA_MODE: + regmap_write(dspi->regmap, SPI_RSER, + SPI_RSER_TFFFE | SPI_RSER_TFFFD | + SPI_RSER_RFDFE | SPI_RSER_RFDFD); + break; + default: + dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n", + dspi->devtype_data->trans_mode); + return -EINVAL; + } + + return 0; } static int dspi_slave_abort(struct spi_master *master) @@ -1021,8 +1204,10 @@ static int dspi_slave_abort(struct spi_master *master) * Terminate all pending DMA transactions for the SPI working * in SLAVE mode. 
*/ - dmaengine_terminate_sync(dspi->dma->chan_rx); - dmaengine_terminate_sync(dspi->dma->chan_tx); + if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { + dmaengine_terminate_sync(dspi->dma->chan_rx); + dmaengine_terminate_sync(dspi->dma->chan_tx); + } /* Clear the internal DSPI RX and TX FIFO buffers */ regmap_update_bits(dspi->regmap, SPI_MCR, @@ -1032,16 +1217,33 @@ static int dspi_slave_abort(struct spi_master *master) return 0; } +/* + * EOQ mode will inevitably deassert its PCS signal on last word in a queue + * (hardware limitation), so we need to inform the spi_device that larger + * buffers than the FIFO size are going to have the chip select randomly + * toggling, so it has a chance to adapt its message sizes. + */ +static size_t dspi_max_message_size(struct spi_device *spi) +{ + struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller); + + if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE) + return dspi->devtype_data->fifo_size; + + return SIZE_MAX; +} + static int dspi_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; const struct regmap_config *regmap_config; struct fsl_dspi_platform_data *pdata; struct spi_controller *ctlr; - int ret, cs_num, bus_num; + int ret, cs_num, bus_num = -1; struct fsl_dspi *dspi; struct resource *res; void __iomem *base; + bool big_endian; ctlr = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi)); if (!ctlr) @@ -1053,6 +1255,7 @@ static int dspi_probe(struct platform_device *pdev) ctlr->setup = dspi_setup; ctlr->transfer_one_message = dspi_transfer_one_message; + ctlr->max_message_size = dspi_max_message_size; ctlr->dev.of_node = pdev->dev.of_node; ctlr->cleanup = dspi_cleanup; @@ -1064,7 +1267,9 @@ static int dspi_probe(struct platform_device *pdev) ctlr->num_chipselect = pdata->cs_num; ctlr->bus_num = pdata->bus_num; - dspi->devtype_data = &coldfire_data; + /* Only Coldfire uses platform data */ + dspi->devtype_data = &devtype_data[MCF5441X]; + big_endian = true; } else { ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num); @@ -1074,11 +1279,7 @@ static int dspi_probe(struct platform_device *pdev) } ctlr->num_chipselect = cs_num; - ret = of_property_read_u32(np, "bus-num", &bus_num); - if (ret < 0) { - dev_err(&pdev->dev, "can't get bus-num\n"); - goto out_ctlr_put; - } + of_property_read_u32(np, "bus-num", &bus_num); ctlr->bus_num = bus_num; if (of_property_read_bool(np, "spi-slave")) @@ -1090,9 +1291,18 @@ static int dspi_probe(struct platform_device *pdev) ret = -EFAULT; goto out_ctlr_put; } + + big_endian = of_device_is_big_endian(np); + } + if (big_endian) { + dspi->pushr_cmd = 0; + dspi->pushr_tx = 2; + } else { + dspi->pushr_cmd = 2; + dspi->pushr_tx = 0; } - if (dspi->devtype_data->xspi_mode) + if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); else ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); @@ -1104,7 +1314,7 @@ static int dspi_probe(struct platform_device *pdev) goto out_ctlr_put; } - if (dspi->devtype_data->xspi_mode) + if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) regmap_config = &dspi_xspi_regmap_config[0]; else regmap_config = &dspi_regmap_config; @@ -1116,7 +1326,7 @@ static int dspi_probe(struct platform_device *pdev) goto out_ctlr_put; } - if (dspi->devtype_data->xspi_mode) { + if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) { dspi->regmap_pushr = devm_regmap_init_mmio( &pdev->dev, base + SPI_PUSHR, &dspi_xspi_regmap_config[1]); @@ -1139,10 +1349,9 @@ static int dspi_probe(struct 
platform_device *pdev) if (ret) goto out_ctlr_put; - dspi_init(dspi); - - if (dspi->devtype_data->trans_mode == DSPI_TCFQ_MODE) - goto poll_mode; + ret = dspi_init(dspi); + if (ret) + goto out_clk_put; dspi->irq = platform_get_irq(pdev, 0); if (dspi->irq <= 0) { @@ -1159,7 +1368,7 @@ static int dspi_probe(struct platform_device *pdev) goto out_clk_put; } - init_waitqueue_head(&dspi->waitq); + init_completion(&dspi->xfer_done); poll_mode: @@ -1174,7 +1383,8 @@ poll_mode: ctlr->max_speed_hz = clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor; - ctlr->ptp_sts_supported = dspi->devtype_data->ptp_sts_supported; + if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE) + ctlr->ptp_sts_supported = true; platform_set_drvdata(pdev, ctlr); diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index d0b8cc741a24..8b41b70f6f5c 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -86,8 +86,6 @@ #define TCR_RXMSK BIT(19) #define TCR_TXMSK BIT(18) -static int clkdivs[] = {1, 2, 4, 8, 16, 32, 64, 128}; - struct lpspi_config { u8 bpw; u8 chip_select; @@ -125,7 +123,7 @@ struct fsl_lpspi_data { struct completion dma_rx_completion; struct completion dma_tx_completion; - int chipselect[0]; + int chipselect[]; }; static const struct of_device_id fsl_lpspi_dt_ids[] = { @@ -331,15 +329,14 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) } for (prescale = 0; prescale < 8; prescale++) { - scldiv = perclk_rate / - (clkdivs[prescale] * config.speed_hz) - 2; + scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2; if (scldiv < 256) { fsl_lpspi->config.prescale = prescale; break; } } - if (prescale == 8 && scldiv >= 256) + if (scldiv >= 256) return -EINVAL; writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16), diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index e8a499cd1f13..02e5cba0a5bb 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -484,7 +484,7 @@ static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q) } if (needs_wakeup_wait_mode(q)) - pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0); + cpu_latency_qos_add_request(&q->pm_qos_req, 0); return 0; } @@ -492,7 +492,7 @@ static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q) static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q) { if (needs_wakeup_wait_mode(q)) - pm_qos_remove_request(&q->pm_qos_req); + cpu_latency_qos_remove_request(&q->pm_qos_req); clk_disable_unprepare(q->clk); clk_disable_unprepare(q->clk_en); diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index 6f3d64a1a2b3..c3972424af71 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -6,7 +6,6 @@ #include <linux/io.h> #include <linux/log2.h> #include <linux/module.h> -#include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/qcom-geni-se.h> @@ -536,6 +535,7 @@ static int spi_geni_probe(struct platform_device *pdev) struct spi_geni_master *mas; void __iomem *base; struct clk *clk; + struct device *dev = &pdev->dev; irq = platform_get_irq(pdev, 0); if (irq < 0) @@ -545,28 +545,25 @@ static int spi_geni_probe(struct platform_device *pdev) if (IS_ERR(base)) return PTR_ERR(base); - clk = devm_clk_get(&pdev->dev, "se"); - if (IS_ERR(clk)) { - dev_err(&pdev->dev, "Err getting SE Core clk %ld\n", - PTR_ERR(clk)); + clk = devm_clk_get(dev, "se"); + if (IS_ERR(clk)) return PTR_ERR(clk); - } - spi = spi_alloc_master(&pdev->dev, sizeof(*mas)); + spi = spi_alloc_master(dev, 
sizeof(*mas)); if (!spi) return -ENOMEM; platform_set_drvdata(pdev, spi); mas = spi_master_get_devdata(spi); mas->irq = irq; - mas->dev = &pdev->dev; - mas->se.dev = &pdev->dev; - mas->se.wrapper = dev_get_drvdata(pdev->dev.parent); + mas->dev = dev; + mas->se.dev = dev; + mas->se.wrapper = dev_get_drvdata(dev->parent); mas->se.base = base; mas->se.clk = clk; spi->bus_num = -1; - spi->dev.of_node = pdev->dev.of_node; + spi->dev.of_node = dev->of_node; spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH; spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); spi->num_chipselect = 4; @@ -579,14 +576,13 @@ static int spi_geni_probe(struct platform_device *pdev) init_completion(&mas->xfer_done); spin_lock_init(&mas->lock); - pm_runtime_enable(&pdev->dev); + pm_runtime_enable(dev); ret = spi_geni_init(mas); if (ret) goto spi_geni_probe_runtime_disable; - ret = request_irq(mas->irq, geni_spi_isr, - IRQF_TRIGGER_HIGH, "spi_geni", spi); + ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi); if (ret) goto spi_geni_probe_runtime_disable; @@ -598,7 +594,7 @@ static int spi_geni_probe(struct platform_device *pdev) spi_geni_probe_free_irq: free_irq(mas->irq, spi); spi_geni_probe_runtime_disable: - pm_runtime_disable(&pdev->dev); + pm_runtime_disable(dev); spi_master_put(spi); return ret; } diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c index 4cf8fc80a7b7..e3b57252d075 100644 --- a/drivers/spi/spi-hisi-sfc-v3xx.c +++ b/drivers/spi/spi-hisi-sfc-v3xx.c @@ -7,6 +7,7 @@ #include <linux/acpi.h> #include <linux/bitops.h> +#include <linux/dmi.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -17,6 +18,12 @@ #define HISI_SFC_V3XX_VERSION (0x1f8) #define HISI_SFC_V3XX_CMD_CFG (0x300) +#define HISI_SFC_V3XX_CMD_CFG_DUAL_IN_DUAL_OUT (1 << 17) +#define HISI_SFC_V3XX_CMD_CFG_DUAL_IO (2 << 17) +#define HISI_SFC_V3XX_CMD_CFG_FULL_DIO (3 << 17) +#define HISI_SFC_V3XX_CMD_CFG_QUAD_IN_QUAD_OUT (5 << 17) +#define HISI_SFC_V3XX_CMD_CFG_QUAD_IO (6 << 17) +#define HISI_SFC_V3XX_CMD_CFG_FULL_QIO (7 << 17) #define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9 #define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8) #define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7) @@ -161,6 +168,43 @@ static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host, if (op->addr.nbytes) config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK; + switch (op->data.buswidth) { + case 0 ... 
1: + break; + case 2: + if (op->addr.buswidth <= 1) { + config |= HISI_SFC_V3XX_CMD_CFG_DUAL_IN_DUAL_OUT; + } else if (op->addr.buswidth == 2) { + if (op->cmd.buswidth <= 1) { + config |= HISI_SFC_V3XX_CMD_CFG_DUAL_IO; + } else if (op->cmd.buswidth == 2) { + config |= HISI_SFC_V3XX_CMD_CFG_FULL_DIO; + } else { + return -EIO; + } + } else { + return -EIO; + } + break; + case 4: + if (op->addr.buswidth <= 1) { + config |= HISI_SFC_V3XX_CMD_CFG_QUAD_IN_QUAD_OUT; + } else if (op->addr.buswidth == 4) { + if (op->cmd.buswidth <= 1) { + config |= HISI_SFC_V3XX_CMD_CFG_QUAD_IO; + } else if (op->cmd.buswidth == 4) { + config |= HISI_SFC_V3XX_CMD_CFG_FULL_QIO; + } else { + return -EIO; + } + } else { + return -EIO; + } + break; + default: + return -EOPNOTSUPP; + } + if (op->data.dir != SPI_MEM_NO_DATA) { config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF; config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK; @@ -207,6 +251,44 @@ static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = { .exec_op = hisi_sfc_v3xx_exec_op, }; +static int hisi_sfc_v3xx_buswidth_override_bits; + +/* + * ACPI FW does not allow us to currently set the device buswidth, so quirk it + * depending on the board. + */ +static int __init hisi_sfc_v3xx_dmi_quirk(const struct dmi_system_id *d) +{ + hisi_sfc_v3xx_buswidth_override_bits = SPI_RX_QUAD | SPI_TX_QUAD; + + return 0; +} + +static const struct dmi_system_id hisi_sfc_v3xx_dmi_quirk_table[] = { + { + .callback = hisi_sfc_v3xx_dmi_quirk, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Huawei"), + DMI_MATCH(DMI_PRODUCT_NAME, "D06"), + }, + }, + { + .callback = hisi_sfc_v3xx_dmi_quirk, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Huawei"), + DMI_MATCH(DMI_PRODUCT_NAME, "TaiShan 2280 V2"), + }, + }, + { + .callback = hisi_sfc_v3xx_dmi_quirk, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Huawei"), + DMI_MATCH(DMI_PRODUCT_NAME, "TaiShan 200 (Model 2280)"), + }, + }, + {} +}; + static int hisi_sfc_v3xx_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -222,6 +304,8 @@ static int hisi_sfc_v3xx_probe(struct platform_device *pdev) ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; + ctlr->buswidth_override_bits = hisi_sfc_v3xx_buswidth_override_bits; + host = spi_controller_get_devdata(ctlr); host->dev = dev; @@ -277,7 +361,20 @@ static struct platform_driver hisi_sfc_v3xx_spi_driver = { .probe = hisi_sfc_v3xx_probe, }; -module_platform_driver(hisi_sfc_v3xx_spi_driver); +static int __init hisi_sfc_v3xx_spi_init(void) +{ + dmi_check_system(hisi_sfc_v3xx_dmi_quirk_table); + + return platform_driver_register(&hisi_sfc_v3xx_spi_driver); +} + +static void __exit hisi_sfc_v3xx_spi_exit(void) +{ + platform_driver_unregister(&hisi_sfc_v3xx_spi_driver); +} + +module_init(hisi_sfc_v3xx_spi_init); +module_exit(hisi_sfc_v3xx_spi_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index e5a46f0eb93b..adaa0c49f966 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -418,12 +418,13 @@ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) struct spi_controller *ctlr = mem->spi->controller; size_t len; - len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; - if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size) return ctlr->mem_ops->adjust_op_size(mem, op); if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) { + len = sizeof(op->cmd.opcode) + op->addr.nbytes + + op->dummy.nbytes; + if (len > spi_max_transfer_size(mem->spi)) 
return -EINVAL; @@ -487,7 +488,7 @@ static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc, * This function is creating a direct mapping descriptor which can then be used * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write(). * If the SPI controller driver does not support direct mapping, this function - * fallback to an implementation using spi_mem_exec_op(), so that the caller + * falls back to an implementation using spi_mem_exec_op(), so that the caller * doesn't have to bother implementing a fallback on his own. * * Return: a valid pointer in case of success, and ERR_PTR() otherwise. diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index 7f5680fe2568..77f7d0e0e46a 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -9,11 +9,13 @@ #include <linux/bitfield.h> #include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/device.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/types.h> @@ -33,7 +35,6 @@ * to have a CS go down over the full transfer */ -#define SPICC_MAX_FREQ 30000000 #define SPICC_MAX_BURST 128 /* Register Map */ @@ -105,7 +106,21 @@ #define SPICC_SWAP_RO BIT(14) /* RX FIFO Data Swap Read-Only */ #define SPICC_SWAP_W1 BIT(15) /* RX FIFO Data Swap Write-Only */ #define SPICC_DLYCTL_RO_MASK GENMASK(20, 15) /* Delay Control Read-Only */ -#define SPICC_DLYCTL_W1_MASK GENMASK(21, 16) /* Delay Control Write-Only */ +#define SPICC_MO_DELAY_MASK GENMASK(17, 16) /* Master Output Delay */ +#define SPICC_MO_NO_DELAY 0 +#define SPICC_MO_DELAY_1_CYCLE 1 +#define SPICC_MO_DELAY_2_CYCLE 2 +#define SPICC_MO_DELAY_3_CYCLE 3 +#define SPICC_MI_DELAY_MASK GENMASK(19, 18) /* Master Input Delay */ +#define SPICC_MI_NO_DELAY 0 +#define SPICC_MI_DELAY_1_CYCLE 1 +#define SPICC_MI_DELAY_2_CYCLE 2 +#define SPICC_MI_DELAY_3_CYCLE 3 +#define SPICC_MI_CAP_DELAY_MASK GENMASK(21, 20) /* Master Capture Delay */ +#define SPICC_CAP_AHEAD_2_CYCLE 0 +#define SPICC_CAP_AHEAD_1_CYCLE 1 +#define SPICC_CAP_NO_DELAY 2 +#define SPICC_CAP_DELAY_1_CYCLE 3 #define SPICC_FIFORST_RO_MASK GENMASK(22, 21) /* FIFO Softreset Read-Only */ #define SPICC_FIFORST_W1_MASK GENMASK(23, 22) /* FIFO Softreset Write-Only */ @@ -113,31 +128,59 @@ #define SPICC_DWADDR 0x24 /* Write Address of DMA */ +#define SPICC_ENH_CTL0 0x38 /* Enhanced Feature */ +#define SPICC_ENH_CLK_CS_DELAY_MASK GENMASK(15, 0) +#define SPICC_ENH_DATARATE_MASK GENMASK(23, 16) +#define SPICC_ENH_DATARATE_EN BIT(24) +#define SPICC_ENH_MOSI_OEN BIT(25) +#define SPICC_ENH_CLK_OEN BIT(26) +#define SPICC_ENH_CS_OEN BIT(27) +#define SPICC_ENH_CLK_CS_DELAY_EN BIT(28) +#define SPICC_ENH_MAIN_CLK_AO BIT(29) + #define writel_bits_relaxed(mask, val, addr) \ writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr) -#define SPICC_BURST_MAX 16 -#define SPICC_FIFO_HALF 10 +struct meson_spicc_data { + unsigned int max_speed_hz; + unsigned int min_speed_hz; + unsigned int fifo_size; + bool has_oen; + bool has_enhance_clk_div; + bool has_pclk; +}; struct meson_spicc_device { struct spi_master *master; struct platform_device *pdev; void __iomem *base; struct clk *core; + struct clk *pclk; + struct clk *clk; struct spi_message *message; struct spi_transfer *xfer; + const struct meson_spicc_data *data; u8 *tx_buf; u8 *rx_buf; unsigned int bytes_per_word; unsigned long tx_remain; - unsigned long txb_remain; unsigned 
long rx_remain; - unsigned long rxb_remain; unsigned long xfer_remain; - bool is_burst_end; - bool is_last_burst; }; +static void meson_spicc_oen_enable(struct meson_spicc_device *spicc) +{ + u32 conf; + + if (!spicc->data->has_oen) + return; + + conf = readl_relaxed(spicc->base + SPICC_ENH_CTL0) | + SPICC_ENH_MOSI_OEN | SPICC_ENH_CLK_OEN | SPICC_ENH_CS_OEN; + + writel_relaxed(conf, spicc->base + SPICC_ENH_CTL0); +} + static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc) { return !!FIELD_GET(SPICC_TF, @@ -146,7 +189,7 @@ static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc) static inline bool meson_spicc_rxready(struct meson_spicc_device *spicc) { - return FIELD_GET(SPICC_RH | SPICC_RR | SPICC_RF_EN, + return FIELD_GET(SPICC_RH | SPICC_RR | SPICC_RF, readl_relaxed(spicc->base + SPICC_STATREG)); } @@ -201,34 +244,22 @@ static inline void meson_spicc_tx(struct meson_spicc_device *spicc) spicc->base + SPICC_TXDATA); } -static inline u32 meson_spicc_setup_rx_irq(struct meson_spicc_device *spicc, - u32 irq_ctrl) +static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc) { - if (spicc->rx_remain > SPICC_FIFO_HALF) - irq_ctrl |= SPICC_RH_EN; - else - irq_ctrl |= SPICC_RR_EN; - - return irq_ctrl; -} -static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc, - unsigned int burst_len) -{ + unsigned int burst_len = min_t(unsigned int, + spicc->xfer_remain / + spicc->bytes_per_word, + spicc->data->fifo_size); /* Setup Xfer variables */ spicc->tx_remain = burst_len; spicc->rx_remain = burst_len; spicc->xfer_remain -= burst_len * spicc->bytes_per_word; - spicc->is_burst_end = false; - if (burst_len < SPICC_BURST_MAX || !spicc->xfer_remain) - spicc->is_last_burst = true; - else - spicc->is_last_burst = false; /* Setup burst length */ writel_bits_relaxed(SPICC_BURSTLENGTH_MASK, FIELD_PREP(SPICC_BURSTLENGTH_MASK, - burst_len), + burst_len - 1), spicc->base + SPICC_CONREG); /* Fill TX FIFO */ @@ -238,97 +269,71 @@ static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc, static irqreturn_t meson_spicc_irq(int irq, void *data) { struct meson_spicc_device *spicc = (void *) data; - u32 ctrl = readl_relaxed(spicc->base + SPICC_INTREG); - u32 stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl; - ctrl &= ~(SPICC_RH_EN | SPICC_RR_EN); + writel_bits_relaxed(SPICC_TC, SPICC_TC, spicc->base + SPICC_STATREG); /* Empty RX FIFO */ meson_spicc_rx(spicc); - /* Enable TC interrupt since we transferred everything */ - if (!spicc->tx_remain && !spicc->rx_remain) { - spicc->is_burst_end = true; - - /* Enable TC interrupt */ - ctrl |= SPICC_TC_EN; + if (!spicc->xfer_remain) { + /* Disable all IRQs */ + writel(0, spicc->base + SPICC_INTREG); - /* Reload IRQ status */ - stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl; - } - - /* Check transfer complete */ - if ((stat & SPICC_TC) && spicc->is_burst_end) { - unsigned int burst_len; - - /* Clear TC bit */ - writel_relaxed(SPICC_TC, spicc->base + SPICC_STATREG); - - /* Disable TC interrupt */ - ctrl &= ~SPICC_TC_EN; - - if (spicc->is_last_burst) { - /* Disable all IRQs */ - writel(0, spicc->base + SPICC_INTREG); - - spi_finalize_current_transfer(spicc->master); - - return IRQ_HANDLED; - } - - burst_len = min_t(unsigned int, - spicc->xfer_remain / spicc->bytes_per_word, - SPICC_BURST_MAX); + spi_finalize_current_transfer(spicc->master); - /* Setup burst */ - meson_spicc_setup_burst(spicc, burst_len); - - /* Restart burst */ - writel_bits_relaxed(SPICC_XCH, SPICC_XCH, - 
spicc->base + SPICC_CONREG); + return IRQ_HANDLED; } - /* Setup RX interrupt trigger */ - ctrl = meson_spicc_setup_rx_irq(spicc, ctrl); + /* Setup burst */ + meson_spicc_setup_burst(spicc); - /* Reconfigure interrupts */ - writel(ctrl, spicc->base + SPICC_INTREG); + /* Start burst */ + writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG); return IRQ_HANDLED; } -static u32 meson_spicc_setup_speed(struct meson_spicc_device *spicc, u32 conf, - u32 speed) +static void meson_spicc_auto_io_delay(struct meson_spicc_device *spicc) { - unsigned long parent, value; - unsigned int i, div; - - parent = clk_get_rate(spicc->core); - - /* Find closest inferior/equal possible speed */ - for (i = 0 ; i < 7 ; ++i) { - /* 2^(data_rate+2) */ - value = parent >> (i + 2); - - if (value <= speed) - break; + u32 div, hz; + u32 mi_delay, cap_delay; + u32 conf; + + if (spicc->data->has_enhance_clk_div) { + div = FIELD_GET(SPICC_ENH_DATARATE_MASK, + readl_relaxed(spicc->base + SPICC_ENH_CTL0)); + div++; + div <<= 1; + } else { + div = FIELD_GET(SPICC_DATARATE_MASK, + readl_relaxed(spicc->base + SPICC_CONREG)); + div += 2; + div = 1 << div; } - /* If provided speed it lower than max divider, use max divider */ - if (i > 7) { - div = 7; - dev_warn_once(&spicc->pdev->dev, "unable to get close to speed %u\n", - speed); - } else - div = i; - - dev_dbg(&spicc->pdev->dev, "parent %lu, speed %u -> %lu (%u)\n", - parent, speed, value, div); - - conf &= ~SPICC_DATARATE_MASK; - conf |= FIELD_PREP(SPICC_DATARATE_MASK, div); - - return conf; + mi_delay = SPICC_MI_NO_DELAY; + cap_delay = SPICC_CAP_AHEAD_2_CYCLE; + hz = clk_get_rate(spicc->clk); + + if (hz >= 100000000) + cap_delay = SPICC_CAP_DELAY_1_CYCLE; + else if (hz >= 80000000) + cap_delay = SPICC_CAP_NO_DELAY; + else if (hz >= 40000000) + cap_delay = SPICC_CAP_AHEAD_1_CYCLE; + else if (div >= 16) + mi_delay = SPICC_MI_DELAY_3_CYCLE; + else if (div >= 8) + mi_delay = SPICC_MI_DELAY_2_CYCLE; + else if (div >= 6) + mi_delay = SPICC_MI_DELAY_1_CYCLE; + + conf = readl_relaxed(spicc->base + SPICC_TESTREG); + conf &= ~(SPICC_MO_DELAY_MASK | SPICC_MI_DELAY_MASK + | SPICC_MI_CAP_DELAY_MASK); + conf |= FIELD_PREP(SPICC_MI_DELAY_MASK, mi_delay); + conf |= FIELD_PREP(SPICC_MI_CAP_DELAY_MASK, cap_delay); + writel_relaxed(conf, spicc->base + SPICC_TESTREG); } static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc, @@ -339,9 +344,6 @@ static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc, /* Read original configuration */ conf = conf_orig = readl_relaxed(spicc->base + SPICC_CONREG); - /* Select closest divider */ - conf = meson_spicc_setup_speed(spicc, conf, xfer->speed_hz); - /* Setup word width */ conf &= ~SPICC_BITLENGTH_MASK; conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, @@ -350,6 +352,32 @@ static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc, /* Ignore if unchanged */ if (conf != conf_orig) writel_relaxed(conf, spicc->base + SPICC_CONREG); + + clk_set_rate(spicc->clk, xfer->speed_hz); + + meson_spicc_auto_io_delay(spicc); + + writel_relaxed(0, spicc->base + SPICC_DMAREG); +} + +static void meson_spicc_reset_fifo(struct meson_spicc_device *spicc) +{ + u32 data; + + if (spicc->data->has_oen) + writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO, + SPICC_ENH_MAIN_CLK_AO, + spicc->base + SPICC_ENH_CTL0); + + writel_bits_relaxed(SPICC_FIFORST_W1_MASK, SPICC_FIFORST_W1_MASK, + spicc->base + SPICC_TESTREG); + + while (meson_spicc_rxready(spicc)) + data = readl_relaxed(spicc->base + SPICC_RXDATA); + + if (spicc->data->has_oen) + 
writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO, 0, + spicc->base + SPICC_ENH_CTL0); } static int meson_spicc_transfer_one(struct spi_master *master, @@ -357,8 +385,6 @@ static int meson_spicc_transfer_one(struct spi_master *master, struct spi_transfer *xfer) { struct meson_spicc_device *spicc = spi_master_get_devdata(master); - unsigned int burst_len; - u32 irq = 0; /* Store current transfer */ spicc->xfer = xfer; @@ -372,22 +398,22 @@ static int meson_spicc_transfer_one(struct spi_master *master, spicc->bytes_per_word = DIV_ROUND_UP(spicc->xfer->bits_per_word, 8); + if (xfer->len % spicc->bytes_per_word) + return -EINVAL; + /* Setup transfer parameters */ meson_spicc_setup_xfer(spicc, xfer); - burst_len = min_t(unsigned int, - spicc->xfer_remain / spicc->bytes_per_word, - SPICC_BURST_MAX); + meson_spicc_reset_fifo(spicc); - meson_spicc_setup_burst(spicc, burst_len); - - irq = meson_spicc_setup_rx_irq(spicc, irq); + /* Setup burst */ + meson_spicc_setup_burst(spicc); /* Start burst */ writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG); /* Enable interrupts */ - writel_relaxed(irq, spicc->base + SPICC_INTREG); + writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG); return 1; } @@ -444,7 +470,7 @@ static int meson_spicc_prepare_message(struct spi_master *master, /* Setup no wait cycles by default */ writel_relaxed(0, spicc->base + SPICC_PERIODREG); - writel_bits_relaxed(BIT(24), BIT(24), spicc->base + SPICC_TESTREG); + writel_bits_relaxed(SPICC_LBC_W1, 0, spicc->base + SPICC_TESTREG); return 0; } @@ -456,9 +482,6 @@ static int meson_spicc_unprepare_transfer(struct spi_master *master) /* Disable all IRQs */ writel(0, spicc->base + SPICC_INTREG); - /* Disable controller */ - writel_bits_relaxed(SPICC_ENABLE, 0, spicc->base + SPICC_CONREG); - device_reset_optional(&spicc->pdev->dev); return 0; @@ -477,11 +500,167 @@ static void meson_spicc_cleanup(struct spi_device *spi) spi->controller_state = NULL; } +/* + * The Clock Mux + * x-----------------x x------------x x------\ + * |---| pow2 fixed div |---| pow2 div |----| | + * | x-----------------x x------------x | | + * src ---| | mux |-- out + * | x-----------------x x------------x | | + * |---| enh fixed div |---| enh div |0---| | + * x-----------------x x------------x x------/ + * + * Clk path for GX series: + * src -> pow2 fixed div -> pow2 div -> out + * + * Clk path for AXG series: + * src -> pow2 fixed div -> pow2 div -> mux -> out + * src -> enh fixed div -> enh div -> mux -> out + * + * Clk path for G12A series: + * pclk -> pow2 fixed div -> pow2 div -> mux -> out + * pclk -> enh fixed div -> enh div -> mux -> out + */ + +static int meson_spicc_clk_init(struct meson_spicc_device *spicc) +{ + struct device *dev = &spicc->pdev->dev; + struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div; + struct clk_divider *pow2_div, *enh_div; + struct clk_mux *mux; + struct clk_init_data init; + struct clk *clk; + struct clk_parent_data parent_data[2]; + char name[64]; + + memset(&init, 0, sizeof(init)); + memset(&parent_data, 0, sizeof(parent_data)); + + init.parent_data = parent_data; + + /* algorithm for pow2 div: rate = freq / 4 / (2 ^ N) */ + + pow2_fixed_div = devm_kzalloc(dev, sizeof(*pow2_fixed_div), GFP_KERNEL); + if (!pow2_fixed_div) + return -ENOMEM; + + snprintf(name, sizeof(name), "%s#pow2_fixed_div", dev_name(dev)); + init.name = name; + init.ops = &clk_fixed_factor_ops; + init.flags = 0; + if (spicc->data->has_pclk) + parent_data[0].hw = __clk_get_hw(spicc->pclk); + else + parent_data[0].hw = __clk_get_hw(spicc->core); 
+ init.num_parents = 1; + + pow2_fixed_div->mult = 1, + pow2_fixed_div->div = 4, + pow2_fixed_div->hw.init = &init; + + clk = devm_clk_register(dev, &pow2_fixed_div->hw); + if (WARN_ON(IS_ERR(clk))) + return PTR_ERR(clk); + + pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL); + if (!pow2_div) + return -ENOMEM; + + snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev)); + init.name = name; + init.ops = &clk_divider_ops; + init.flags = CLK_SET_RATE_PARENT; + parent_data[0].hw = &pow2_fixed_div->hw; + init.num_parents = 1; + + pow2_div->shift = 16, + pow2_div->width = 3, + pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO, + pow2_div->reg = spicc->base + SPICC_CONREG; + pow2_div->hw.init = &init; + + clk = devm_clk_register(dev, &pow2_div->hw); + if (WARN_ON(IS_ERR(clk))) + return PTR_ERR(clk); + + if (!spicc->data->has_enhance_clk_div) { + spicc->clk = clk; + return 0; + } + + /* algorithm for enh div: rate = freq / 2 / (N + 1) */ + + enh_fixed_div = devm_kzalloc(dev, sizeof(*enh_fixed_div), GFP_KERNEL); + if (!enh_fixed_div) + return -ENOMEM; + + snprintf(name, sizeof(name), "%s#enh_fixed_div", dev_name(dev)); + init.name = name; + init.ops = &clk_fixed_factor_ops; + init.flags = 0; + if (spicc->data->has_pclk) + parent_data[0].hw = __clk_get_hw(spicc->pclk); + else + parent_data[0].hw = __clk_get_hw(spicc->core); + init.num_parents = 1; + + enh_fixed_div->mult = 1, + enh_fixed_div->div = 2, + enh_fixed_div->hw.init = &init; + + clk = devm_clk_register(dev, &enh_fixed_div->hw); + if (WARN_ON(IS_ERR(clk))) + return PTR_ERR(clk); + + enh_div = devm_kzalloc(dev, sizeof(*enh_div), GFP_KERNEL); + if (!enh_div) + return -ENOMEM; + + snprintf(name, sizeof(name), "%s#enh_div", dev_name(dev)); + init.name = name; + init.ops = &clk_divider_ops; + init.flags = CLK_SET_RATE_PARENT; + parent_data[0].hw = &enh_fixed_div->hw; + init.num_parents = 1; + + enh_div->shift = 16, + enh_div->width = 8, + enh_div->reg = spicc->base + SPICC_ENH_CTL0; + enh_div->hw.init = &init; + + clk = devm_clk_register(dev, &enh_div->hw); + if (WARN_ON(IS_ERR(clk))) + return PTR_ERR(clk); + + mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); + if (!mux) + return -ENOMEM; + + snprintf(name, sizeof(name), "%s#sel", dev_name(dev)); + init.name = name; + init.ops = &clk_mux_ops; + parent_data[0].hw = &pow2_div->hw; + parent_data[1].hw = &enh_div->hw; + init.num_parents = 2; + init.flags = CLK_SET_RATE_PARENT; + + mux->mask = 0x1, + mux->shift = 24, + mux->reg = spicc->base + SPICC_ENH_CTL0; + mux->hw.init = &init; + + spicc->clk = devm_clk_register(dev, &mux->hw); + if (WARN_ON(IS_ERR(spicc->clk))) + return PTR_ERR(spicc->clk); + + return 0; +} + static int meson_spicc_probe(struct platform_device *pdev) { struct spi_master *master; struct meson_spicc_device *spicc; - int ret, irq, rate; + int ret, irq; master = spi_alloc_master(&pdev->dev, sizeof(*spicc)); if (!master) { @@ -491,6 +670,13 @@ static int meson_spicc_probe(struct platform_device *pdev) spicc = spi_master_get_devdata(master); spicc->master = master; + spicc->data = of_device_get_match_data(&pdev->dev); + if (!spicc->data) { + dev_err(&pdev->dev, "failed to get match data\n"); + ret = -EINVAL; + goto out_master; + } + spicc->pdev = pdev; platform_set_drvdata(pdev, spicc); @@ -501,6 +687,10 @@ static int meson_spicc_probe(struct platform_device *pdev) goto out_master; } + /* Set master mode and enable controller */ + writel_relaxed(SPICC_ENABLE | SPICC_MODE_MASTER, + spicc->base + SPICC_CONREG); + /* Disable all IRQs */ writel_relaxed(0, spicc->base + 
SPICC_INTREG); @@ -519,12 +709,26 @@ static int meson_spicc_probe(struct platform_device *pdev) goto out_master; } + if (spicc->data->has_pclk) { + spicc->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(spicc->pclk)) { + dev_err(&pdev->dev, "pclk clock request failed\n"); + ret = PTR_ERR(spicc->pclk); + goto out_master; + } + } + ret = clk_prepare_enable(spicc->core); if (ret) { dev_err(&pdev->dev, "core clock enable failed\n"); goto out_master; } - rate = clk_get_rate(spicc->core); + + ret = clk_prepare_enable(spicc->pclk); + if (ret) { + dev_err(&pdev->dev, "pclk clock enable failed\n"); + goto out_master; + } device_reset_optional(&pdev->dev); @@ -536,7 +740,8 @@ static int meson_spicc_probe(struct platform_device *pdev) SPI_BPW_MASK(16) | SPI_BPW_MASK(8); master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX); - master->min_speed_hz = rate >> 9; + master->min_speed_hz = spicc->data->min_speed_hz; + master->max_speed_hz = spicc->data->max_speed_hz; master->setup = meson_spicc_setup; master->cleanup = meson_spicc_cleanup; master->prepare_message = meson_spicc_prepare_message; @@ -544,11 +749,13 @@ static int meson_spicc_probe(struct platform_device *pdev) master->transfer_one = meson_spicc_transfer_one; master->use_gpio_descriptors = true; - /* Setup max rate according to the Meson GX datasheet */ - if ((rate >> 2) > SPICC_MAX_FREQ) - master->max_speed_hz = SPICC_MAX_FREQ; - else - master->max_speed_hz = rate >> 2; + meson_spicc_oen_enable(spicc); + + ret = meson_spicc_clk_init(spicc); + if (ret) { + dev_err(&pdev->dev, "clock registration failed\n"); + goto out_master; + } ret = devm_spi_register_master(&pdev->dev, master); if (ret) { @@ -560,6 +767,7 @@ static int meson_spicc_probe(struct platform_device *pdev) out_clk: clk_disable_unprepare(spicc->core); + clk_disable_unprepare(spicc->pclk); out_master: spi_master_put(master); @@ -575,13 +783,47 @@ static int meson_spicc_remove(struct platform_device *pdev) writel(0, spicc->base + SPICC_CONREG); clk_disable_unprepare(spicc->core); + clk_disable_unprepare(spicc->pclk); return 0; } +static const struct meson_spicc_data meson_spicc_gx_data = { + .max_speed_hz = 30000000, + .min_speed_hz = 325000, + .fifo_size = 16, +}; + +static const struct meson_spicc_data meson_spicc_axg_data = { + .max_speed_hz = 80000000, + .min_speed_hz = 325000, + .fifo_size = 16, + .has_oen = true, + .has_enhance_clk_div = true, +}; + +static const struct meson_spicc_data meson_spicc_g12a_data = { + .max_speed_hz = 166666666, + .min_speed_hz = 50000, + .fifo_size = 15, + .has_oen = true, + .has_enhance_clk_div = true, + .has_pclk = true, +}; + static const struct of_device_id meson_spicc_of_match[] = { - { .compatible = "amlogic,meson-gx-spicc", }, - { .compatible = "amlogic,meson-axg-spicc", }, + { + .compatible = "amlogic,meson-gx-spicc", + .data = &meson_spicc_gx_data, + }, + { + .compatible = "amlogic,meson-axg-spicc", + .data = &meson_spicc_axg_data, + }, + { + .compatible = "amlogic,meson-g12a-spicc", + .data = &meson_spicc_g12a_data, + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, meson_spicc_of_match); diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c new file mode 100644 index 000000000000..c15a9910549f --- /dev/null +++ b/drivers/spi/spi-mtk-nor.c @@ -0,0 +1,689 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Mediatek SPI NOR controller driver +// +// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com> + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/dma-mapping.h> 
+#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> +#include <linux/string.h> + +#define DRIVER_NAME "mtk-spi-nor" + +#define MTK_NOR_REG_CMD 0x00 +#define MTK_NOR_CMD_WRITE BIT(4) +#define MTK_NOR_CMD_PROGRAM BIT(2) +#define MTK_NOR_CMD_READ BIT(0) +#define MTK_NOR_CMD_MASK GENMASK(5, 0) + +#define MTK_NOR_REG_PRG_CNT 0x04 +#define MTK_NOR_REG_RDATA 0x0c + +#define MTK_NOR_REG_RADR0 0x10 +#define MTK_NOR_REG_RADR(n) (MTK_NOR_REG_RADR0 + 4 * (n)) +#define MTK_NOR_REG_RADR3 0xc8 + +#define MTK_NOR_REG_WDATA 0x1c + +#define MTK_NOR_REG_PRGDATA0 0x20 +#define MTK_NOR_REG_PRGDATA(n) (MTK_NOR_REG_PRGDATA0 + 4 * (n)) +#define MTK_NOR_REG_PRGDATA_MAX 5 + +#define MTK_NOR_REG_SHIFT0 0x38 +#define MTK_NOR_REG_SHIFT(n) (MTK_NOR_REG_SHIFT0 + 4 * (n)) +#define MTK_NOR_REG_SHIFT_MAX 9 + +#define MTK_NOR_REG_CFG1 0x60 +#define MTK_NOR_FAST_READ BIT(0) + +#define MTK_NOR_REG_CFG2 0x64 +#define MTK_NOR_WR_CUSTOM_OP_EN BIT(4) +#define MTK_NOR_WR_BUF_EN BIT(0) + +#define MTK_NOR_REG_PP_DATA 0x98 + +#define MTK_NOR_REG_IRQ_STAT 0xa8 +#define MTK_NOR_REG_IRQ_EN 0xac +#define MTK_NOR_IRQ_DMA BIT(7) +#define MTK_NOR_IRQ_MASK GENMASK(7, 0) + +#define MTK_NOR_REG_CFG3 0xb4 +#define MTK_NOR_DISABLE_WREN BIT(7) +#define MTK_NOR_DISABLE_SR_POLL BIT(5) + +#define MTK_NOR_REG_WP 0xc4 +#define MTK_NOR_ENABLE_SF_CMD 0x30 + +#define MTK_NOR_REG_BUSCFG 0xcc +#define MTK_NOR_4B_ADDR BIT(4) +#define MTK_NOR_QUAD_ADDR BIT(3) +#define MTK_NOR_QUAD_READ BIT(2) +#define MTK_NOR_DUAL_ADDR BIT(1) +#define MTK_NOR_DUAL_READ BIT(0) +#define MTK_NOR_BUS_MODE_MASK GENMASK(4, 0) + +#define MTK_NOR_REG_DMA_CTL 0x718 +#define MTK_NOR_DMA_START BIT(0) + +#define MTK_NOR_REG_DMA_FADR 0x71c +#define MTK_NOR_REG_DMA_DADR 0x720 +#define MTK_NOR_REG_DMA_END_DADR 0x724 + +#define MTK_NOR_PRG_MAX_SIZE 6 +// Reading DMA src/dst addresses have to be 16-byte aligned +#define MTK_NOR_DMA_ALIGN 16 +#define MTK_NOR_DMA_ALIGN_MASK (MTK_NOR_DMA_ALIGN - 1) +// and we allocate a bounce buffer if destination address isn't aligned. 
+#define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE + +// Buffered page program can do one 128-byte transfer +#define MTK_NOR_PP_SIZE 128 + +#define CLK_TO_US(sp, clkcnt) ((clkcnt) * 1000000 / sp->spi_freq) + +struct mtk_nor { + struct spi_controller *ctlr; + struct device *dev; + void __iomem *base; + u8 *buffer; + struct clk *spi_clk; + struct clk *ctlr_clk; + unsigned int spi_freq; + bool wbuf_en; + bool has_irq; + struct completion op_done; +}; + +static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr) +{ + u32 val = readl(sp->base + reg); + + val &= ~clr; + val |= set; + writel(val, sp->base + reg); +} + +static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk) +{ + ulong delay = CLK_TO_US(sp, clk); + u32 reg; + int ret; + + writel(cmd, sp->base + MTK_NOR_REG_CMD); + ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd), + delay / 3, (delay + 1) * 200); + if (ret < 0) + dev_err(sp->dev, "command %u timeout.\n", cmd); + return ret; +} + +static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op) +{ + u32 addr = op->addr.val; + int i; + + for (i = 0; i < 3; i++) { + writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i)); + addr >>= 8; + } + if (op->addr.nbytes == 4) { + writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3); + mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0); + } else { + mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR); + } +} + +static bool mtk_nor_match_read(const struct spi_mem_op *op) +{ + int dummy = 0; + + if (op->dummy.buswidth) + dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth; + + if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) { + if (op->addr.buswidth == 1) + return dummy == 8; + else if (op->addr.buswidth == 2) + return dummy == 4; + else if (op->addr.buswidth == 4) + return dummy == 6; + } else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) { + if (op->cmd.opcode == 0x03) + return dummy == 0; + else if (op->cmd.opcode == 0x0b) + return dummy == 8; + } + return false; +} + +static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + size_t len; + + if (!op->data.nbytes) + return 0; + + if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) { + if ((op->data.dir == SPI_MEM_DATA_IN) && + mtk_nor_match_read(op)) { + if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) || + (op->data.nbytes < MTK_NOR_DMA_ALIGN)) + op->data.nbytes = 1; + else if (!((ulong)(op->data.buf.in) & + MTK_NOR_DMA_ALIGN_MASK)) + op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK; + else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE) + op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE; + return 0; + } else if (op->data.dir == SPI_MEM_DATA_OUT) { + if (op->data.nbytes >= MTK_NOR_PP_SIZE) + op->data.nbytes = MTK_NOR_PP_SIZE; + else + op->data.nbytes = 1; + return 0; + } + } + + len = MTK_NOR_PRG_MAX_SIZE - sizeof(op->cmd.opcode) - op->addr.nbytes - + op->dummy.nbytes; + if (op->data.nbytes > len) + op->data.nbytes = len; + + return 0; +} + +static bool mtk_nor_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + size_t len; + + if (op->cmd.buswidth != 1) + return false; + + if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) { + if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) + return true; + else if (op->data.dir == SPI_MEM_DATA_OUT) + return (op->addr.buswidth == 1) && + (op->dummy.buswidth == 0) && + (op->data.buswidth == 1); + } + len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; + if ((len > MTK_NOR_PRG_MAX_SIZE) || + 
((op->data.nbytes) && (len == MTK_NOR_PRG_MAX_SIZE))) + return false; + return true; +} + +static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op) +{ + u32 reg = 0; + + if (op->addr.nbytes == 4) + reg |= MTK_NOR_4B_ADDR; + + if (op->data.buswidth == 4) { + reg |= MTK_NOR_QUAD_READ; + writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4)); + if (op->addr.buswidth == 4) + reg |= MTK_NOR_QUAD_ADDR; + } else if (op->data.buswidth == 2) { + reg |= MTK_NOR_DUAL_READ; + writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3)); + if (op->addr.buswidth == 2) + reg |= MTK_NOR_DUAL_ADDR; + } else { + if (op->cmd.opcode == 0x0b) + mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0); + else + mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ); + } + mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK); +} + +static int mtk_nor_read_dma(struct mtk_nor *sp, u32 from, unsigned int length, + u8 *buffer) +{ + int ret = 0; + ulong delay; + u32 reg; + dma_addr_t dma_addr; + + dma_addr = dma_map_single(sp->dev, buffer, length, DMA_FROM_DEVICE); + if (dma_mapping_error(sp->dev, dma_addr)) { + dev_err(sp->dev, "failed to map dma buffer.\n"); + return -EINVAL; + } + + writel(from, sp->base + MTK_NOR_REG_DMA_FADR); + writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR); + writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR); + + if (sp->has_irq) { + reinit_completion(&sp->op_done); + mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0); + } + + mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0); + + delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE); + + if (sp->has_irq) { + if (!wait_for_completion_timeout(&sp->op_done, + (delay + 1) * 100)) + ret = -ETIMEDOUT; + } else { + ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg, + !(reg & MTK_NOR_DMA_START), delay / 3, + (delay + 1) * 100); + } + + dma_unmap_single(sp->dev, dma_addr, length, DMA_FROM_DEVICE); + if (ret < 0) + dev_err(sp->dev, "dma read timeout.\n"); + + return ret; +} + +static int mtk_nor_read_bounce(struct mtk_nor *sp, u32 from, + unsigned int length, u8 *buffer) +{ + unsigned int rdlen; + int ret; + + if (length & MTK_NOR_DMA_ALIGN_MASK) + rdlen = (length + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK; + else + rdlen = length; + + ret = mtk_nor_read_dma(sp, from, rdlen, sp->buffer); + if (ret) + return ret; + + memcpy(buffer, sp->buffer, length); + return 0; +} + +static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op) +{ + u8 *buf = op->data.buf.in; + int ret; + + ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE); + if (!ret) + buf[0] = readb(sp->base + MTK_NOR_REG_RDATA); + return ret; +} + +static int mtk_nor_write_buffer_enable(struct mtk_nor *sp) +{ + int ret; + u32 val; + + if (sp->wbuf_en) + return 0; + + val = readl(sp->base + MTK_NOR_REG_CFG2); + writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2); + ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val, + val & MTK_NOR_WR_BUF_EN, 0, 10000); + if (!ret) + sp->wbuf_en = true; + return ret; +} + +static int mtk_nor_write_buffer_disable(struct mtk_nor *sp) +{ + int ret; + u32 val; + + if (!sp->wbuf_en) + return 0; + val = readl(sp->base + MTK_NOR_REG_CFG2); + writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2); + ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val, + !(val & MTK_NOR_WR_BUF_EN), 0, 10000); + if (!ret) + sp->wbuf_en = false; + return ret; +} + +static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op) +{ + const u8 
*buf = op->data.buf.out; + u32 val; + int ret, i; + + ret = mtk_nor_write_buffer_enable(sp); + if (ret < 0) + return ret; + + for (i = 0; i < op->data.nbytes; i += 4) { + val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 | + buf[i]; + writel(val, sp->base + MTK_NOR_REG_PP_DATA); + } + return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, + (op->data.nbytes + 5) * BITS_PER_BYTE); +} + +static int mtk_nor_pp_unbuffered(struct mtk_nor *sp, + const struct spi_mem_op *op) +{ + const u8 *buf = op->data.buf.out; + int ret; + + ret = mtk_nor_write_buffer_disable(sp); + if (ret < 0) + return ret; + writeb(buf[0], sp->base + MTK_NOR_REG_WDATA); + return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE); +} + +int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master); + int ret; + + if ((op->data.nbytes == 0) || + ((op->addr.nbytes != 3) && (op->addr.nbytes != 4))) + return -ENOTSUPP; + + if (op->data.dir == SPI_MEM_DATA_OUT) { + mtk_nor_set_addr(sp, op); + writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0); + if (op->data.nbytes == MTK_NOR_PP_SIZE) + return mtk_nor_pp_buffered(sp, op); + return mtk_nor_pp_unbuffered(sp, op); + } + + if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) { + ret = mtk_nor_write_buffer_disable(sp); + if (ret < 0) + return ret; + mtk_nor_setup_bus(sp, op); + if (op->data.nbytes == 1) { + mtk_nor_set_addr(sp, op); + return mtk_nor_read_pio(sp, op); + } else if (((ulong)(op->data.buf.in) & + MTK_NOR_DMA_ALIGN_MASK)) { + return mtk_nor_read_bounce(sp, op->addr.val, + op->data.nbytes, + op->data.buf.in); + } else { + return mtk_nor_read_dma(sp, op->addr.val, + op->data.nbytes, + op->data.buf.in); + } + } + + return -ENOTSUPP; +} + +static int mtk_nor_setup(struct spi_device *spi) +{ + struct mtk_nor *sp = spi_controller_get_devdata(spi->master); + + if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) { + dev_err(&spi->dev, "spi clock should be %u Hz.\n", + sp->spi_freq); + return -EINVAL; + } + spi->max_speed_hz = sp->spi_freq; + + return 0; +} + +static int mtk_nor_transfer_one_message(struct spi_controller *master, + struct spi_message *m) +{ + struct mtk_nor *sp = spi_controller_get_devdata(master); + struct spi_transfer *t = NULL; + unsigned long trx_len = 0; + int stat = 0; + int reg_offset = MTK_NOR_REG_PRGDATA_MAX; + void __iomem *reg; + const u8 *txbuf; + u8 *rxbuf; + int i; + + list_for_each_entry(t, &m->transfers, transfer_list) { + txbuf = t->tx_buf; + for (i = 0; i < t->len; i++, reg_offset--) { + reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset); + if (txbuf) + writeb(txbuf[i], reg); + else + writeb(0, reg); + } + trx_len += t->len; + } + + writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT); + + stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM, + trx_len * BITS_PER_BYTE); + if (stat < 0) + goto msg_done; + + reg_offset = trx_len - 1; + list_for_each_entry(t, &m->transfers, transfer_list) { + rxbuf = t->rx_buf; + for (i = 0; i < t->len; i++, reg_offset--) { + reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset); + if (rxbuf) + rxbuf[i] = readb(reg); + } + } + + m->actual_length = trx_len; +msg_done: + m->status = stat; + spi_finalize_current_message(master); + + return 0; +} + +static void mtk_nor_disable_clk(struct mtk_nor *sp) +{ + clk_disable_unprepare(sp->spi_clk); + clk_disable_unprepare(sp->ctlr_clk); +} + +static int mtk_nor_enable_clk(struct mtk_nor *sp) +{ + int ret; + + ret = clk_prepare_enable(sp->spi_clk); + if (ret) + return 
ret; + + ret = clk_prepare_enable(sp->ctlr_clk); + if (ret) { + clk_disable_unprepare(sp->spi_clk); + return ret; + } + + return 0; +} + +static int mtk_nor_init(struct mtk_nor *sp) +{ + int ret; + + ret = mtk_nor_enable_clk(sp); + if (ret) + return ret; + + sp->spi_freq = clk_get_rate(sp->spi_clk); + + writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP); + mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0); + mtk_nor_rmw(sp, MTK_NOR_REG_CFG3, + MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0); + + return ret; +} + +static irqreturn_t mtk_nor_irq_handler(int irq, void *data) +{ + struct mtk_nor *sp = data; + u32 irq_status, irq_enabled; + + irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT); + irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN); + // write status back to clear interrupt + writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT); + + if (!(irq_status & irq_enabled)) + return IRQ_NONE; + + if (irq_status & MTK_NOR_IRQ_DMA) { + complete(&sp->op_done); + writel(0, sp->base + MTK_NOR_REG_IRQ_EN); + } + + return IRQ_HANDLED; +} + +static size_t mtk_max_msg_size(struct spi_device *spi) +{ + return MTK_NOR_PRG_MAX_SIZE; +} + +static const struct spi_controller_mem_ops mtk_nor_mem_ops = { + .adjust_op_size = mtk_nor_adjust_op_size, + .supports_op = mtk_nor_supports_op, + .exec_op = mtk_nor_exec_op +}; + +static const struct of_device_id mtk_nor_match[] = { + { .compatible = "mediatek,mt8173-nor" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_nor_match); + +static int mtk_nor_probe(struct platform_device *pdev) +{ + struct spi_controller *ctlr; + struct mtk_nor *sp; + void __iomem *base; + u8 *buffer; + struct clk *spi_clk, *ctlr_clk; + int ret, irq; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + spi_clk = devm_clk_get(&pdev->dev, "spi"); + if (IS_ERR(spi_clk)) + return PTR_ERR(spi_clk); + + ctlr_clk = devm_clk_get(&pdev->dev, "sf"); + if (IS_ERR(ctlr_clk)) + return PTR_ERR(ctlr_clk); + + buffer = devm_kmalloc(&pdev->dev, + MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN, + GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + if ((ulong)buffer & MTK_NOR_DMA_ALIGN_MASK) + buffer = (u8 *)(((ulong)buffer + MTK_NOR_DMA_ALIGN) & + ~MTK_NOR_DMA_ALIGN_MASK); + + ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp)); + if (!ctlr) { + dev_err(&pdev->dev, "failed to allocate spi controller\n"); + return -ENOMEM; + } + + ctlr->bits_per_word_mask = SPI_BPW_MASK(8); + ctlr->dev.of_node = pdev->dev.of_node; + ctlr->max_message_size = mtk_max_msg_size; + ctlr->mem_ops = &mtk_nor_mem_ops; + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; + ctlr->num_chipselect = 1; + ctlr->setup = mtk_nor_setup; + ctlr->transfer_one_message = mtk_nor_transfer_one_message; + + dev_set_drvdata(&pdev->dev, ctlr); + + sp = spi_controller_get_devdata(ctlr); + sp->base = base; + sp->buffer = buffer; + sp->has_irq = false; + sp->wbuf_en = false; + sp->ctlr = ctlr; + sp->dev = &pdev->dev; + sp->spi_clk = spi_clk; + sp->ctlr_clk = ctlr_clk; + + irq = platform_get_irq_optional(pdev, 0); + if (irq < 0) { + dev_warn(sp->dev, "IRQ not available."); + } else { + writel(MTK_NOR_IRQ_MASK, base + MTK_NOR_REG_IRQ_STAT); + writel(0, base + MTK_NOR_REG_IRQ_EN); + ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0, + pdev->name, sp); + if (ret < 0) { + dev_warn(sp->dev, "failed to request IRQ."); + } else { + init_completion(&sp->op_done); + sp->has_irq = true; + } + } + + ret = mtk_nor_init(sp); + if (ret < 0) { + kfree(ctlr); + return 
ret; + } + + dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq); + + return devm_spi_register_controller(&pdev->dev, ctlr); +} + +static int mtk_nor_remove(struct platform_device *pdev) +{ + struct spi_controller *ctlr; + struct mtk_nor *sp; + + ctlr = dev_get_drvdata(&pdev->dev); + sp = spi_controller_get_devdata(ctlr); + + mtk_nor_disable_clk(sp); + + return 0; +} + +static struct platform_driver mtk_nor_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = mtk_nor_match, + }, + .probe = mtk_nor_probe, + .remove = mtk_nor_remove, +}; + +module_platform_driver(mtk_nor_driver); + +MODULE_DESCRIPTION("Mediatek SPI NOR controller driver"); +MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c new file mode 100644 index 000000000000..4f94c9127fc1 --- /dev/null +++ b/drivers/spi/spi-mux.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// General Purpose SPI multiplexer + +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mux/consumer.h> +#include <linux/slab.h> +#include <linux/spi/spi.h> + +#define SPI_MUX_NO_CS ((unsigned int)-1) + +/** + * DOC: Driver description + * + * This driver supports a MUX on an SPI bus. This can be useful when you need + * more chip selects than the hardware peripherals support, or than are + * available in a particular board setup. + * + * The driver will create an additional SPI controller. Devices added under the + * mux will be handled as 'chip selects' on this controller. + */ + +/** + * struct spi_mux_priv - the basic spi_mux structure + * @spi: pointer to the device struct attached to the parent + * spi controller + * @current_cs: The current chip select set in the mux + * @child_msg_complete: The mux replaces the complete callback in the child's + * message to its own callback; this field is used by the + * driver to store the child's callback during a transfer + * @child_msg_context: Used to store the child's context to the callback + * @child_msg_dev: Used to store the spi_device pointer to the child + * @mux: mux_control structure used to provide chip selects for + * downstream spi devices + */ +struct spi_mux_priv { + struct spi_device *spi; + unsigned int current_cs; + + void (*child_msg_complete)(void *context); + void *child_msg_context; + struct spi_device *child_msg_dev; + struct mux_control *mux; +}; + +/* should not get called when the parent controller is doing a transfer */ +static int spi_mux_select(struct spi_device *spi) +{ + struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller); + int ret; + + if (priv->current_cs == spi->chip_select) + return 0; + + dev_dbg(&priv->spi->dev, "setting up the mux for cs %d\n", + spi->chip_select); + + /* copy the child device's settings except for the cs */ + priv->spi->max_speed_hz = spi->max_speed_hz; + priv->spi->mode = spi->mode; + priv->spi->bits_per_word = spi->bits_per_word; + + ret = mux_control_select(priv->mux, spi->chip_select); + if (ret) + return ret; + + priv->current_cs = spi->chip_select; + + return 0; +} + +static int spi_mux_setup(struct spi_device *spi) +{ + struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller); + + /* + * can be called multiple times, won't do a valid setup now but we will + * change the settings when we do a transfer (necessary because we + * can't predict from which device it will be anyway) + */ + return spi_setup(priv->spi); +} + 
+static void spi_mux_complete_cb(void *context) +{ + struct spi_mux_priv *priv = (struct spi_mux_priv *)context; + struct spi_controller *ctlr = spi_get_drvdata(priv->spi); + struct spi_message *m = ctlr->cur_msg; + + m->complete = priv->child_msg_complete; + m->context = priv->child_msg_context; + m->spi = priv->child_msg_dev; + spi_finalize_current_message(ctlr); + mux_control_deselect(priv->mux); +} + +static int spi_mux_transfer_one_message(struct spi_controller *ctlr, + struct spi_message *m) +{ + struct spi_mux_priv *priv = spi_controller_get_devdata(ctlr); + struct spi_device *spi = m->spi; + int ret; + + ret = spi_mux_select(spi); + if (ret) + return ret; + + /* + * Replace the complete callback, context and spi_device with our own + * pointers. Save originals + */ + priv->child_msg_complete = m->complete; + priv->child_msg_context = m->context; + priv->child_msg_dev = m->spi; + + m->complete = spi_mux_complete_cb; + m->context = priv; + m->spi = priv->spi; + + /* do the transfer */ + return spi_async(priv->spi, m); +} + +static int spi_mux_probe(struct spi_device *spi) +{ + struct spi_controller *ctlr; + struct spi_mux_priv *priv; + int ret; + + ctlr = spi_alloc_master(&spi->dev, sizeof(*priv)); + if (!ctlr) + return -ENOMEM; + + spi_set_drvdata(spi, ctlr); + priv = spi_controller_get_devdata(ctlr); + priv->spi = spi; + + priv->mux = devm_mux_control_get(&spi->dev, NULL); + if (IS_ERR(priv->mux)) { + ret = PTR_ERR(priv->mux); + if (ret != -EPROBE_DEFER) + dev_err(&spi->dev, "failed to get control-mux\n"); + goto err_put_ctlr; + } + + priv->current_cs = SPI_MUX_NO_CS; + + /* supported modes are the same as our parent's */ + ctlr->mode_bits = spi->controller->mode_bits; + ctlr->flags = spi->controller->flags; + ctlr->transfer_one_message = spi_mux_transfer_one_message; + ctlr->setup = spi_mux_setup; + ctlr->num_chipselect = mux_control_states(priv->mux); + ctlr->bus_num = -1; + ctlr->dev.of_node = spi->dev.of_node; + + ret = devm_spi_register_controller(&spi->dev, ctlr); + if (ret) + goto err_put_ctlr; + + return 0; + +err_put_ctlr: + spi_controller_put(ctlr); + + return ret; +} + +static const struct of_device_id spi_mux_of_match[] = { + { .compatible = "spi-mux" }, + { } +}; + +static struct spi_driver spi_mux_driver = { + .probe = spi_mux_probe, + .driver = { + .name = "spi-mux", + .of_match_table = spi_mux_of_match, + }, +}; + +module_spi_driver(spi_mux_driver); + +MODULE_DESCRIPTION("SPI multiplexer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index dce85ee07cd0..918918a9e049 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -22,7 +22,6 @@ #include <linux/ioport.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/interrupt.h> @@ -32,7 +31,7 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/completion.h> -#include <linux/gpio.h> +#include <linux/pinctrl/consumer.h> #include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> #include <linux/module.h> diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index 8c5084a3a617..1ccda82da206 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -307,6 +307,7 @@ #define POLL_TOUT 5000 #define NXP_FSPI_MAX_CHIPSELECT 4 +#define NXP_FSPI_MIN_IOMAP SZ_4M struct nxp_fspi_devtype_data { unsigned int rxfifo; @@ -324,11 +325,29 @@ static const struct nxp_fspi_devtype_data lx2160a_data = { .little_endian = true, /* 
little-endian */ }; +static const struct nxp_fspi_devtype_data imx8mm_data = { + .rxfifo = SZ_512, /* (64 * 64 bits) */ + .txfifo = SZ_1K, /* (128 * 64 bits) */ + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ + .quirks = 0, + .little_endian = true, /* little-endian */ +}; + +static const struct nxp_fspi_devtype_data imx8qxp_data = { + .rxfifo = SZ_512, /* (64 * 64 bits) */ + .txfifo = SZ_1K, /* (128 * 64 bits) */ + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ + .quirks = 0, + .little_endian = true, /* little-endian */ +}; + struct nxp_fspi { void __iomem *iobase; void __iomem *ahb_addr; u32 memmap_phy; u32 memmap_phy_size; + u32 memmap_start; + u32 memmap_len; struct clk *clk, *clk_en; struct device *dev; struct completion c; @@ -641,12 +660,35 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi) f->selected = spi->chip_select; } -static void nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op) +static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op) { + u32 start = op->addr.val; u32 len = op->data.nbytes; + /* if necessary, ioremap before AHB read */ + if ((!f->ahb_addr) || start < f->memmap_start || + start + len > f->memmap_start + f->memmap_len) { + if (f->ahb_addr) + iounmap(f->ahb_addr); + + f->memmap_start = start; + f->memmap_len = len > NXP_FSPI_MIN_IOMAP ? + len : NXP_FSPI_MIN_IOMAP; + + f->ahb_addr = ioremap_wc(f->memmap_phy + f->memmap_start, + f->memmap_len); + + if (!f->ahb_addr) { + dev_err(f->dev, "failed to alloc memory\n"); + return -ENOMEM; + } + } + /* Read out the data directly from the AHB buffer. */ - memcpy_fromio(op->data.buf.in, (f->ahb_addr + op->addr.val), len); + memcpy_fromio(op->data.buf.in, + f->ahb_addr + start - f->memmap_start, len); + + return 0; } static void nxp_fspi_fill_txfifo(struct nxp_fspi *f, @@ -806,7 +848,7 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) */ if (op->data.nbytes > (f->devtype_data->rxfifo - 4) && op->data.dir == SPI_MEM_DATA_IN) { - nxp_fspi_read_ahb(f, op); + err = nxp_fspi_read_ahb(f, op); } else { if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) nxp_fspi_fill_txfifo(f, op); @@ -871,8 +913,9 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f) fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR); /* enable module */ - fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) | FSPI_MCR0_IP_TIMEOUT(0xFF), - base + FSPI_MCR0); + fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) | + FSPI_MCR0_IP_TIMEOUT(0xFF) | (u32) FSPI_MCR0_OCTCOMB_EN, + base + FSPI_MCR0); /* * Disable same device enable bit and configure all slave devices @@ -976,9 +1019,8 @@ static int nxp_fspi_probe(struct platform_device *pdev) /* find the resources - controller memory mapped space */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap"); - f->ahb_addr = devm_ioremap_resource(dev, res); - if (IS_ERR(f->ahb_addr)) { - ret = PTR_ERR(f->ahb_addr); + if (!res) { + ret = -ENODEV; goto err_put_ctrl; } @@ -1057,6 +1099,9 @@ static int nxp_fspi_remove(struct platform_device *pdev) mutex_destroy(&f->lock); + if (f->ahb_addr) + iounmap(f->ahb_addr); + return 0; } @@ -1076,6 +1121,8 @@ static int nxp_fspi_resume(struct device *dev) static const struct of_device_id nxp_fspi_dt_ids[] = { { .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, }, + { .compatible = "nxp,imx8mm-fspi", .data = (void *)&imx8mm_data, }, + { .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids); diff --git 
a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 7e2292c11d12..e9e256718ef4 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -130,6 +130,7 @@ struct omap2_mcspi { int fifo_depth; bool slave_aborted; unsigned int pin_dir:1; + size_t max_xfer_len; }; struct omap2_mcspi_cs { @@ -974,20 +975,12 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, * Note that we currently allow DMA only if we get a channel * for both rx and tx. Otherwise we'll do PIO for both rx and tx. */ -static int omap2_mcspi_request_dma(struct spi_device *spi) +static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi, + struct omap2_mcspi_dma *mcspi_dma) { - struct spi_master *master = spi->master; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; int ret = 0; - mcspi = spi_master_get_devdata(master); - mcspi_dma = mcspi->dma_channels + spi->chip_select; - - init_completion(&mcspi_dma->dma_rx_completion); - init_completion(&mcspi_dma->dma_tx_completion); - - mcspi_dma->dma_rx = dma_request_chan(&master->dev, + mcspi_dma->dma_rx = dma_request_chan(mcspi->dev, mcspi_dma->dma_rx_ch_name); if (IS_ERR(mcspi_dma->dma_rx)) { ret = PTR_ERR(mcspi_dma->dma_rx); @@ -995,7 +988,7 @@ static int omap2_mcspi_request_dma(struct spi_device *spi) goto no_dma; } - mcspi_dma->dma_tx = dma_request_chan(&master->dev, + mcspi_dma->dma_tx = dma_request_chan(mcspi->dev, mcspi_dma->dma_tx_ch_name); if (IS_ERR(mcspi_dma->dma_tx)) { ret = PTR_ERR(mcspi_dma->dma_tx); @@ -1004,20 +997,40 @@ static int omap2_mcspi_request_dma(struct spi_device *spi) mcspi_dma->dma_rx = NULL; } + init_completion(&mcspi_dma->dma_rx_completion); + init_completion(&mcspi_dma->dma_tx_completion); + no_dma: return ret; } +static void omap2_mcspi_release_dma(struct spi_master *master) +{ + struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + struct omap2_mcspi_dma *mcspi_dma; + int i; + + for (i = 0; i < master->num_chipselect; i++) { + mcspi_dma = &mcspi->dma_channels[i]; + + if (mcspi_dma->dma_rx) { + dma_release_channel(mcspi_dma->dma_rx); + mcspi_dma->dma_rx = NULL; + } + if (mcspi_dma->dma_tx) { + dma_release_channel(mcspi_dma->dma_tx); + mcspi_dma->dma_tx = NULL; + } + } +} + static int omap2_mcspi_setup(struct spi_device *spi) { int ret; struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); struct omap2_mcspi_regs *ctx = &mcspi->ctx; - struct omap2_mcspi_dma *mcspi_dma; struct omap2_mcspi_cs *cs = spi->controller_state; - mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - if (!cs) { cs = kzalloc(sizeof *cs, GFP_KERNEL); if (!cs) @@ -1042,13 +1055,6 @@ static int omap2_mcspi_setup(struct spi_device *spi) } } - if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) { - ret = omap2_mcspi_request_dma(spi); - if (ret) - dev_warn(&spi->dev, "not using DMA for McSPI (%d)\n", - ret); - } - ret = pm_runtime_get_sync(mcspi->dev); if (ret < 0) { pm_runtime_put_noidle(mcspi->dev); @@ -1065,12 +1071,8 @@ static int omap2_mcspi_setup(struct spi_device *spi) static void omap2_mcspi_cleanup(struct spi_device *spi) { - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; struct omap2_mcspi_cs *cs; - mcspi = spi_master_get_devdata(spi->master); - if (spi->controller_state) { /* Unlink controller state from context save list */ cs = spi->controller_state; @@ -1079,19 +1081,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi) kfree(cs); } - if (spi->chip_select < spi->master->num_chipselect) { - mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - - if (mcspi_dma->dma_rx) 
{ - dma_release_channel(mcspi_dma->dma_rx); - mcspi_dma->dma_rx = NULL; - } - if (mcspi_dma->dma_tx) { - dma_release_channel(mcspi_dma->dma_tx); - mcspi_dma->dma_tx = NULL; - } - } - if (gpio_is_valid(spi->cs_gpio)) gpio_free(spi->cs_gpio); } @@ -1302,9 +1291,24 @@ static bool omap2_mcspi_can_dma(struct spi_master *master, if (spi_controller_is_slave(master)) return true; + master->dma_rx = mcspi_dma->dma_rx; + master->dma_tx = mcspi_dma->dma_tx; + return (xfer->len >= DMA_MIN_BYTES); } +static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi) +{ + struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi_dma *mcspi_dma = + &mcspi->dma_channels[spi->chip_select]; + + if (mcspi->max_xfer_len && mcspi_dma->dma_rx) + return mcspi->max_xfer_len; + + return SIZE_MAX; +} + static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi) { struct spi_master *master = mcspi->master; @@ -1373,6 +1377,11 @@ static struct omap2_mcspi_platform_config omap4_pdata = { .regs_offset = OMAP4_MCSPI_REG_OFFSET, }; +static struct omap2_mcspi_platform_config am654_pdata = { + .regs_offset = OMAP4_MCSPI_REG_OFFSET, + .max_xfer_len = SZ_4K - 1, +}; + static const struct of_device_id omap_mcspi_of_match[] = { { .compatible = "ti,omap2-mcspi", @@ -1382,6 +1391,10 @@ static const struct of_device_id omap_mcspi_of_match[] = { .compatible = "ti,omap4-mcspi", .data = &omap4_pdata, }, + { + .compatible = "ti,am654-mcspi", + .data = &am654_pdata, + }, { }, }; MODULE_DEVICE_TABLE(of, omap_mcspi_of_match); @@ -1439,6 +1452,10 @@ static int omap2_mcspi_probe(struct platform_device *pdev) mcspi->pin_dir = pdata->pin_dir; } regs_offset = pdata->regs_offset; + if (pdata->max_xfer_len) { + mcspi->max_xfer_len = pdata->max_xfer_len; + master->max_transfer_size = omap2_mcspi_max_xfer_size; + } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); mcspi->base = devm_ioremap_resource(&pdev->dev, r); @@ -1464,6 +1481,11 @@ static int omap2_mcspi_probe(struct platform_device *pdev) for (i = 0; i < master->num_chipselect; i++) { sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i); sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i); + + status = omap2_mcspi_request_dma(mcspi, + &mcspi->dma_channels[i]); + if (status == -EPROBE_DEFER) + goto free_master; } status = platform_get_irq(pdev, 0); @@ -1501,6 +1523,7 @@ disable_pm: pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); free_master: + omap2_mcspi_release_dma(master); spi_master_put(master); return status; } @@ -1510,6 +1533,8 @@ static int omap2_mcspi_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + omap2_mcspi_release_dma(master); + pm_runtime_dont_use_autosuspend(mcspi->dev); pm_runtime_put_sync(mcspi->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 4c7a71f0fb3e..73d2a65d0b6e 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -70,6 +70,10 @@ MODULE_ALIAS("platform:pxa2xx-spi"); #define LPSS_CAPS_CS_EN_SHIFT 9 #define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT) +#define LPSS_PRIV_CLOCK_GATE 0x38 +#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3 +#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3 + struct lpss_config { /* LPSS offset from drv_data->ioaddr */ unsigned offset; @@ -86,6 +90,8 @@ struct lpss_config { unsigned cs_sel_shift; unsigned cs_sel_mask; unsigned cs_num; + /* Quirks */ + unsigned cs_clk_stays_gated : 1; }; /* 
Keep these sorted with enum pxa_ssp_type */ @@ -156,6 +162,7 @@ static const struct lpss_config lpss_platforms[] = { .tx_threshold_hi = 56, .cs_sel_shift = 8, .cs_sel_mask = 3 << 8, + .cs_clk_stays_gated = true, }, }; @@ -185,6 +192,11 @@ static bool is_quark_x1000_ssp(const struct driver_data *drv_data) return drv_data->ssp_type == QUARK_X1000_SSP; } +static bool is_mmp2_ssp(const struct driver_data *drv_data) +{ + return drv_data->ssp_type == MMP2_SSP; +} + static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data) { switch (drv_data->ssp_type) { @@ -383,6 +395,22 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable) else value |= LPSS_CS_CONTROL_CS_HIGH; __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value); + if (config->cs_clk_stays_gated) { + u32 clkgate; + + /* + * Changing CS alone when dynamic clock gating is on won't + * actually flip CS at that time. This ruins SPI transfers + * that specify delays, or have no data. Toggle the clock mode + * to force on briefly to poke the CS pin to move. + */ + clkgate = __lpss_ssp_read_priv(drv_data, LPSS_PRIV_CLOCK_GATE); + value = (clkgate & ~LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK) | + LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON; + + __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, value); + __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, clkgate); + } } static void cs_assert(struct spi_device *spi) @@ -463,8 +491,8 @@ int pxa2xx_spi_flush(struct driver_data *drv_data) static void pxa2xx_spi_off(struct driver_data *drv_data) { - /* On MMP, disabling SSE seems to corrupt the rx fifo */ - if (drv_data->ssp_type == MMP2_SSP) + /* On MMP, disabling SSE seems to corrupt the Rx FIFO */ + if (is_mmp2_ssp(drv_data)) return; pxa2xx_spi_write(drv_data, SSCR0, @@ -1070,7 +1098,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller, || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask) != (cr1 & change_mask)) { /* stop the SSP, and update the other bits */ - if (drv_data->ssp_type != MMP2_SSP) + if (!is_mmp2_ssp(drv_data)) pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE); if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, chip->timeout); @@ -1084,7 +1112,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller, pxa2xx_spi_write(drv_data, SSTO, chip->timeout); } - if (drv_data->ssp_type == MMP2_SSP) { + if (is_mmp2_ssp(drv_data)) { u8 tx_level = (pxa2xx_spi_read(drv_data, SSSR) & SSSR_TFL_MASK) >> 8; @@ -1548,18 +1576,18 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev) else if (pcidev_id) type = (enum pxa_ssp_type)pcidev_id->driver_data; else - return NULL; + return ERR_PTR(-EINVAL); pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) - return NULL; + return ERR_PTR(-ENOMEM); ssp = &pdata->ssp; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(ssp->mmio_base)) - return NULL; + return ERR_CAST(ssp->mmio_base); ssp->phys_base = res->start; @@ -1573,11 +1601,11 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev) ssp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(ssp->clk)) - return NULL; + return ERR_CAST(ssp->clk); ssp->irq = platform_get_irq(pdev, 0); if (ssp->irq < 0) - return NULL; + return ERR_PTR(ssp->irq); ssp->type = type; ssp->dev = &pdev->dev; @@ -1634,9 +1662,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) platform_info = dev_get_platdata(dev); if (!platform_info) { platform_info = pxa2xx_spi_init_pdata(pdev); - if (!platform_info) { 
+ if (IS_ERR(platform_info)) { dev_err(&pdev->dev, "missing platform data\n"); - return -ENODEV; + return PTR_ERR(platform_info); } } @@ -1884,11 +1912,7 @@ out_error_controller_alloc: static int pxa2xx_spi_remove(struct platform_device *pdev) { struct driver_data *drv_data = platform_get_drvdata(pdev); - struct ssp_device *ssp; - - if (!drv_data) - return 0; - ssp = drv_data->ssp; + struct ssp_device *ssp = drv_data->ssp; pm_runtime_get_sync(&pdev->dev); diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index dd3434a407ea..a364b99497e2 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -1217,6 +1217,11 @@ static int spi_qup_suspend(struct device *device) struct spi_qup *controller = spi_master_get_devdata(master); int ret; + if (pm_runtime_suspended(device)) { + ret = spi_qup_pm_resume_runtime(device); + if (ret) + return ret; + } ret = spi_master_suspend(master); if (ret) return ret; @@ -1225,10 +1230,8 @@ static int spi_qup_suspend(struct device *device) if (ret) return ret; - if (!pm_runtime_suspended(device)) { - clk_disable_unprepare(controller->cclk); - clk_disable_unprepare(controller->iclk); - } + clk_disable_unprepare(controller->cclk); + clk_disable_unprepare(controller->iclk); return 0; } diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 2cc6d9951b52..70ef63e0b6b8 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -843,14 +843,17 @@ static const struct dev_pm_ops rockchip_spi_pm = { }; static const struct of_device_id rockchip_spi_dt_match[] = { - { .compatible = "rockchip,rv1108-spi", }, + { .compatible = "rockchip,px30-spi", }, { .compatible = "rockchip,rk3036-spi", }, { .compatible = "rockchip,rk3066-spi", }, { .compatible = "rockchip,rk3188-spi", }, { .compatible = "rockchip,rk3228-spi", }, { .compatible = "rockchip,rk3288-spi", }, + { .compatible = "rockchip,rk3308-spi", }, + { .compatible = "rockchip,rk3328-spi", }, { .compatible = "rockchip,rk3368-spi", }, { .compatible = "rockchip,rk3399-spi", }, + { .compatible = "rockchip,rv1108-spi", }, { }, }; MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match); diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 85575d45901c..06192c9ea813 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -24,6 +24,7 @@ #include <linux/sh_dma.h> #include <linux/spi/spi.h> #include <linux/spi/rspi.h> +#include <linux/spinlock.h> #define RSPI_SPCR 0x00 /* Control Register */ #define RSPI_SSLP 0x01 /* Slave Select Polarity Register */ @@ -79,8 +80,7 @@ #define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */ /* SSLP - Slave Select Polarity Register */ -#define SSLP_SSL1P 0x02 /* SSL1 Signal Polarity Setting */ -#define SSLP_SSL0P 0x01 /* SSL0 Signal Polarity Setting */ +#define SSLP_SSLP(i) BIT(i) /* SSLi Signal Polarity Setting */ /* SPPCR - Pin Control Register */ #define SPPCR_MOIFE 0x20 /* MOSI Idle Value Fixing Enable */ @@ -181,7 +181,9 @@ struct rspi_data { void __iomem *addr; u32 max_speed_hz; struct spi_controller *ctlr; + struct platform_device *pdev; wait_queue_head_t wait; + spinlock_t lock; /* Protects RMW-access to RSPI_SSLP */ struct clk *clk; u16 spcmd; u8 spsr; @@ -239,7 +241,7 @@ struct spi_ops { int (*set_config_register)(struct rspi_data *rspi, int access_size); int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer); - u16 mode_bits; + u16 extra_mode_bits; u16 flags; u16 fifo_size; u8 num_hw_ss; @@ -919,6 +921,29 @@ static int qspi_setup_sequencer(struct rspi_data *rspi, return 0; } 
+static int rspi_setup(struct spi_device *spi) +{ + struct rspi_data *rspi = spi_controller_get_devdata(spi->controller); + u8 sslp; + + if (spi->cs_gpiod) + return 0; + + pm_runtime_get_sync(&rspi->pdev->dev); + spin_lock_irq(&rspi->lock); + + sslp = rspi_read8(rspi, RSPI_SSLP); + if (spi->mode & SPI_CS_HIGH) + sslp |= SSLP_SSLP(spi->chip_select); + else + sslp &= ~SSLP_SSLP(spi->chip_select); + rspi_write8(rspi, sslp, RSPI_SSLP); + + spin_unlock_irq(&rspi->lock); + pm_runtime_put(&rspi->pdev->dev); + return 0; +} + static int rspi_prepare_message(struct spi_controller *ctlr, struct spi_message *msg) { @@ -933,6 +958,8 @@ static int rspi_prepare_message(struct spi_controller *ctlr, rspi->spcmd |= SPCMD_CPOL; if (spi->mode & SPI_CPHA) rspi->spcmd |= SPCMD_CPHA; + if (spi->mode & SPI_LSB_FIRST) + rspi->spcmd |= SPCMD_LSBF; /* Configure slave signal to assert */ rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs @@ -1122,7 +1149,6 @@ static int rspi_remove(struct platform_device *pdev) static const struct spi_ops rspi_ops = { .set_config_register = rspi_set_config_register, .transfer_one = rspi_transfer_one, - .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, .flags = SPI_CONTROLLER_MUST_TX, .fifo_size = 8, .num_hw_ss = 2, @@ -1131,7 +1157,6 @@ static const struct spi_ops rspi_ops = { static const struct spi_ops rspi_rz_ops = { .set_config_register = rspi_rz_set_config_register, .transfer_one = rspi_rz_transfer_one, - .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 8, /* 8 for TX, 32 for RX */ .num_hw_ss = 1, @@ -1140,8 +1165,7 @@ static const struct spi_ops rspi_rz_ops = { static const struct spi_ops qspi_ops = { .set_config_register = qspi_set_config_register, .transfer_one = qspi_transfer_one, - .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP | - SPI_TX_DUAL | SPI_TX_QUAD | + .extra_mode_bits = SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD, .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 32, @@ -1249,16 +1273,20 @@ static int rspi_probe(struct platform_device *pdev) goto error1; } + rspi->pdev = pdev; pm_runtime_enable(&pdev->dev); init_waitqueue_head(&rspi->wait); + spin_lock_init(&rspi->lock); ctlr->bus_num = pdev->id; + ctlr->setup = rspi_setup; ctlr->auto_runtime_pm = true; ctlr->transfer_one = ops->transfer_one; ctlr->prepare_message = rspi_prepare_message; ctlr->unprepare_message = rspi_unprepare_message; - ctlr->mode_bits = ops->mode_bits; + ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | + SPI_LOOP | ops->extra_mode_bits; ctlr->flags = ops->flags; ctlr->dev.of_node = pdev->dev.of_node; ctlr->use_gpio_descriptors = true; diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c index 2d6e37f25e2d..2cb3b611c294 100644 --- a/drivers/spi/spi-s3c24xx.c +++ b/drivers/spi/spi-s3c24xx.c @@ -227,7 +227,7 @@ static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count) struct spi_fiq_code { u32 length; u32 ack_offset; - u8 data[0]; + u8 data[]; }; extern struct spi_fiq_code s3c24xx_spi_fiq_txrx; diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 4ef569b47aa6..d066f5144c3e 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -565,7 +565,7 @@ static int stm32_qspi_probe(struct platform_device *pdev) qspi->io_base = devm_ioremap_resource(dev, res); if (IS_ERR(qspi->io_base)) { ret = PTR_ERR(qspi->io_base); - goto err; + goto err_master_put; } qspi->phys_base = res->start; @@ -574,24 +574,26 
@@ static int stm32_qspi_probe(struct platform_device *pdev) qspi->mm_base = devm_ioremap_resource(dev, res); if (IS_ERR(qspi->mm_base)) { ret = PTR_ERR(qspi->mm_base); - goto err; + goto err_master_put; } qspi->mm_size = resource_size(res); if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) { ret = -EINVAL; - goto err; + goto err_master_put; } irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; + if (irq < 0) { + ret = irq; + goto err_master_put; + } ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0, dev_name(dev), qspi); if (ret) { dev_err(dev, "failed to request irq\n"); - goto err; + goto err_master_put; } init_completion(&qspi->data_completion); @@ -599,23 +601,27 @@ static int stm32_qspi_probe(struct platform_device *pdev) qspi->clk = devm_clk_get(dev, NULL); if (IS_ERR(qspi->clk)) { ret = PTR_ERR(qspi->clk); - goto err; + goto err_master_put; } qspi->clk_rate = clk_get_rate(qspi->clk); if (!qspi->clk_rate) { ret = -EINVAL; - goto err; + goto err_master_put; } ret = clk_prepare_enable(qspi->clk); if (ret) { dev_err(dev, "can not enable the clock\n"); - goto err; + goto err_master_put; } rstc = devm_reset_control_get_exclusive(dev, NULL); - if (!IS_ERR(rstc)) { + if (IS_ERR(rstc)) { + ret = PTR_ERR(rstc); + if (ret == -EPROBE_DEFER) + goto err_qspi_release; + } else { reset_control_assert(rstc); udelay(2); reset_control_deassert(rstc); @@ -625,7 +631,7 @@ static int stm32_qspi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, qspi); ret = stm32_qspi_dma_setup(qspi); if (ret) - goto err; + goto err_qspi_release; mutex_init(&qspi->lock); @@ -641,8 +647,9 @@ static int stm32_qspi_probe(struct platform_device *pdev) if (!ret) return 0; -err: +err_qspi_release: stm32_qspi_release(qspi); +err_master_put: spi_master_put(qspi->ctrl); return ret; diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index e041f9c4ec47..44ac6eb3298d 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -175,7 +175,7 @@ #define SPI_DMA_MIN_BYTES 16 /** - * stm32_spi_reg - stm32 SPI register & bitfield desc + * struct stm32_spi_reg - stm32 SPI register & bitfield desc * @reg: register offset * @mask: bitfield mask * @shift: left shift @@ -187,16 +187,16 @@ struct stm32_spi_reg { }; /** - * stm32_spi_regspec - stm32 registers definition, compatible dependent data - * en: enable register and SPI enable bit - * dma_rx_en: SPI DMA RX enable register end SPI DMA RX enable bit - * dma_tx_en: SPI DMA TX enable register end SPI DMA TX enable bit - * cpol: clock polarity register and polarity bit - * cpha: clock phase register and phase bit - * lsb_first: LSB transmitted first register and bit - * br: baud rate register and bitfields - * rx: SPI RX data register - * tx: SPI TX data register + * struct stm32_spi_regspec - stm32 registers definition, compatible dependent data + * @en: enable register and SPI enable bit + * @dma_rx_en: SPI DMA RX enable register end SPI DMA RX enable bit + * @dma_tx_en: SPI DMA TX enable register end SPI DMA TX enable bit + * @cpol: clock polarity register and polarity bit + * @cpha: clock phase register and phase bit + * @lsb_first: LSB transmitted first register and bit + * @br: baud rate register and bitfields + * @rx: SPI RX data register + * @tx: SPI TX data register */ struct stm32_spi_regspec { const struct stm32_spi_reg en; @@ -213,7 +213,7 @@ struct stm32_spi_regspec { struct stm32_spi; /** - * stm32_spi_cfg - stm32 compatible configuration data + * struct stm32_spi_cfg - stm32 compatible configuration data * @regs: registers descriptions * 
@get_fifo_size: routine to get fifo size * @get_bpw_mask: routine to get bits per word mask @@ -223,13 +223,13 @@ struct stm32_spi; * @set_mode: routine to configure registers to desired mode * @set_data_idleness: optional routine to configure registers to desired idle * time between frames (if driver has this functionality) - * set_number_of_data: optional routine to configure registers to desired + * @set_number_of_data: optional routine to configure registers to desired * number of data (if driver has this functionality) * @can_dma: routine to determine if the transfer is eligible for DMA use * @transfer_one_dma_start: routine to start transfer a single spi_transfer * using DMA - * @dma_rx cb: routine to call after DMA RX channel operation is complete - * @dma_tx cb: routine to call after DMA TX channel operation is complete + * @dma_rx_cb: routine to call after DMA RX channel operation is complete + * @dma_tx_cb: routine to call after DMA TX channel operation is complete * @transfer_one_irq: routine to configure interrupts for driver * @irq_handler_event: Interrupt handler for SPI controller events * @irq_handler_thread: thread of interrupt handler for SPI controller @@ -587,6 +587,7 @@ static void stm32f4_spi_read_rx(struct stm32_spi *spi) /** * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register * @spi: pointer to the spi controller data structure + * @flush: boolean indicating that FIFO should be flushed * * Write in rx_buf depends on remaining bytes to avoid to write beyond * rx_buf end. @@ -756,6 +757,9 @@ static void stm32h7_spi_disable(struct stm32_spi *spi) /** * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use + * @master: controller master interface + * @spi_dev: pointer to the spi device + * @transfer: pointer to spi transfer * * If driver has fifo and the current transfer size is greater than fifo size, * use DMA. Otherwise use DMA for transfer longer than defined DMA min bytes. @@ -974,6 +978,8 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) /** * stm32_spi_prepare_msg - set up the controller to transfer a single message + * @master: controller master interface + * @msg: pointer to spi message */ static int stm32_spi_prepare_msg(struct spi_master *master, struct spi_message *msg) @@ -1026,6 +1032,7 @@ static int stm32_spi_prepare_msg(struct spi_master *master, /** * stm32f4_spi_dma_tx_cb - dma callback + * @data: pointer to the spi controller data structure * * DMA callback is called when the transfer is complete for DMA TX channel. */ @@ -1041,6 +1048,7 @@ static void stm32f4_spi_dma_tx_cb(void *data) /** * stm32f4_spi_dma_rx_cb - dma callback + * @data: pointer to the spi controller data structure * * DMA callback is called when the transfer is complete for DMA RX channel. */ @@ -1054,6 +1062,7 @@ static void stm32f4_spi_dma_rx_cb(void *data) /** * stm32h7_spi_dma_cb - dma callback + * @data: pointer to the spi controller data structure * * DMA callback is called when the transfer is complete or when an error * occurs. If the transfer is complete, EOT flag is raised. @@ -1079,6 +1088,9 @@ static void stm32h7_spi_dma_cb(void *data) /** * stm32_spi_dma_config - configure dma slave channel depending on current * transfer bits_per_word. 
+ * @spi: pointer to the spi controller data structure + * @dma_conf: pointer to the dma_slave_config structure + * @dir: direction of the dma transfer */ static void stm32_spi_dma_config(struct stm32_spi *spi, struct dma_slave_config *dma_conf, @@ -1126,6 +1138,7 @@ static void stm32_spi_dma_config(struct stm32_spi *spi, /** * stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using * interrupts + * @spi: pointer to the spi controller data structure * * It must returns 0 if the transfer is finished or 1 if the transfer is still * in progress. @@ -1166,6 +1179,7 @@ static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi) /** * stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using * interrupts + * @spi: pointer to the spi controller data structure * * It must returns 0 if the transfer is finished or 1 if the transfer is still * in progress. @@ -1207,6 +1221,7 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi) /** * stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start * transfer using DMA + * @spi: pointer to the spi controller data structure */ static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi) { @@ -1227,6 +1242,7 @@ static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi) /** * stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start * transfer using DMA + * @spi: pointer to the spi controller data structure */ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi) { @@ -1243,6 +1259,8 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi) /** * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA + * @spi: pointer to the spi controller data structure + * @xfer: pointer to the spi_transfer structure * * It must returns 0 if the transfer is finished or 1 if the transfer is still * in progress. @@ -1405,7 +1423,7 @@ static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv) /** * stm32_spi_communication_type - return transfer communication type * @spi_dev: pointer to the spi device - * transfer: pointer to spi transfer + * @transfer: pointer to spi transfer */ static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev, struct spi_transfer *transfer) @@ -1522,7 +1540,7 @@ static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len) /** * stm32h7_spi_number_of_data - configure number of data at current transfer * @spi: pointer to the spi controller data structure - * @len: transfer length + * @nb_words: transfer length (in words) */ static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words) { @@ -1546,6 +1564,9 @@ static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words) * stm32_spi_transfer_one_setup - common setup to transfer a single * spi_transfer either using DMA or * interrupts. + * @spi: pointer to the spi controller data structure + * @spi_dev: pointer to the spi device + * @transfer: pointer to spi transfer */ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, struct spi_device *spi_dev, @@ -1625,6 +1646,9 @@ out: /** * stm32_spi_transfer_one - transfer a single spi_transfer + * @master: controller master interface + * @spi_dev: pointer to the spi device + * @transfer: pointer to spi transfer * * It must return 0 if the transfer is finished or 1 if the transfer is still * in progress. 
@@ -1658,6 +1682,8 @@ static int stm32_spi_transfer_one(struct spi_master *master, /** * stm32_spi_unprepare_msg - relax the hardware + * @master: controller master interface + * @msg: pointer to the spi message */ static int stm32_spi_unprepare_msg(struct spi_master *master, struct spi_message *msg) @@ -1671,6 +1697,7 @@ static int stm32_spi_unprepare_msg(struct spi_master *master, /** * stm32f4_spi_config - Configure SPI controller as SPI master + * @spi: pointer to the spi controller data structure */ static int stm32f4_spi_config(struct stm32_spi *spi) { @@ -1701,6 +1728,7 @@ static int stm32f4_spi_config(struct stm32_spi *spi) /** * stm32h7_spi_config - Configure SPI controller as SPI master + * @spi: pointer to the spi controller data structure */ static int stm32h7_spi_config(struct stm32_spi *spi) { diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 60c4de4e4485..7412a3042a8d 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -401,9 +401,6 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high) zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry); - /* Dummy generic FIFO entry */ - zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0); - /* Manually start the generic FIFO command */ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) | diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 38b4c78df506..c92c89467e7e 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -510,6 +510,7 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr) spi->dev.bus = &spi_bus_type; spi->dev.release = spidev_release; spi->cs_gpio = -ENOENT; + spi->mode = ctlr->buswidth_override_bits; spin_lock_init(&spi->statistics.lock); @@ -1514,17 +1515,15 @@ void spi_take_timestamp_pre(struct spi_controller *ctlr, if (!xfer->ptp_sts) return; - if (xfer->timestamped_pre) + if (xfer->timestamped) return; - if (progress < xfer->ptp_sts_word_pre) + if (progress > xfer->ptp_sts_word_pre) return; /* Capture the resolution of the timestamp */ xfer->ptp_sts_word_pre = progress; - xfer->timestamped_pre = true; - if (irqs_off) { local_irq_save(ctlr->irq_flags); preempt_disable(); @@ -1553,7 +1552,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr, if (!xfer->ptp_sts) return; - if (xfer->timestamped_post) + if (xfer->timestamped) return; if (progress < xfer->ptp_sts_word_post) @@ -1569,7 +1568,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr, /* Capture the resolution of the timestamp */ xfer->ptp_sts_word_post = progress; - xfer->timestamped_post = true; + xfer->timestamped = true; } EXPORT_SYMBOL_GPL(spi_take_timestamp_post); @@ -1674,12 +1673,9 @@ void spi_finalize_current_message(struct spi_controller *ctlr) } } - if (unlikely(ctlr->ptp_sts_supported)) { - list_for_each_entry(xfer, &mesg->transfers, transfer_list) { - WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre); - WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post); - } - } + if (unlikely(ctlr->ptp_sts_supported)) + list_for_each_entry(xfer, &mesg->transfers, transfer_list) + WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped); spi_unmap_msg(ctlr, mesg); @@ -1955,13 +1951,8 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, spi->mode |= SPI_CS_HIGH; /* Device speed */ - rc = of_property_read_u32(nc, "spi-max-frequency", &value); - if (rc) { - dev_err(&ctlr->dev, - "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc); - return rc; - } - 
spi->max_speed_hz = value; + if (!of_property_read_u32(nc, "spi-max-frequency", &value)) + spi->max_speed_hz = value; return 0; } @@ -2181,9 +2172,10 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, return AE_NO_MEMORY; } + ACPI_COMPANION_SET(&spi->dev, adev); spi->max_speed_hz = lookup.max_speed_hz; - spi->mode = lookup.mode; + spi->mode |= lookup.mode; spi->irq = lookup.irq; spi->bits_per_word = lookup.bits_per_word; spi->chip_select = lookup.chip_select; @@ -2639,7 +2631,7 @@ int spi_register_controller(struct spi_controller *ctlr) if (ctlr->use_gpio_descriptors) { status = spi_get_gpio_descs(ctlr); if (status) - return status; + goto free_bus_id; /* * A controller using GPIO descriptors always * supports SPI_CS_HIGH if need be. @@ -2649,7 +2641,7 @@ int spi_register_controller(struct spi_controller *ctlr) /* Legacy code path for GPIOs from DT */ status = of_spi_get_gpio_numbers(ctlr); if (status) - return status; + goto free_bus_id; } } @@ -2657,17 +2649,14 @@ int spi_register_controller(struct spi_controller *ctlr) * Even if it's just one always-selected device, there must * be at least one chipselect. */ - if (!ctlr->num_chipselect) - return -EINVAL; + if (!ctlr->num_chipselect) { + status = -EINVAL; + goto free_bus_id; + } status = device_add(&ctlr->dev); - if (status < 0) { - /* free bus id */ - mutex_lock(&board_lock); - idr_remove(&spi_master_idr, ctlr->bus_num); - mutex_unlock(&board_lock); - goto done; - } + if (status < 0) + goto free_bus_id; dev_dbg(dev, "registered %s %s\n", spi_controller_is_slave(ctlr) ? "slave" : "master", dev_name(&ctlr->dev)); @@ -2683,11 +2672,7 @@ int spi_register_controller(struct spi_controller *ctlr) status = spi_controller_initialize_queue(ctlr); if (status) { device_del(&ctlr->dev); - /* free bus id */ - mutex_lock(&board_lock); - idr_remove(&spi_master_idr, ctlr->bus_num); - mutex_unlock(&board_lock); - goto done; + goto free_bus_id; } } /* add statistics */ @@ -2702,7 +2687,12 @@ int spi_register_controller(struct spi_controller *ctlr) /* Register devices from the device tree and ACPI */ of_register_spi_devices(ctlr); acpi_register_spi_devices(ctlr); -done: + return status; + +free_bus_id: + mutex_lock(&board_lock); + idr_remove(&spi_master_idr, ctlr->bus_num); + mutex_unlock(&board_lock); return status; } EXPORT_SYMBOL_GPL(spi_register_controller); @@ -4036,7 +4026,7 @@ static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) struct device *dev; dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev); - return dev ? to_spi_device(dev) : NULL; + return to_spi_device(dev); } static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 1e217e3e9486..80dd1025b953 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -275,14 +275,14 @@ static int spidev_message(struct spidev_data *spidev, #ifdef VERBOSE dev_dbg(&spidev->spi->dev, " xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n", - u_tmp->len, - u_tmp->rx_buf ? "rx " : "", - u_tmp->tx_buf ? "tx " : "", - u_tmp->cs_change ? "cs " : "", - u_tmp->bits_per_word ? : spidev->spi->bits_per_word, - u_tmp->delay_usecs, - u_tmp->word_delay_usecs, - u_tmp->speed_hz ? : spidev->spi->max_speed_hz); + k_tmp->len, + k_tmp->rx_buf ? "rx " : "", + k_tmp->tx_buf ? "tx " : "", + k_tmp->cs_change ? "cs " : "", + k_tmp->bits_per_word ? : spidev->spi->bits_per_word, + k_tmp->delay.value, + k_tmp->word_delay.value, + k_tmp->speed_hz ? 
: spidev->spi->max_speed_hz); #endif spi_message_add_tail(k_tmp, &msg); } @@ -396,6 +396,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) else retval = get_user(tmp, (u32 __user *)arg); if (retval == 0) { + struct spi_controller *ctlr = spi->controller; u32 save = spi->mode; if (tmp & ~SPI_MODE_MASK) { @@ -403,6 +404,10 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } + if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods && + ctlr->cs_gpiods[spi->chip_select]) + tmp |= SPI_CS_HIGH; + tmp |= spi->mode & ~SPI_MODE_MASK; spi->mode = (u16)tmp; retval = spi_setup(spi); @@ -449,10 +454,11 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) spi->max_speed_hz = tmp; retval = spi_setup(spi); - if (retval >= 0) + if (retval == 0) { spidev->speed_hz = tmp; - else - dev_dbg(&spi->dev, "%d Hz (max)\n", tmp); + dev_dbg(&spi->dev, "%d Hz (max)\n", + spidev->speed_hz); + } spi->max_speed_hz = save; } break; diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index baccd7c883cc..a9939ff9490e 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -42,6 +42,10 @@ source "drivers/staging/rtl8188eu/Kconfig" source "drivers/staging/rts5208/Kconfig" +source "drivers/staging/octeon/Kconfig" + +source "drivers/staging/octeon-usb/Kconfig" + source "drivers/staging/vt6655/Kconfig" source "drivers/staging/vt6656/Kconfig" @@ -112,15 +116,8 @@ source "drivers/staging/fieldbus/Kconfig" source "drivers/staging/kpc2000/Kconfig" -source "drivers/staging/wusbcore/Kconfig" -source "drivers/staging/uwb/Kconfig" - -source "drivers/staging/exfat/Kconfig" - source "drivers/staging/qlge/Kconfig" -source "drivers/staging/hp/Kconfig" - source "drivers/staging/wfx/Kconfig" endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index fdd03fd6e704..4d34198151b3 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -12,6 +12,8 @@ obj-$(CONFIG_R8712U) += rtl8712/ obj-$(CONFIG_R8188EU) += rtl8188eu/ obj-$(CONFIG_RTS5208) += rts5208/ obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/ +obj-$(CONFIG_OCTEON_ETHERNET) += octeon/ +obj-$(CONFIG_OCTEON_USB) += octeon-usb/ obj-$(CONFIG_VT6655) += vt6655/ obj-$(CONFIG_VT6656) += vt6656/ obj-$(CONFIG_VME_BUS) += vme/ @@ -46,9 +48,5 @@ obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/ obj-$(CONFIG_KPC2000) += kpc2000/ -obj-$(CONFIG_UWB) += uwb/ -obj-$(CONFIG_USB_WUSB) += wusbcore/ -obj-$(CONFIG_STAGING_EXFAT_FS) += exfat/ obj-$(CONFIG_QLGE) += qlge/ -obj-$(CONFIG_NET_VENDOR_HP) += hp/ obj-$(CONFIG_WFX) += wfx/ diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index d6d605d5cbde..8d8fd5c29349 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -14,14 +14,6 @@ config ASHMEM It is, in theory, a good memory allocator for low-memory devices, because it can discard shared memory units when under memory pressure. -config ANDROID_VSOC - tristate "Android Virtual SoC support" - depends on PCI_MSI - help - This option adds support for the Virtual SoC driver needed to boot - a 'cuttlefish' Android image inside QEmu. The driver interacts with - a QEmu ivshmem device. If built as a module, it will be called vsoc. 
- source "drivers/staging/android/ion/Kconfig" endif # if ANDROID diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index 14bd9c6ce10d..3b66cd0b0ec5 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -4,4 +4,3 @@ ccflags-y += -I$(src) # needed for trace events obj-y += ion/ obj-$(CONFIG_ASHMEM) += ashmem.o -obj-$(CONFIG_ANDROID_VSOC) += vsoc.o diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO index 767dd98fd92d..80eccfaf6db5 100644 --- a/drivers/staging/android/TODO +++ b/drivers/staging/android/TODO @@ -9,14 +9,5 @@ ion/ - Split /dev/ion up into multiple nodes (e.g. /dev/ion/heap0) - Better test framework (integration with VGEM was suggested) -vsoc.c, uapi/vsoc_shm.h - - The current driver uses the same wait queue for all of the futexes in a - region. This will cause false wakeups in regions with a large number of - waiting threads. We should eventually use multiple queues and select the - queue based on the region. - - Add debugfs support for examining the permissions of regions. - - Remove VSOC_WAIT_FOR_INCOMING_INTERRUPT ioctl. This functionality has been - superseded by the futex and is there for legacy reasons. - Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com> diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 5891d0744a76..8044510d8ec6 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -351,8 +351,23 @@ static inline vm_flags_t calc_vm_may_flags(unsigned long prot) _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC); } +static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma) +{ + /* do not allow to mmap ashmem backing shmem file directly */ + return -EPERM; +} + +static unsigned long +ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); +} + static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) { + static struct file_operations vmfile_fops; struct ashmem_area *asma = file->private_data; int ret = 0; @@ -393,6 +408,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) } vmfile->f_mode |= FMODE_LSEEK; asma->file = vmfile; + /* + * override mmap operation of the vmfile so that it can't be + * remapped which would lead to creation of a new vma with no + * asma permission checks. Have to override get_unmapped_area + * as well to prevent VM_BUG_ON check for f_ops modification. + */ + if (!vmfile_fops.mmap) { + vmfile_fops = *vmfile->f_op; + vmfile_fops.mmap = ashmem_vmfile_mmap; + vmfile_fops.get_unmapped_area = + ashmem_vmfile_get_unmapped_area; + } + vmfile->f_op = &vmfile_fops; } get_file(asma->file); diff --git a/drivers/staging/android/uapi/vsoc_shm.h b/drivers/staging/android/uapi/vsoc_shm.h deleted file mode 100644 index 6291fb24efb2..000000000000 --- a/drivers/staging/android/uapi/vsoc_shm.h +++ /dev/null @@ -1,295 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2017 Google, Inc. - * - */ - -#ifndef _UAPI_LINUX_VSOC_SHM_H -#define _UAPI_LINUX_VSOC_SHM_H - -#include <linux/types.h> - -/** - * A permission is a token that permits a receiver to read and/or write an area - * of memory within a Vsoc region. 
- * - * An fd_scoped permission grants both read and write access, and can be - * attached to a file description (see open(2)). - * Ownership of the area can then be shared by passing a file descriptor - * among processes. - * - * begin_offset and end_offset define the area of memory that is controlled by - * the permission. owner_offset points to a word, also in shared memory, that - * controls ownership of the area. - * - * ownership of the region expires when the associated file description is - * released. - * - * At most one permission can be attached to each file description. - * - * This is useful when implementing HALs like gralloc that scope and pass - * ownership of shared resources via file descriptors. - * - * The caller is responsibe for doing any fencing. - * - * The calling process will normally identify a currently free area of - * memory. It will construct a proposed fd_scoped_permission_arg structure: - * - * begin_offset and end_offset describe the area being claimed - * - * owner_offset points to the location in shared memory that indicates the - * owner of the area. - * - * owned_value is the value that will be stored in owner_offset iff the - * permission can be granted. It must be different than VSOC_REGION_FREE. - * - * Two fd_scoped_permission structures are compatible if they vary only by - * their owned_value fields. - * - * The driver ensures that, for any group of simultaneous callers proposing - * compatible fd_scoped_permissions, it will accept exactly one of the - * propopsals. The other callers will get a failure with errno of EAGAIN. - * - * A process receiving a file descriptor can identify the region being - * granted using the VSOC_GET_FD_SCOPED_PERMISSION ioctl. - */ -struct fd_scoped_permission { - __u32 begin_offset; - __u32 end_offset; - __u32 owner_offset; - __u32 owned_value; -}; - -/* - * This value represents a free area of memory. The driver expects to see this - * value at owner_offset when creating a permission otherwise it will not do it, - * and will write this value back once the permission is no longer needed. - */ -#define VSOC_REGION_FREE ((__u32)0) - -/** - * ioctl argument for VSOC_CREATE_FD_SCOPE_PERMISSION - */ -struct fd_scoped_permission_arg { - struct fd_scoped_permission perm; - __s32 managed_region_fd; -}; - -#define VSOC_NODE_FREE ((__u32)0) - -/* - * Describes a signal table in shared memory. Each non-zero entry in the - * table indicates that the receiver should signal the futex at the given - * offset. Offsets are relative to the region, not the shared memory window. - * - * interrupt_signalled_offset is used to reliably signal interrupts across the - * vmm boundary. There are two roles: transmitter and receiver. For example, - * in the host_to_guest_signal_table the host is the transmitter and the - * guest is the receiver. The protocol is as follows: - * - * 1. The transmitter should convert the offset of the futex to an offset - * in the signal table [0, (1 << num_nodes_lg2)) - * The transmitter can choose any appropriate hashing algorithm, including - * hash = futex_offset & ((1 << num_nodes_lg2) - 1) - * - * 3. The transmitter should atomically compare and swap futex_offset with 0 - * at hash. There are 3 possible outcomes - * a. The swap fails because the futex_offset is already in the table. - * The transmitter should stop. - * b. Some other offset is in the table. This is a hash collision. The - * transmitter should move to another table slot and try again. 
One - * possible algorithm: - * hash = (hash + 1) & ((1 << num_nodes_lg2) - 1) - * c. The swap worked. Continue below. - * - * 3. The transmitter atomically swaps 1 with the value at the - * interrupt_signalled_offset. There are two outcomes: - * a. The prior value was 1. In this case an interrupt has already been - * posted. The transmitter is done. - * b. The prior value was 0, indicating that the receiver may be sleeping. - * The transmitter will issue an interrupt. - * - * 4. On waking the receiver immediately exchanges a 0 with the - * interrupt_signalled_offset. If it receives a 0 then this a spurious - * interrupt. That may occasionally happen in the current protocol, but - * should be rare. - * - * 5. The receiver scans the signal table by atomicaly exchanging 0 at each - * location. If a non-zero offset is returned from the exchange the - * receiver wakes all sleepers at the given offset: - * futex((int*)(region_base + old_value), FUTEX_WAKE, MAX_INT); - * - * 6. The receiver thread then does a conditional wait, waking immediately - * if the value at interrupt_signalled_offset is non-zero. This catches cases - * here additional signals were posted while the table was being scanned. - * On the guest the wait is handled via the VSOC_WAIT_FOR_INCOMING_INTERRUPT - * ioctl. - */ -struct vsoc_signal_table_layout { - /* log_2(Number of signal table entries) */ - __u32 num_nodes_lg2; - /* - * Offset to the first signal table entry relative to the start of the - * region - */ - __u32 futex_uaddr_table_offset; - /* - * Offset to an atomic_t / atomic uint32_t. A non-zero value indicates - * that one or more offsets are currently posted in the table. - * semi-unique access to an entry in the table - */ - __u32 interrupt_signalled_offset; -}; - -#define VSOC_REGION_WHOLE ((__s32)0) -#define VSOC_DEVICE_NAME_SZ 16 - -/** - * Each HAL would (usually) talk to a single device region - * Mulitple entities care about these regions: - * - The ivshmem_server will populate the regions in shared memory - * - The guest kernel will read the region, create minor device nodes, and - * allow interested parties to register for FUTEX_WAKE events in the region - * - HALs will access via the minor device nodes published by the guest kernel - * - Host side processes will access the region via the ivshmem_server: - * 1. Pass name to ivshmem_server at a UNIX socket - * 2. ivshmemserver will reply with 2 fds: - * - host->guest doorbell fd - * - guest->host doorbell fd - * - fd for the shared memory region - * - region offset - * 3. Start a futex receiver thread on the doorbell fd pointed at the - * signal_nodes - */ -struct vsoc_device_region { - __u16 current_version; - __u16 min_compatible_version; - __u32 region_begin_offset; - __u32 region_end_offset; - __u32 offset_of_region_data; - struct vsoc_signal_table_layout guest_to_host_signal_table; - struct vsoc_signal_table_layout host_to_guest_signal_table; - /* Name of the device. Must always be terminated with a '\0', so - * the longest supported device name is 15 characters. - */ - char device_name[VSOC_DEVICE_NAME_SZ]; - /* There are two ways that permissions to access regions are handled: - * - When subdivided_by is VSOC_REGION_WHOLE, any process that can - * open the device node for the region gains complete access to it. - * - When subdivided is set processes that open the region cannot - * access it. 
Access to a sub-region must be established by invoking - * the VSOC_CREATE_FD_SCOPE_PERMISSION ioctl on the region - * referenced in subdivided_by, providing a fileinstance - * (represented by a fd) opened on this region. - */ - __u32 managed_by; -}; - -/* - * The vsoc layout descriptor. - * The first 4K should be reserved for the shm header and region descriptors. - * The regions should be page aligned. - */ - -struct vsoc_shm_layout_descriptor { - __u16 major_version; - __u16 minor_version; - - /* size of the shm. This may be redundant but nice to have */ - __u32 size; - - /* number of shared memory regions */ - __u32 region_count; - - /* The offset to the start of region descriptors */ - __u32 vsoc_region_desc_offset; -}; - -/* - * This specifies the current version that should be stored in - * vsoc_shm_layout_descriptor.major_version and - * vsoc_shm_layout_descriptor.minor_version. - * It should be updated only if the vsoc_device_region and - * vsoc_shm_layout_descriptor structures have changed. - * Versioning within each region is transferred - * via the min_compatible_version and current_version fields in - * vsoc_device_region. The driver does not consult these fields: they are left - * for the HALs and host processes and will change independently of the layout - * version. - */ -#define CURRENT_VSOC_LAYOUT_MAJOR_VERSION 2 -#define CURRENT_VSOC_LAYOUT_MINOR_VERSION 0 - -#define VSOC_CREATE_FD_SCOPED_PERMISSION \ - _IOW(0xF5, 0, struct fd_scoped_permission) -#define VSOC_GET_FD_SCOPED_PERMISSION _IOR(0xF5, 1, struct fd_scoped_permission) - -/* - * This is used to signal the host to scan the guest_to_host_signal_table - * for new futexes to wake. This sends an interrupt if one is not already - * in flight. - */ -#define VSOC_MAYBE_SEND_INTERRUPT_TO_HOST _IO(0xF5, 2) - -/* - * When this returns the guest will scan host_to_guest_signal_table to - * check for new futexes to wake. - */ -/* TODO(ghartman): Consider moving this to the bottom half */ -#define VSOC_WAIT_FOR_INCOMING_INTERRUPT _IO(0xF5, 3) - -/* - * Guest HALs will use this to retrieve the region description after - * opening their device node. - */ -#define VSOC_DESCRIBE_REGION _IOR(0xF5, 4, struct vsoc_device_region) - -/* - * Wake any threads that may be waiting for a host interrupt on this region. - * This is mostly used during shutdown. - */ -#define VSOC_SELF_INTERRUPT _IO(0xF5, 5) - -/* - * This is used to signal the host to scan the guest_to_host_signal_table - * for new futexes to wake. This sends an interrupt unconditionally. - */ -#define VSOC_SEND_INTERRUPT_TO_HOST _IO(0xF5, 6) - -enum wait_types { - VSOC_WAIT_UNDEFINED = 0, - VSOC_WAIT_IF_EQUAL = 1, - VSOC_WAIT_IF_EQUAL_TIMEOUT = 2 -}; - -/* - * Wait for a condition to be true - * - * Note, this is sized and aligned so the 32 bit and 64 bit layouts are - * identical. - */ -struct vsoc_cond_wait { - /* Input: Offset of the 32 bit word to check */ - __u32 offset; - /* Input: Value that will be compared with the offset */ - __u32 value; - /* Monotonic time to wake at in seconds */ - __u64 wake_time_sec; - /* Input: Monotonic time to wait in nanoseconds */ - __u32 wake_time_nsec; - /* Input: Type of wait */ - __u32 wait_type; - /* Output: Number of times the thread woke before returning. */ - __u32 wakes; - /* Ensure that we're 8-byte aligned and 8 byte length for 32/64 bit - * compatibility. 
- */ - __u32 reserved_1; -}; - -#define VSOC_COND_WAIT _IOWR(0xF5, 7, struct vsoc_cond_wait) - -/* Wake any local threads waiting at the offset given in arg */ -#define VSOC_COND_WAKE _IO(0xF5, 8) - -#endif /* _UAPI_LINUX_VSOC_SHM_H */ diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c deleted file mode 100644 index 1240bb0317d9..000000000000 --- a/drivers/staging/android/vsoc.c +++ /dev/null @@ -1,1149 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * drivers/android/staging/vsoc.c - * - * Android Virtual System on a Chip (VSoC) driver - * - * Copyright (C) 2017 Google, Inc. - * - * Author: ghartman@google.com - * - * Based on drivers/char/kvm_ivshmem.c - driver for KVM Inter-VM shared memory - * Copyright 2009 Cam Macdonell <cam@cs.ualberta.ca> - * - * Based on cirrusfb.c and 8139cp.c: - * Copyright 1999-2001 Jeff Garzik - * Copyright 2001-2004 Jeff Garzik - */ - -#include <linux/dma-mapping.h> -#include <linux/freezer.h> -#include <linux/futex.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/pci.h> -#include <linux/proc_fs.h> -#include <linux/sched.h> -#include <linux/syscalls.h> -#include <linux/uaccess.h> -#include <linux/interrupt.h> -#include <linux/cdev.h> -#include <linux/file.h> -#include "uapi/vsoc_shm.h" - -#define VSOC_DEV_NAME "vsoc" - -/* - * Description of the ivshmem-doorbell PCI device used by QEmu. These - * constants follow docs/specs/ivshmem-spec.txt, which can be found in - * the QEmu repository. This was last reconciled with the version that - * came out with 2.8 - */ - -/* - * These constants are determined KVM Inter-VM shared memory device - * register offsets - */ -enum { - INTR_MASK = 0x00, /* Interrupt Mask */ - INTR_STATUS = 0x04, /* Interrupt Status */ - IV_POSITION = 0x08, /* VM ID */ - DOORBELL = 0x0c, /* Doorbell */ -}; - -static const int REGISTER_BAR; /* Equal to 0 */ -static const int MAX_REGISTER_BAR_LEN = 0x100; -/* - * The MSI-x BAR is not used directly. - * - * static const int MSI_X_BAR = 1; - */ -static const int SHARED_MEMORY_BAR = 2; - -struct vsoc_region_data { - char name[VSOC_DEVICE_NAME_SZ + 1]; - wait_queue_head_t interrupt_wait_queue; - /* TODO(b/73664181): Use multiple futex wait queues */ - wait_queue_head_t futex_wait_queue; - /* Flag indicating that an interrupt has been signalled by the host. */ - atomic_t *incoming_signalled; - /* Flag indicating the guest has signalled the host. */ - atomic_t *outgoing_signalled; - bool irq_requested; - bool device_created; -}; - -struct vsoc_device { - /* Kernel virtual address of REGISTER_BAR. */ - void __iomem *regs; - /* Physical address of SHARED_MEMORY_BAR. */ - phys_addr_t shm_phys_start; - /* Kernel virtual address of SHARED_MEMORY_BAR. */ - void __iomem *kernel_mapped_shm; - /* Size of the entire shared memory window in bytes. */ - size_t shm_size; - /* - * Pointer to the virtual address of the shared memory layout structure. - * This is probably identical to kernel_mapped_shm, but saving this - * here saves a lot of annoying casts. - */ - struct vsoc_shm_layout_descriptor *layout; - /* - * Points to a table of region descriptors in the kernel's virtual - * address space. Calculated from - * vsoc_shm_layout_descriptor.vsoc_region_desc_offset - */ - struct vsoc_device_region *regions; - /* Head of a list of permissions that have been granted. */ - struct list_head permissions; - struct pci_dev *dev; - /* Per-region (and therefore per-interrupt) information. 
*/ - struct vsoc_region_data *regions_data; - /* - * Table of msi-x entries. This has to be separated from struct - * vsoc_region_data because the kernel deals with them as an array. - */ - struct msix_entry *msix_entries; - /* Mutex that protectes the permission list */ - struct mutex mtx; - /* Major number assigned by the kernel */ - int major; - /* Character device assigned by the kernel */ - struct cdev cdev; - /* Device class assigned by the kernel */ - struct class *class; - /* - * Flags that indicate what we've initialized. These are used to do an - * orderly cleanup of the device. - */ - bool enabled_device; - bool requested_regions; - bool cdev_added; - bool class_added; - bool msix_enabled; -}; - -static struct vsoc_device vsoc_dev; - -/* - * TODO(ghartman): Add a /sys filesystem entry that summarizes the permissions. - */ - -struct fd_scoped_permission_node { - struct fd_scoped_permission permission; - struct list_head list; -}; - -struct vsoc_private_data { - struct fd_scoped_permission_node *fd_scoped_permission_node; -}; - -static long vsoc_ioctl(struct file *, unsigned int, unsigned long); -static int vsoc_mmap(struct file *, struct vm_area_struct *); -static int vsoc_open(struct inode *, struct file *); -static int vsoc_release(struct inode *, struct file *); -static ssize_t vsoc_read(struct file *, char __user *, size_t, loff_t *); -static ssize_t vsoc_write(struct file *, const char __user *, size_t, loff_t *); -static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin); -static int -do_create_fd_scoped_permission(struct vsoc_device_region *region_p, - struct fd_scoped_permission_node *np, - struct fd_scoped_permission_arg __user *arg); -static void -do_destroy_fd_scoped_permission(struct vsoc_device_region *owner_region_p, - struct fd_scoped_permission *perm); -static long do_vsoc_describe_region(struct file *, - struct vsoc_device_region __user *); -static ssize_t vsoc_get_area(struct file *filp, __u32 *perm_off); - -/** - * Validate arguments on entry points to the driver. - */ -inline int vsoc_validate_inode(struct inode *inode) -{ - if (iminor(inode) >= vsoc_dev.layout->region_count) { - dev_err(&vsoc_dev.dev->dev, - "describe_region: invalid region %d\n", iminor(inode)); - return -ENODEV; - } - return 0; -} - -inline int vsoc_validate_filep(struct file *filp) -{ - int ret = vsoc_validate_inode(file_inode(filp)); - - if (ret) - return ret; - if (!filp->private_data) { - dev_err(&vsoc_dev.dev->dev, - "No private data on fd, region %d\n", - iminor(file_inode(filp))); - return -EBADFD; - } - return 0; -} - -/* Converts from shared memory offset to virtual address */ -static inline void *shm_off_to_virtual_addr(__u32 offset) -{ - return (void __force *)vsoc_dev.kernel_mapped_shm + offset; -} - -/* Converts from shared memory offset to physical address */ -static inline phys_addr_t shm_off_to_phys_addr(__u32 offset) -{ - return vsoc_dev.shm_phys_start + offset; -} - -/** - * Convenience functions to obtain the region from the inode or file. - * Dangerous to call before validating the inode/file. 
- */ -static -inline struct vsoc_device_region *vsoc_region_from_inode(struct inode *inode) -{ - return &vsoc_dev.regions[iminor(inode)]; -} - -static -inline struct vsoc_device_region *vsoc_region_from_filep(struct file *inode) -{ - return vsoc_region_from_inode(file_inode(inode)); -} - -static inline uint32_t vsoc_device_region_size(struct vsoc_device_region *r) -{ - return r->region_end_offset - r->region_begin_offset; -} - -static const struct file_operations vsoc_ops = { - .owner = THIS_MODULE, - .open = vsoc_open, - .mmap = vsoc_mmap, - .read = vsoc_read, - .unlocked_ioctl = vsoc_ioctl, - .compat_ioctl = vsoc_ioctl, - .write = vsoc_write, - .llseek = vsoc_lseek, - .release = vsoc_release, -}; - -static struct pci_device_id vsoc_id_table[] = { - {0x1af4, 0x1110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0}, -}; - -MODULE_DEVICE_TABLE(pci, vsoc_id_table); - -static void vsoc_remove_device(struct pci_dev *pdev); -static int vsoc_probe_device(struct pci_dev *pdev, - const struct pci_device_id *ent); - -static struct pci_driver vsoc_pci_driver = { - .name = "vsoc", - .id_table = vsoc_id_table, - .probe = vsoc_probe_device, - .remove = vsoc_remove_device, -}; - -static int -do_create_fd_scoped_permission(struct vsoc_device_region *region_p, - struct fd_scoped_permission_node *np, - struct fd_scoped_permission_arg __user *arg) -{ - struct file *managed_filp; - s32 managed_fd; - atomic_t *owner_ptr = NULL; - struct vsoc_device_region *managed_region_p; - - if (copy_from_user(&np->permission, - &arg->perm, sizeof(np->permission)) || - copy_from_user(&managed_fd, - &arg->managed_region_fd, sizeof(managed_fd))) { - return -EFAULT; - } - managed_filp = fdget(managed_fd).file; - /* Check that it's a valid fd, */ - if (!managed_filp || vsoc_validate_filep(managed_filp)) - return -EPERM; - /* EEXIST if the given fd already has a permission. 
*/ - if (((struct vsoc_private_data *)managed_filp->private_data)-> - fd_scoped_permission_node) - return -EEXIST; - managed_region_p = vsoc_region_from_filep(managed_filp); - /* Check that the provided region is managed by this one */ - if (&vsoc_dev.regions[managed_region_p->managed_by] != region_p) - return -EPERM; - /* The area must be well formed and have non-zero size */ - if (np->permission.begin_offset >= np->permission.end_offset) - return -EINVAL; - /* The area must fit in the memory window */ - if (np->permission.end_offset > - vsoc_device_region_size(managed_region_p)) - return -ERANGE; - /* The area must be in the region data section */ - if (np->permission.begin_offset < - managed_region_p->offset_of_region_data) - return -ERANGE; - /* The area must be page aligned */ - if (!PAGE_ALIGNED(np->permission.begin_offset) || - !PAGE_ALIGNED(np->permission.end_offset)) - return -EINVAL; - /* Owner offset must be naturally aligned in the window */ - if (np->permission.owner_offset & - (sizeof(np->permission.owner_offset) - 1)) - return -EINVAL; - /* The owner flag must reside in the owner memory */ - if (np->permission.owner_offset + sizeof(np->permission.owner_offset) > - vsoc_device_region_size(region_p)) - return -ERANGE; - /* The owner flag must reside in the data section */ - if (np->permission.owner_offset < region_p->offset_of_region_data) - return -EINVAL; - /* The owner value must change to claim the memory */ - if (np->permission.owned_value == VSOC_REGION_FREE) - return -EINVAL; - owner_ptr = - (atomic_t *)shm_off_to_virtual_addr(region_p->region_begin_offset + - np->permission.owner_offset); - /* We've already verified that this is in the shared memory window, so - * it should be safe to write to this address. - */ - if (atomic_cmpxchg(owner_ptr, - VSOC_REGION_FREE, - np->permission.owned_value) != VSOC_REGION_FREE) { - return -EBUSY; - } - ((struct vsoc_private_data *)managed_filp->private_data)-> - fd_scoped_permission_node = np; - /* The file offset needs to be adjusted if the calling - * process did any read/write operations on the fd - * before creating the permission. - */ - if (managed_filp->f_pos) { - if (managed_filp->f_pos > np->permission.end_offset) { - /* If the offset is beyond the permission end, set it - * to the end. - */ - managed_filp->f_pos = np->permission.end_offset; - } else { - /* If the offset is within the permission interval - * keep it there otherwise reset it to zero. 
- */ - if (managed_filp->f_pos < np->permission.begin_offset) { - managed_filp->f_pos = 0; - } else { - managed_filp->f_pos -= - np->permission.begin_offset; - } - } - } - return 0; -} - -static void -do_destroy_fd_scoped_permission_node(struct vsoc_device_region *owner_region_p, - struct fd_scoped_permission_node *node) -{ - if (node) { - do_destroy_fd_scoped_permission(owner_region_p, - &node->permission); - mutex_lock(&vsoc_dev.mtx); - list_del(&node->list); - mutex_unlock(&vsoc_dev.mtx); - kfree(node); - } -} - -static void -do_destroy_fd_scoped_permission(struct vsoc_device_region *owner_region_p, - struct fd_scoped_permission *perm) -{ - atomic_t *owner_ptr = NULL; - int prev = 0; - - if (!perm) - return; - owner_ptr = (atomic_t *)shm_off_to_virtual_addr - (owner_region_p->region_begin_offset + perm->owner_offset); - prev = atomic_xchg(owner_ptr, VSOC_REGION_FREE); - if (prev != perm->owned_value) - dev_err(&vsoc_dev.dev->dev, - "%x-%x: owner (%s) %x: expected to be %x was %x", - perm->begin_offset, perm->end_offset, - owner_region_p->device_name, perm->owner_offset, - perm->owned_value, prev); -} - -static long do_vsoc_describe_region(struct file *filp, - struct vsoc_device_region __user *dest) -{ - struct vsoc_device_region *region_p; - int retval = vsoc_validate_filep(filp); - - if (retval) - return retval; - region_p = vsoc_region_from_filep(filp); - if (copy_to_user(dest, region_p, sizeof(*region_p))) - return -EFAULT; - return 0; -} - -/** - * Implements the inner logic of cond_wait. Copies to and from userspace are - * done in the helper function below. - */ -static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg) -{ - DEFINE_WAIT(wait); - u32 region_number = iminor(file_inode(filp)); - struct vsoc_region_data *data = vsoc_dev.regions_data + region_number; - struct hrtimer_sleeper timeout, *to = NULL; - int ret = 0; - struct vsoc_device_region *region_p = vsoc_region_from_filep(filp); - atomic_t *address = NULL; - ktime_t wake_time; - - /* Ensure that the offset is aligned */ - if (arg->offset & (sizeof(uint32_t) - 1)) - return -EADDRNOTAVAIL; - /* Ensure that the offset is within shared memory */ - if (((uint64_t)arg->offset) + region_p->region_begin_offset + - sizeof(uint32_t) > region_p->region_end_offset) - return -E2BIG; - address = shm_off_to_virtual_addr(region_p->region_begin_offset + - arg->offset); - - /* Ensure that the type of wait is valid */ - switch (arg->wait_type) { - case VSOC_WAIT_IF_EQUAL: - break; - case VSOC_WAIT_IF_EQUAL_TIMEOUT: - to = &timeout; - break; - default: - return -EINVAL; - } - - if (to) { - /* Copy the user-supplied timesec into the kernel structure. - * We do things this way to flatten differences between 32 bit - * and 64 bit timespecs. - */ - if (arg->wake_time_nsec >= NSEC_PER_SEC) - return -EINVAL; - wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec); - - hrtimer_init_sleeper_on_stack(to, CLOCK_MONOTONIC, - HRTIMER_MODE_ABS); - hrtimer_set_expires_range_ns(&to->timer, wake_time, - current->timer_slack_ns); - } - - while (1) { - prepare_to_wait(&data->futex_wait_queue, &wait, - TASK_INTERRUPTIBLE); - /* - * Check the sentinel value after prepare_to_wait. If the value - * changes after this check the writer will call signal, - * changing the task state from INTERRUPTIBLE to RUNNING. That - * will ensure that schedule() will eventually schedule this - * task. 
- */ - if (atomic_read(address) != arg->value) { - ret = 0; - break; - } - if (to) { - hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS); - if (likely(to->task)) - freezable_schedule(); - hrtimer_cancel(&to->timer); - if (!to->task) { - ret = -ETIMEDOUT; - break; - } - } else { - freezable_schedule(); - } - /* Count the number of times that we woke up. This is useful - * for unit testing. - */ - ++arg->wakes; - if (signal_pending(current)) { - ret = -EINTR; - break; - } - } - finish_wait(&data->futex_wait_queue, &wait); - if (to) - destroy_hrtimer_on_stack(&to->timer); - return ret; -} - -/** - * Handles the details of copying from/to userspace to ensure that the copies - * happen on all of the return paths of cond_wait. - */ -static int do_vsoc_cond_wait(struct file *filp, - struct vsoc_cond_wait __user *untrusted_in) -{ - struct vsoc_cond_wait arg; - int rval = 0; - - if (copy_from_user(&arg, untrusted_in, sizeof(arg))) - return -EFAULT; - /* wakes is an out parameter. Initialize it to something sensible. */ - arg.wakes = 0; - rval = handle_vsoc_cond_wait(filp, &arg); - if (copy_to_user(untrusted_in, &arg, sizeof(arg))) - return -EFAULT; - return rval; -} - -static int do_vsoc_cond_wake(struct file *filp, uint32_t offset) -{ - struct vsoc_device_region *region_p = vsoc_region_from_filep(filp); - u32 region_number = iminor(file_inode(filp)); - struct vsoc_region_data *data = vsoc_dev.regions_data + region_number; - /* Ensure that the offset is aligned */ - if (offset & (sizeof(uint32_t) - 1)) - return -EADDRNOTAVAIL; - /* Ensure that the offset is within shared memory */ - if (((uint64_t)offset) + region_p->region_begin_offset + - sizeof(uint32_t) > region_p->region_end_offset) - return -E2BIG; - /* - * TODO(b/73664181): Use multiple futex wait queues. - * We need to wake every sleeper when the condition changes. Typically - * only a single thread will be waiting on the condition, but there - * are exceptions. The worst case is about 10 threads. 
- */ - wake_up_interruptible_all(&data->futex_wait_queue); - return 0; -} - -static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - int rv = 0; - struct vsoc_device_region *region_p; - u32 reg_num; - struct vsoc_region_data *reg_data; - int retval = vsoc_validate_filep(filp); - - if (retval) - return retval; - region_p = vsoc_region_from_filep(filp); - reg_num = iminor(file_inode(filp)); - reg_data = vsoc_dev.regions_data + reg_num; - switch (cmd) { - case VSOC_CREATE_FD_SCOPED_PERMISSION: - { - struct fd_scoped_permission_node *node = NULL; - - node = kzalloc(sizeof(*node), GFP_KERNEL); - /* We can't allocate memory for the permission */ - if (!node) - return -ENOMEM; - INIT_LIST_HEAD(&node->list); - rv = do_create_fd_scoped_permission - (region_p, - node, - (struct fd_scoped_permission_arg __user *)arg); - if (!rv) { - mutex_lock(&vsoc_dev.mtx); - list_add(&node->list, &vsoc_dev.permissions); - mutex_unlock(&vsoc_dev.mtx); - } else { - kfree(node); - return rv; - } - } - break; - - case VSOC_GET_FD_SCOPED_PERMISSION: - { - struct fd_scoped_permission_node *node = - ((struct vsoc_private_data *)filp->private_data)-> - fd_scoped_permission_node; - if (!node) - return -ENOENT; - if (copy_to_user - ((struct fd_scoped_permission __user *)arg, - &node->permission, sizeof(node->permission))) - return -EFAULT; - } - break; - - case VSOC_MAYBE_SEND_INTERRUPT_TO_HOST: - if (!atomic_xchg(reg_data->outgoing_signalled, 1)) { - writel(reg_num, vsoc_dev.regs + DOORBELL); - return 0; - } else { - return -EBUSY; - } - break; - - case VSOC_SEND_INTERRUPT_TO_HOST: - writel(reg_num, vsoc_dev.regs + DOORBELL); - return 0; - case VSOC_WAIT_FOR_INCOMING_INTERRUPT: - wait_event_interruptible - (reg_data->interrupt_wait_queue, - (atomic_read(reg_data->incoming_signalled) != 0)); - break; - - case VSOC_DESCRIBE_REGION: - return do_vsoc_describe_region - (filp, - (struct vsoc_device_region __user *)arg); - - case VSOC_SELF_INTERRUPT: - atomic_set(reg_data->incoming_signalled, 1); - wake_up_interruptible(®_data->interrupt_wait_queue); - break; - - case VSOC_COND_WAIT: - return do_vsoc_cond_wait(filp, - (struct vsoc_cond_wait __user *)arg); - case VSOC_COND_WAKE: - return do_vsoc_cond_wake(filp, arg); - - default: - return -EINVAL; - } - return 0; -} - -static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len, - loff_t *poffset) -{ - __u32 area_off; - const void *area_p; - ssize_t area_len; - int retval = vsoc_validate_filep(filp); - - if (retval) - return retval; - area_len = vsoc_get_area(filp, &area_off); - area_p = shm_off_to_virtual_addr(area_off); - area_p += *poffset; - area_len -= *poffset; - if (area_len <= 0) - return 0; - if (area_len < len) - len = area_len; - if (copy_to_user(buffer, area_p, len)) - return -EFAULT; - *poffset += len; - return len; -} - -static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin) -{ - ssize_t area_len = 0; - int retval = vsoc_validate_filep(filp); - - if (retval) - return retval; - area_len = vsoc_get_area(filp, NULL); - switch (origin) { - case SEEK_SET: - break; - - case SEEK_CUR: - if (offset > 0 && offset + filp->f_pos < 0) - return -EOVERFLOW; - offset += filp->f_pos; - break; - - case SEEK_END: - if (offset > 0 && offset + area_len < 0) - return -EOVERFLOW; - offset += area_len; - break; - - case SEEK_DATA: - if (offset >= area_len) - return -EINVAL; - if (offset < 0) - offset = 0; - break; - - case SEEK_HOLE: - /* Next hole is always the end of the region, unless offset is - * beyond that - */ - if (offset 
< area_len) - offset = area_len; - break; - - default: - return -EINVAL; - } - - if (offset < 0 || offset > area_len) - return -EINVAL; - filp->f_pos = offset; - - return offset; -} - -static ssize_t vsoc_write(struct file *filp, const char __user *buffer, - size_t len, loff_t *poffset) -{ - __u32 area_off; - void *area_p; - ssize_t area_len; - int retval = vsoc_validate_filep(filp); - - if (retval) - return retval; - area_len = vsoc_get_area(filp, &area_off); - area_p = shm_off_to_virtual_addr(area_off); - area_p += *poffset; - area_len -= *poffset; - if (area_len <= 0) - return 0; - if (area_len < len) - len = area_len; - if (copy_from_user(area_p, buffer, len)) - return -EFAULT; - *poffset += len; - return len; -} - -static irqreturn_t vsoc_interrupt(int irq, void *region_data_v) -{ - struct vsoc_region_data *region_data = - (struct vsoc_region_data *)region_data_v; - int reg_num = region_data - vsoc_dev.regions_data; - - if (unlikely(!region_data)) - return IRQ_NONE; - - if (unlikely(reg_num < 0 || - reg_num >= vsoc_dev.layout->region_count)) { - dev_err(&vsoc_dev.dev->dev, - "invalid irq @%p reg_num=0x%04x\n", - region_data, reg_num); - return IRQ_NONE; - } - if (unlikely(vsoc_dev.regions_data + reg_num != region_data)) { - dev_err(&vsoc_dev.dev->dev, - "irq not aligned @%p reg_num=0x%04x\n", - region_data, reg_num); - return IRQ_NONE; - } - wake_up_interruptible(®ion_data->interrupt_wait_queue); - return IRQ_HANDLED; -} - -static int vsoc_probe_device(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - int result; - int i; - resource_size_t reg_size; - dev_t devt; - - vsoc_dev.dev = pdev; - result = pci_enable_device(pdev); - if (result) { - dev_err(&pdev->dev, - "pci_enable_device failed %s: error %d\n", - pci_name(pdev), result); - return result; - } - vsoc_dev.enabled_device = true; - result = pci_request_regions(pdev, "vsoc"); - if (result < 0) { - dev_err(&pdev->dev, "pci_request_regions failed\n"); - vsoc_remove_device(pdev); - return -EBUSY; - } - vsoc_dev.requested_regions = true; - /* Set up the control registers in BAR 0 */ - reg_size = pci_resource_len(pdev, REGISTER_BAR); - if (reg_size > MAX_REGISTER_BAR_LEN) - vsoc_dev.regs = - pci_iomap(pdev, REGISTER_BAR, MAX_REGISTER_BAR_LEN); - else - vsoc_dev.regs = pci_iomap(pdev, REGISTER_BAR, reg_size); - - if (!vsoc_dev.regs) { - dev_err(&pdev->dev, - "cannot map registers of size %zu\n", - (size_t)reg_size); - vsoc_remove_device(pdev); - return -EBUSY; - } - - /* Map the shared memory in BAR 2 */ - vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR); - vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR); - - dev_info(&pdev->dev, "shared memory @ DMA %pa size=0x%zx\n", - &vsoc_dev.shm_phys_start, vsoc_dev.shm_size); - vsoc_dev.kernel_mapped_shm = pci_iomap_wc(pdev, SHARED_MEMORY_BAR, 0); - if (!vsoc_dev.kernel_mapped_shm) { - dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n"); - vsoc_remove_device(pdev); - return -EBUSY; - } - - vsoc_dev.layout = (struct vsoc_shm_layout_descriptor __force *) - vsoc_dev.kernel_mapped_shm; - dev_info(&pdev->dev, "major_version: %d\n", - vsoc_dev.layout->major_version); - dev_info(&pdev->dev, "minor_version: %d\n", - vsoc_dev.layout->minor_version); - dev_info(&pdev->dev, "size: 0x%x\n", vsoc_dev.layout->size); - dev_info(&pdev->dev, "regions: %d\n", vsoc_dev.layout->region_count); - if (vsoc_dev.layout->major_version != - CURRENT_VSOC_LAYOUT_MAJOR_VERSION) { - dev_err(&vsoc_dev.dev->dev, - "driver supports only major_version %d\n", - 
CURRENT_VSOC_LAYOUT_MAJOR_VERSION); - vsoc_remove_device(pdev); - return -EBUSY; - } - result = alloc_chrdev_region(&devt, 0, vsoc_dev.layout->region_count, - VSOC_DEV_NAME); - if (result) { - dev_err(&vsoc_dev.dev->dev, "alloc_chrdev_region failed\n"); - vsoc_remove_device(pdev); - return -EBUSY; - } - vsoc_dev.major = MAJOR(devt); - cdev_init(&vsoc_dev.cdev, &vsoc_ops); - vsoc_dev.cdev.owner = THIS_MODULE; - result = cdev_add(&vsoc_dev.cdev, devt, vsoc_dev.layout->region_count); - if (result) { - dev_err(&vsoc_dev.dev->dev, "cdev_add error\n"); - vsoc_remove_device(pdev); - return -EBUSY; - } - vsoc_dev.cdev_added = true; - vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME); - if (IS_ERR(vsoc_dev.class)) { - dev_err(&vsoc_dev.dev->dev, "class_create failed\n"); - vsoc_remove_device(pdev); - return PTR_ERR(vsoc_dev.class); - } - vsoc_dev.class_added = true; - vsoc_dev.regions = (struct vsoc_device_region __force *) - ((void *)vsoc_dev.layout + - vsoc_dev.layout->vsoc_region_desc_offset); - vsoc_dev.msix_entries = - kcalloc(vsoc_dev.layout->region_count, - sizeof(vsoc_dev.msix_entries[0]), GFP_KERNEL); - if (!vsoc_dev.msix_entries) { - dev_err(&vsoc_dev.dev->dev, - "unable to allocate msix_entries\n"); - vsoc_remove_device(pdev); - return -ENOSPC; - } - vsoc_dev.regions_data = - kcalloc(vsoc_dev.layout->region_count, - sizeof(vsoc_dev.regions_data[0]), GFP_KERNEL); - if (!vsoc_dev.regions_data) { - dev_err(&vsoc_dev.dev->dev, - "unable to allocate regions' data\n"); - vsoc_remove_device(pdev); - return -ENOSPC; - } - for (i = 0; i < vsoc_dev.layout->region_count; ++i) - vsoc_dev.msix_entries[i].entry = i; - - result = pci_enable_msix_exact(vsoc_dev.dev, vsoc_dev.msix_entries, - vsoc_dev.layout->region_count); - if (result) { - dev_info(&pdev->dev, "pci_enable_msix failed: %d\n", result); - vsoc_remove_device(pdev); - return -ENOSPC; - } - /* Check that all regions are well formed */ - for (i = 0; i < vsoc_dev.layout->region_count; ++i) { - const struct vsoc_device_region *region = vsoc_dev.regions + i; - - if (!PAGE_ALIGNED(region->region_begin_offset) || - !PAGE_ALIGNED(region->region_end_offset)) { - dev_err(&vsoc_dev.dev->dev, - "region %d not aligned (%x:%x)", i, - region->region_begin_offset, - region->region_end_offset); - vsoc_remove_device(pdev); - return -EFAULT; - } - if (region->region_begin_offset >= region->region_end_offset || - region->region_end_offset > vsoc_dev.shm_size) { - dev_err(&vsoc_dev.dev->dev, - "region %d offsets are wrong: %x %x %zx", - i, region->region_begin_offset, - region->region_end_offset, vsoc_dev.shm_size); - vsoc_remove_device(pdev); - return -EFAULT; - } - if (region->managed_by >= vsoc_dev.layout->region_count) { - dev_err(&vsoc_dev.dev->dev, - "region %d has invalid owner: %u", - i, region->managed_by); - vsoc_remove_device(pdev); - return -EFAULT; - } - } - vsoc_dev.msix_enabled = true; - for (i = 0; i < vsoc_dev.layout->region_count; ++i) { - const struct vsoc_device_region *region = vsoc_dev.regions + i; - size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1; - const struct vsoc_signal_table_layout *h_to_g_signal_table = - ®ion->host_to_guest_signal_table; - const struct vsoc_signal_table_layout *g_to_h_signal_table = - ®ion->guest_to_host_signal_table; - - vsoc_dev.regions_data[i].name[name_sz] = '\0'; - memcpy(vsoc_dev.regions_data[i].name, region->device_name, - name_sz); - dev_info(&pdev->dev, "region %d name=%s\n", - i, vsoc_dev.regions_data[i].name); - init_waitqueue_head - (&vsoc_dev.regions_data[i].interrupt_wait_queue); - 
init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue); - vsoc_dev.regions_data[i].incoming_signalled = - shm_off_to_virtual_addr(region->region_begin_offset) + - h_to_g_signal_table->interrupt_signalled_offset; - vsoc_dev.regions_data[i].outgoing_signalled = - shm_off_to_virtual_addr(region->region_begin_offset) + - g_to_h_signal_table->interrupt_signalled_offset; - result = request_irq(vsoc_dev.msix_entries[i].vector, - vsoc_interrupt, 0, - vsoc_dev.regions_data[i].name, - vsoc_dev.regions_data + i); - if (result) { - dev_info(&pdev->dev, - "request_irq failed irq=%d vector=%d\n", - i, vsoc_dev.msix_entries[i].vector); - vsoc_remove_device(pdev); - return -ENOSPC; - } - vsoc_dev.regions_data[i].irq_requested = true; - if (!device_create(vsoc_dev.class, NULL, - MKDEV(vsoc_dev.major, i), - NULL, vsoc_dev.regions_data[i].name)) { - dev_err(&vsoc_dev.dev->dev, "device_create failed\n"); - vsoc_remove_device(pdev); - return -EBUSY; - } - vsoc_dev.regions_data[i].device_created = true; - } - return 0; -} - -/* - * This should undo all of the allocations in the probe function in reverse - * order. - * - * Notes: - * - * The device may have been partially initialized, so double check - * that the allocations happened. - * - * This function may be called multiple times, so mark resources as freed - * as they are deallocated. - */ -static void vsoc_remove_device(struct pci_dev *pdev) -{ - int i; - /* - * pdev is the first thing to be set on probe and the last thing - * to be cleared here. If it's NULL then there is no cleanup. - */ - if (!pdev || !vsoc_dev.dev) - return; - dev_info(&pdev->dev, "remove_device\n"); - if (vsoc_dev.regions_data) { - for (i = 0; i < vsoc_dev.layout->region_count; ++i) { - if (vsoc_dev.regions_data[i].device_created) { - device_destroy(vsoc_dev.class, - MKDEV(vsoc_dev.major, i)); - vsoc_dev.regions_data[i].device_created = false; - } - if (vsoc_dev.regions_data[i].irq_requested) - free_irq(vsoc_dev.msix_entries[i].vector, NULL); - vsoc_dev.regions_data[i].irq_requested = false; - } - kfree(vsoc_dev.regions_data); - vsoc_dev.regions_data = NULL; - } - if (vsoc_dev.msix_enabled) { - pci_disable_msix(pdev); - vsoc_dev.msix_enabled = false; - } - kfree(vsoc_dev.msix_entries); - vsoc_dev.msix_entries = NULL; - vsoc_dev.regions = NULL; - if (vsoc_dev.class_added) { - class_destroy(vsoc_dev.class); - vsoc_dev.class_added = false; - } - if (vsoc_dev.cdev_added) { - cdev_del(&vsoc_dev.cdev); - vsoc_dev.cdev_added = false; - } - if (vsoc_dev.major && vsoc_dev.layout) { - unregister_chrdev_region(MKDEV(vsoc_dev.major, 0), - vsoc_dev.layout->region_count); - vsoc_dev.major = 0; - } - vsoc_dev.layout = NULL; - if (vsoc_dev.kernel_mapped_shm) { - pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm); - vsoc_dev.kernel_mapped_shm = NULL; - } - if (vsoc_dev.regs) { - pci_iounmap(pdev, vsoc_dev.regs); - vsoc_dev.regs = NULL; - } - if (vsoc_dev.requested_regions) { - pci_release_regions(pdev); - vsoc_dev.requested_regions = false; - } - if (vsoc_dev.enabled_device) { - pci_disable_device(pdev); - vsoc_dev.enabled_device = false; - } - /* Do this last: it indicates that the device is not initialized. 
*/ - vsoc_dev.dev = NULL; -} - -static void __exit vsoc_cleanup_module(void) -{ - vsoc_remove_device(vsoc_dev.dev); - pci_unregister_driver(&vsoc_pci_driver); -} - -static int __init vsoc_init_module(void) -{ - int err = -ENOMEM; - - INIT_LIST_HEAD(&vsoc_dev.permissions); - mutex_init(&vsoc_dev.mtx); - - err = pci_register_driver(&vsoc_pci_driver); - if (err < 0) - return err; - return 0; -} - -static int vsoc_open(struct inode *inode, struct file *filp) -{ - /* Can't use vsoc_validate_filep because filp is still incomplete */ - int ret = vsoc_validate_inode(inode); - - if (ret) - return ret; - filp->private_data = - kzalloc(sizeof(struct vsoc_private_data), GFP_KERNEL); - if (!filp->private_data) - return -ENOMEM; - return 0; -} - -static int vsoc_release(struct inode *inode, struct file *filp) -{ - struct vsoc_private_data *private_data = NULL; - struct fd_scoped_permission_node *node = NULL; - struct vsoc_device_region *owner_region_p = NULL; - int retval = vsoc_validate_filep(filp); - - if (retval) - return retval; - private_data = (struct vsoc_private_data *)filp->private_data; - if (!private_data) - return 0; - - node = private_data->fd_scoped_permission_node; - if (node) { - owner_region_p = vsoc_region_from_inode(inode); - if (owner_region_p->managed_by != VSOC_REGION_WHOLE) { - owner_region_p = - &vsoc_dev.regions[owner_region_p->managed_by]; - } - do_destroy_fd_scoped_permission_node(owner_region_p, node); - private_data->fd_scoped_permission_node = NULL; - } - kfree(private_data); - filp->private_data = NULL; - - return 0; -} - -/* - * Returns the device relative offset and length of the area specified by the - * fd scoped permission. If there is no fd scoped permission set, a default - * permission covering the entire region is assumed, unless the region is owned - * by another one, in which case the default is a permission with zero size. - */ -static ssize_t vsoc_get_area(struct file *filp, __u32 *area_offset) -{ - __u32 off = 0; - ssize_t length = 0; - struct vsoc_device_region *region_p; - struct fd_scoped_permission *perm; - - region_p = vsoc_region_from_filep(filp); - off = region_p->region_begin_offset; - perm = &((struct vsoc_private_data *)filp->private_data)-> - fd_scoped_permission_node->permission; - if (perm) { - off += perm->begin_offset; - length = perm->end_offset - perm->begin_offset; - } else if (region_p->managed_by == VSOC_REGION_WHOLE) { - /* No permission set and the regions is not owned by another, - * default to full region access. - */ - length = vsoc_device_region_size(region_p); - } else { - /* return zero length, access is denied. 
*/ - length = 0; - } - if (area_offset) - *area_offset = off; - return length; -} - -static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma) -{ - unsigned long len = vma->vm_end - vma->vm_start; - __u32 area_off; - phys_addr_t mem_off; - ssize_t area_len; - int retval = vsoc_validate_filep(filp); - - if (retval) - return retval; - area_len = vsoc_get_area(filp, &area_off); - /* Add the requested offset */ - area_off += (vma->vm_pgoff << PAGE_SHIFT); - area_len -= (vma->vm_pgoff << PAGE_SHIFT); - if (area_len < len) - return -EINVAL; - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - mem_off = shm_off_to_phys_addr(area_off); - if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> PAGE_SHIFT, - len, vma->vm_page_prot)) - return -EAGAIN; - return 0; -} - -module_init(vsoc_init_module); -module_exit(vsoc_cleanup_module); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Greg Hartman <ghartman@google.com>"); -MODULE_DESCRIPTION("VSoC interpretation of QEmu's ivshmem device"); -MODULE_VERSION("1.0"); diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c index e15e33ed94ae..89dc84d3c803 100644 --- a/drivers/staging/comedi/drivers/dt282x.c +++ b/drivers/staging/comedi/drivers/dt282x.c @@ -484,14 +484,7 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev, s->async->events |= COMEDI_CB_EOA; return; } -#if 0 - /* clear the dual dma flag, making this the last dma segment */ - /* XXX probably wrong */ - if (!devpriv->ntrig) { - devpriv->supcsr &= ~DT2821_SUPCSR_DDMA; - outw(devpriv->supcsr, dev->iobase + DT2821_SUPCSR_REG); - } -#endif + /* restart the channel */ dt282x_prep_ai_dma(dev, dma->cur_dma, 0); @@ -534,28 +527,7 @@ static irqreturn_t dt282x_interrupt(int irq, void *d) s_ao->async->events |= COMEDI_CB_ERROR; handled = 1; } -#if 0 - if (adcsr & DT2821_ADCSR_ADDONE) { - unsigned short data; - - data = inw(dev->iobase + DT2821_ADDAT_REG); - data &= s->maxdata; - if (devpriv->ad_2scomp) - data = comedi_offset_munge(s, data); - comedi_buf_write_samples(s, &data, 1); - - devpriv->nread--; - if (!devpriv->nread) { - s->async->events |= COMEDI_CB_EOA; - } else { - if (supcsr & DT2821_SUPCSR_SCDN) - outw(devpriv->supcsr | DT2821_SUPCSR_STRIG, - dev->iobase + DT2821_SUPCSR_REG); - } - handled = 1; - } -#endif comedi_handle_events(dev, s); if (s_ao) comedi_handle_events(dev, s_ao); diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c index f7c365b70106..011e19161b78 100644 --- a/drivers/staging/comedi/drivers/dt3000.c +++ b/drivers/staging/comedi/drivers/dt3000.c @@ -439,9 +439,8 @@ static int dt3k_ai_cmdtest(struct comedi_device *dev, if (cmd->scan_begin_src == TRIG_TIMER) { arg = cmd->convert_arg * cmd->scan_end_arg; - err |= comedi_check_trigger_arg_min(&cmd-> - scan_begin_arg, - arg); + err |= comedi_check_trigger_arg_min( + &cmd->scan_begin_arg, arg); } } diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c index 4ee9b260eab0..75d5c9c24596 100644 --- a/drivers/staging/comedi/drivers/ni_660x.c +++ b/drivers/staging/comedi/drivers/ni_660x.c @@ -1035,7 +1035,7 @@ static int ni_660x_auto_attach(struct comedi_device *dev, ni_660x_init_tio_chips(dev, board->n_chips); /* prepare the device for globally-named routes. 
*/ - if (ni_assign_device_routes("ni_660x", board->name, + if (ni_assign_device_routes("ni_660x", board->name, NULL, &devpriv->routing_tables) < 0) { dev_warn(dev->class_dev, "%s: %s device has no signal routing table.\n", __func__, board->name); diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c index 68ad9676f962..0bca7d752015 100644 --- a/drivers/staging/comedi/drivers/ni_atmio16d.c +++ b/drivers/staging/comedi/drivers/ni_atmio16d.c @@ -266,19 +266,9 @@ static int atmio16d_ai_cmdtest(struct comedi_device *dev, if (cmd->scan_begin_src == TRIG_FOLLOW) { /* internal trigger */ err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0); - } else { -#if 0 - /* external trigger */ - /* should be level/edge, hi/lo specification here */ - err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0); -#endif } err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 10000); -#if 0 - err |= comedi_check_trigger_arg_max(&cmd->convert_arg, SLOWEST_TIMER); -#endif - err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); diff --git a/drivers/staging/comedi/drivers/ni_labpc_common.c b/drivers/staging/comedi/drivers/ni_labpc_common.c index 406952f5521d..ce0f85026277 100644 --- a/drivers/staging/comedi/drivers/ni_labpc_common.c +++ b/drivers/staging/comedi/drivers/ni_labpc_common.c @@ -560,14 +560,13 @@ static int labpc_ai_cmdtest(struct comedi_device *dev, /* make sure scan timing is not too fast */ if (cmd->scan_begin_src == TRIG_TIMER) { if (cmd->convert_src == TRIG_TIMER) { - err |= comedi_check_trigger_arg_min(&cmd-> - scan_begin_arg, - cmd->convert_arg * - cmd->chanlist_len); + err |= comedi_check_trigger_arg_min( + &cmd->scan_begin_arg, + cmd->convert_arg * cmd->chanlist_len); } - err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, - board->ai_speed * - cmd->chanlist_len); + err |= comedi_check_trigger_arg_min( + &cmd->scan_begin_arg, + board->ai_speed * cmd->chanlist_len); } switch (cmd->stop_src) { diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index f98e3ae27bff..d99f4065b96d 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -2199,9 +2199,10 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) break; case TRIG_EXT: ai_trig |= NISTC_AI_TRIG_START1_SEL( - ni_get_reg_value_roffs(CR_CHAN(cmd->start_arg), - NI_AI_StartTrigger, - &devpriv->routing_tables, 1)); + ni_get_reg_value_roffs( + CR_CHAN(cmd->start_arg), + NI_AI_StartTrigger, + &devpriv->routing_tables, 1)); if (cmd->start_arg & CR_INVERT) ai_trig |= NISTC_AI_TRIG_START1_POLARITY; @@ -2311,10 +2312,12 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) (cmd->scan_begin_arg & ~CR_EDGE) != (cmd->convert_arg & ~CR_EDGE)) start_stop_select |= NISTC_AI_START_SYNC; + start_stop_select |= NISTC_AI_START_SEL( - ni_get_reg_value_roffs(CR_CHAN(cmd->scan_begin_arg), - NI_AI_SampleClock, - &devpriv->routing_tables, 1)); + ni_get_reg_value_roffs( + CR_CHAN(cmd->scan_begin_arg), + NI_AI_SampleClock, + &devpriv->routing_tables, 1)); ni_stc_writew(dev, start_stop_select, NISTC_AI_START_STOP_REG); break; } @@ -2343,9 +2346,10 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) break; case TRIG_EXT: mode1 |= NISTC_AI_MODE1_CONVERT_SRC( - ni_get_reg_value_roffs(CR_CHAN(cmd->convert_arg), - NI_AI_ConvertClock, - &devpriv->routing_tables, 1)); + ni_get_reg_value_roffs( + 
CR_CHAN(cmd->convert_arg), + NI_AI_ConvertClock, + &devpriv->routing_tables, 1)); if ((cmd->convert_arg & CR_INVERT) == 0) mode1 |= NISTC_AI_MODE1_CONVERT_POLARITY; ni_stc_writew(dev, mode1, NISTC_AI_MODE1_REG); @@ -2970,9 +2974,11 @@ static void ni_ao_cmd_set_trigger(struct comedi_device *dev, NISTC_AO_TRIG_START1_SYNC; } else { /* TRIG_EXT */ trigsel = NISTC_AO_TRIG_START1_SEL( - ni_get_reg_value_roffs(CR_CHAN(cmd->start_arg), - NI_AO_StartTrigger, - &devpriv->routing_tables, 1)); + ni_get_reg_value_roffs( + CR_CHAN(cmd->start_arg), + NI_AO_StartTrigger, + &devpriv->routing_tables, 1)); + /* 0=active high, 1=active low. see daq-stc 3-24 (p186) */ if (cmd->start_arg & CR_INVERT) trigsel |= NISTC_AO_TRIG_START1_POLARITY; @@ -3079,12 +3085,10 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev, * zero out these bit fields to be set below. Does an ao-reset do this * automatically? */ - devpriv->ao_mode1 &= ~( - NISTC_AO_MODE1_UI_SRC_MASK | - NISTC_AO_MODE1_UI_SRC_POLARITY | - NISTC_AO_MODE1_UPDATE_SRC_MASK | - NISTC_AO_MODE1_UPDATE_SRC_POLARITY - ); + devpriv->ao_mode1 &= ~(NISTC_AO_MODE1_UI_SRC_MASK | + NISTC_AO_MODE1_UI_SRC_POLARITY | + NISTC_AO_MODE1_UPDATE_SRC_MASK | + NISTC_AO_MODE1_UPDATE_SRC_POLARITY); if (cmd->scan_begin_src == TRIG_TIMER) { unsigned int trigvar; @@ -3134,9 +3138,10 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev, /* FIXME: assert scan_begin_arg != 0, ret failure otherwise */ devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA; devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC( - ni_get_reg_value(CR_CHAN(cmd->scan_begin_arg), - NI_AO_SampleClock, - &devpriv->routing_tables)); + ni_get_reg_value( + CR_CHAN(cmd->scan_begin_arg), + NI_AO_SampleClock, + &devpriv->routing_tables)); if (cmd->scan_begin_arg & CR_INVERT) devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC_POLARITY; } @@ -3673,9 +3678,10 @@ static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) cdo_mode_bits = NI_M_CDO_MODE_FIFO_MODE | NI_M_CDO_MODE_HALT_ON_ERROR | NI_M_CDO_MODE_SAMPLE_SRC( - ni_get_reg_value(CR_CHAN(cmd->scan_begin_arg), - NI_DO_SampleClock, - &devpriv->routing_tables)); + ni_get_reg_value( + CR_CHAN(cmd->scan_begin_arg), + NI_DO_SampleClock, + &devpriv->routing_tables)); if (cmd->scan_begin_arg & CR_INVERT) cdo_mode_bits |= NI_M_CDO_MODE_POLARITY; ni_writel(dev, cdo_mode_bits, NI_M_CDO_MODE_REG); @@ -5975,6 +5981,7 @@ static int ni_E_init(struct comedi_device *dev, /* prepare the device for globally-named routes. 
*/ if (ni_assign_device_routes(dev_family, board->name, + board->alt_route_name, &devpriv->routing_tables) < 0) { dev_warn(dev->class_dev, "%s: %s device has no signal routing table.\n", __func__, board->name); diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c index 14b26fffe049..7c82d5f9778f 100644 --- a/drivers/staging/comedi/drivers/ni_pcimio.c +++ b/drivers/staging/comedi/drivers/ni_pcimio.c @@ -888,6 +888,7 @@ static const struct ni_board_struct ni_boards[] = { }, [BOARD_PCIE6251] = { .name = "pcie-6251", + .alt_route_name = "pci-6251", .n_adchan = 16, .ai_maxdata = 0xffff, .ai_fifo_depth = 4095, @@ -976,6 +977,7 @@ static const struct ni_board_struct ni_boards[] = { }, [BOARD_PCIE6259] = { .name = "pcie-6259", + .alt_route_name = "pci-6259", .n_adchan = 32, .ai_maxdata = 0xffff, .ai_fifo_depth = 4095, diff --git a/drivers/staging/comedi/drivers/ni_routes.c b/drivers/staging/comedi/drivers/ni_routes.c index 8f398b30f5bf..07cb970340db 100644 --- a/drivers/staging/comedi/drivers/ni_routes.c +++ b/drivers/staging/comedi/drivers/ni_routes.c @@ -50,20 +50,13 @@ #define RVi(table, src, dest) ((table)[(dest) * NI_NUM_NAMES + (src)]) /* - * Find the proper route_values and ni_device_routes tables for this particular - * device. - * - * Return: -ENODATA if either was not found; 0 if both were found. + * Find the route values for a device family. */ -static int ni_find_device_routes(const char *device_family, - const char *board_name, - struct ni_route_tables *tables) +static const u8 *ni_find_route_values(const char *device_family) { - const struct ni_device_routes *dr = NULL; const u8 *rv = NULL; int i; - /* First, find the register_values table for this device family */ for (i = 0; ni_all_route_values[i]; ++i) { if (memcmp(ni_all_route_values[i]->family, device_family, strnlen(device_family, 30)) == 0) { @@ -71,8 +64,18 @@ static int ni_find_device_routes(const char *device_family, break; } } + return rv; +} + +/* + * Find the valid routes for a board. + */ +static const struct ni_device_routes * +ni_find_valid_routes(const char *board_name) +{ + const struct ni_device_routes *dr = NULL; + int i; - /* Second, find the set of routes valid for this device. */ for (i = 0; ni_device_routes_list[i]; ++i) { if (memcmp(ni_device_routes_list[i]->device, board_name, strnlen(board_name, 30)) == 0) { @@ -80,6 +83,31 @@ static int ni_find_device_routes(const char *device_family, break; } } + return dr; +} + +/* + * Find the proper route_values and ni_device_routes tables for this particular + * device. Possibly try an alternate board name if device routes not found + * for the actual board name. + * + * Return: -ENODATA if either was not found; 0 if both were found. + */ +static int ni_find_device_routes(const char *device_family, + const char *board_name, + const char *alt_board_name, + struct ni_route_tables *tables) +{ + const struct ni_device_routes *dr; + const u8 *rv; + + /* First, find the register_values table for this device family */ + rv = ni_find_route_values(device_family); + + /* Second, find the set of routes valid for this device. */ + dr = ni_find_valid_routes(board_name); + if (!dr && alt_board_name) + dr = ni_find_valid_routes(alt_board_name); tables->route_values = rv; tables->valid_routes = dr; @@ -93,15 +121,28 @@ static int ni_find_device_routes(const char *device_family, /** * ni_assign_device_routes() - Assign the proper lookup table for NI signal * routing to the specified NI device. 
+ * @device_family: Device family name (determines route values). + * @board_name: Board name (determines set of routes). + * @alt_board_name: Optional alternate board name to try on failure. + * @tables: Pointer to assigned routing information. + * + * Finds the route values for the device family and the set of valid routes + * for the board. If valid routes could not be found for the actual board + * name and an alternate board name has been specified, try that one. + * + * On failure, the assigned routing information may be partially filled + * (for example, with the route values but not the set of valid routes). * * Return: -ENODATA if assignment was not successful; 0 if successful. */ int ni_assign_device_routes(const char *device_family, const char *board_name, + const char *alt_board_name, struct ni_route_tables *tables) { memset(tables, 0, sizeof(struct ni_route_tables)); - return ni_find_device_routes(device_family, board_name, tables); + return ni_find_device_routes(device_family, board_name, alt_board_name, + tables); } EXPORT_SYMBOL_GPL(ni_assign_device_routes); diff --git a/drivers/staging/comedi/drivers/ni_routes.h b/drivers/staging/comedi/drivers/ni_routes.h index 3211a16adc6f..b7680fd2afe1 100644 --- a/drivers/staging/comedi/drivers/ni_routes.h +++ b/drivers/staging/comedi/drivers/ni_routes.h @@ -76,6 +76,7 @@ struct ni_route_tables { */ int ni_assign_device_routes(const char *device_family, const char *board_name, + const char *alt_board_name, struct ni_route_tables *tables); /* diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h index 35427f8bf8f7..fbc0b753a0f5 100644 --- a/drivers/staging/comedi/drivers/ni_stc.h +++ b/drivers/staging/comedi/drivers/ni_stc.h @@ -941,6 +941,7 @@ enum ni_reg_type { struct ni_board_struct { const char *name; + const char *alt_route_name; int device_id; int isapnp_id; diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c index b264db32a411..f6154addaa95 100644 --- a/drivers/staging/comedi/drivers/ni_tio.c +++ b/drivers/staging/comedi/drivers/ni_tio.c @@ -1342,8 +1342,8 @@ static int ni_m_gate2_to_generic_gate(unsigned int gate, unsigned int *src) static inline unsigned int ni_tio_get_gate_mode(struct ni_gpct *counter) { - unsigned int mode = ni_tio_get_soft_copy( - counter, NITIO_MODE_REG(counter->counter_index)); + unsigned int mode = ni_tio_get_soft_copy(counter, + NITIO_MODE_REG(counter->counter_index)); unsigned int ret = 0; if ((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED) @@ -1358,8 +1358,8 @@ static inline unsigned int ni_tio_get_gate_mode(struct ni_gpct *counter) static inline unsigned int ni_tio_get_gate2_mode(struct ni_gpct *counter) { - unsigned int mode = ni_tio_get_soft_copy( - counter, NITIO_GATE2_REG(counter->counter_index)); + unsigned int mode = ni_tio_get_soft_copy(counter, + NITIO_GATE2_REG(counter->counter_index)); unsigned int ret = 0; if (!(mode & GI_GATE2_MODE)) diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c index bb400e08f0bc..8c04af09be2c 100644 --- a/drivers/staging/comedi/drivers/rtd520.c +++ b/drivers/staging/comedi/drivers/rtd520.c @@ -815,9 +815,8 @@ static int rtd_ai_cmdtest(struct comedi_device *dev, if (cmd->scan_begin_src == TRIG_TIMER) { arg = cmd->convert_arg * cmd->scan_end_arg; - err |= comedi_check_trigger_arg_min(&cmd-> - scan_begin_arg, - arg); + err |= comedi_check_trigger_arg_min( + &cmd->scan_begin_arg, arg); } } diff --git a/drivers/staging/comedi/drivers/s626.c 
b/drivers/staging/comedi/drivers/s626.c index 39049d3c56d7..084a8e7b9fc2 100644 --- a/drivers/staging/comedi/drivers/s626.c +++ b/drivers/staging/comedi/drivers/s626.c @@ -1892,8 +1892,7 @@ static int s626_ai_cmdtest(struct comedi_device *dev, if (cmd->scan_begin_src == TRIG_TIMER) { arg = cmd->convert_arg * cmd->scan_end_arg; err |= comedi_check_trigger_arg_min( - &cmd->scan_begin_arg, - arg); + &cmd->scan_begin_arg, arg); } } diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig deleted file mode 100644 index 292a19dfcaf5..000000000000 --- a/drivers/staging/exfat/Kconfig +++ /dev/null @@ -1,41 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -config STAGING_EXFAT_FS - tristate "exFAT fs support" - depends on BLOCK - select NLS - help - This adds support for the exFAT file system. - -config STAGING_EXFAT_DISCARD - bool "enable discard support" - depends on STAGING_EXFAT_FS - default y - -config STAGING_EXFAT_DELAYED_SYNC - bool "enable delayed sync" - depends on STAGING_EXFAT_FS - default n - -config STAGING_EXFAT_KERNEL_DEBUG - bool "enable kernel debug features via ioctl" - depends on STAGING_EXFAT_FS - default n - -config STAGING_EXFAT_DEBUG_MSG - bool "print debug messages" - depends on STAGING_EXFAT_FS - default n - -config STAGING_EXFAT_DEFAULT_CODEPAGE - int "Default codepage for exFAT" - default 437 - depends on STAGING_EXFAT_FS - help - This option should be set to the codepage of your exFAT filesystems. - -config STAGING_EXFAT_DEFAULT_IOCHARSET - string "Default iocharset for exFAT" - default "utf8" - depends on STAGING_EXFAT_FS - help - Set this to the default input/output character set you'd like exFAT to use. diff --git a/drivers/staging/exfat/Makefile b/drivers/staging/exfat/Makefile deleted file mode 100644 index 057556eeca0c..000000000000 --- a/drivers/staging/exfat/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-or-later - -obj-$(CONFIG_STAGING_EXFAT_FS) += exfat.o - -exfat-y := exfat_core.o \ - exfat_super.o \ - exfat_blkdev.o \ - exfat_cache.o \ - exfat_nls.o \ - exfat_upcase.o diff --git a/drivers/staging/exfat/TODO b/drivers/staging/exfat/TODO deleted file mode 100644 index a283ce534cf4..000000000000 --- a/drivers/staging/exfat/TODO +++ /dev/null @@ -1,69 +0,0 @@ -A laundry list of things that need looking at, most of which will -require more work than the average checkpatch cleanup... - -Note that some of these entries may not be bugs - they're things -that need to be looked at, and *possibly* fixed. - -Clean up the ffsCamelCase function names. - -Fix (thing)->flags to not use magic numbers - multiple offenders - -Sort out all the s32/u32/u8 nonsense - most of these should be plain int. - -exfat_core.c - ffsReadFile - the goto err_out seem to leak a brelse(). -same for ffsWriteFile. - -All the calls to fs_sync() need to be looked at, particularly in the -context of EXFAT_DELAYED_SYNC. Currently, if that's defined, we only -flush to disk when sync() gets called. We should be doing at least -metadata flushes at appropriate times. - -ffsTruncateFile - if (old_size <= new_size) { -That doesn't look right. How did it ever work? Are they relying on lazy -block allocation when actual writes happen? If nothing else, it never -does the 'fid->size = new_size' and do the inode update.... - -ffsSetAttr() is just dangling in the breeze, not wired up at all... - -Convert global mutexes to a per-superblock mutex. - -Right now, we load exactly one UTF-8 table. 
Check to see -if that plays nice with different codepage and iocharset values -for simultanous mounts of different devices - -exfat_rmdir() checks for -EBUSY but ffsRemoveDir() doesn't return it. -In fact, there's a complete lack of -EBUSY testing anywhere. - -There's probably a few missing checks for -EEXIST - -check return codes of sync_dirty_buffer() - -Why is remove_file doing a num_entries++?? - -Double check a lot of can't-happen parameter checks (for null pointers for -things that have only one call site and can't pass a null, etc). - -All the DEBUG stuff can probably be tossed, including the ioctl(). Either -that, or convert to a proper fault-injection system. - -exfat_remount does exactly one thing. Fix to actually deal with remount -options, particularly handling R/O correctly. For that matter, allow -R/O mounts in the first place. - -Figure out why the VFAT code used multi_sector_(read|write) but the -exfat code doesn't use it. The difference matters on SSDs with wear leveling. - -exfat_fat_sync(), exfat_buf_sync(), and sync_alloc_bitmap() -aren't called anyplace.... - -Create helper function for exfat_set_entry_time() and exfat_set_entry_type() -because it's sort of ugly to be calling the same functionn directly and -other code calling through the fs_func struc ponters... - -clean up the remaining vol_type checks, which are of two types: -some are ?: operators with magic numbers, and the rest are places -where we're doing stuff with '.' and '..'. - -Patches to: - Greg Kroah-Hartman <gregkh@linuxfoundation.org> - Valdis Kletnieks <valdis.kletnieks@vt.edu> diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h deleted file mode 100644 index 4d87360fab35..000000000000 --- a/drivers/staging/exfat/exfat.h +++ /dev/null @@ -1,824 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. 
- */ - -#ifndef _EXFAT_H -#define _EXFAT_H - -#include <linux/types.h> -#include <linux/buffer_head.h> - -#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG - /* For Debugging Purpose */ - /* IOCTL code 'f' used by - * - file systems typically #0~0x1F - * - embedded terminal devices #128~ - * - exts for debugging purpose #99 - * number 100 and 101 is available now but has possible conflicts - */ -#define EXFAT_IOC_GET_DEBUGFLAGS _IOR('f', 100, long) -#define EXFAT_IOC_SET_DEBUGFLAGS _IOW('f', 101, long) - -#define EXFAT_DEBUGFLAGS_INVALID_UMOUNT 0x01 -#define EXFAT_DEBUGFLAGS_ERROR_RW 0x02 -#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */ - -#ifdef CONFIG_STAGING_EXFAT_DEBUG_MSG -#define DEBUG 1 -#else -#undef DEBUG -#endif - -#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ - -#define DENTRY_SIZE 32 /* dir entry size */ -#define DENTRY_SIZE_BITS 5 - -/* PBR entries */ -#define PBR_SIGNATURE 0xAA55 -#define EXT_SIGNATURE 0xAA550000 -#define VOL_LABEL "NO NAME " /* size should be 11 */ -#define OEM_NAME "MSWIN4.1" /* size should be 8 */ -#define STR_FAT12 "FAT12 " /* size should be 8 */ -#define STR_FAT16 "FAT16 " /* size should be 8 */ -#define STR_FAT32 "FAT32 " /* size should be 8 */ -#define STR_EXFAT "EXFAT " /* size should be 8 */ -#define VOL_CLEAN 0x0000 -#define VOL_DIRTY 0x0002 - -/* max number of clusters */ -#define FAT12_THRESHOLD 4087 /* 2^12 - 1 + 2 (clu 0 & 1) */ -#define FAT16_THRESHOLD 65527 /* 2^16 - 1 + 2 */ -#define FAT32_THRESHOLD 268435457 /* 2^28 - 1 + 2 */ -#define EXFAT_THRESHOLD 268435457 /* 2^28 - 1 + 2 */ - -/* file types */ -#define TYPE_UNUSED 0x0000 -#define TYPE_DELETED 0x0001 -#define TYPE_INVALID 0x0002 -#define TYPE_CRITICAL_PRI 0x0100 -#define TYPE_BITMAP 0x0101 -#define TYPE_UPCASE 0x0102 -#define TYPE_VOLUME 0x0103 -#define TYPE_DIR 0x0104 -#define TYPE_FILE 0x011F -#define TYPE_SYMLINK 0x015F -#define TYPE_CRITICAL_SEC 0x0200 -#define TYPE_STREAM 0x0201 -#define TYPE_EXTEND 0x0202 -#define TYPE_ACL 0x0203 -#define TYPE_BENIGN_PRI 0x0400 -#define TYPE_GUID 0x0401 -#define TYPE_PADDING 0x0402 -#define TYPE_ACLTAB 0x0403 -#define TYPE_BENIGN_SEC 0x0800 -#define TYPE_ALL 0x0FFF - -/* time modes */ -#define TM_CREATE 0 -#define TM_MODIFY 1 -#define TM_ACCESS 2 - -/* checksum types */ -#define CS_DIR_ENTRY 0 -#define CS_PBR_SECTOR 1 -#define CS_DEFAULT 2 - -#define CLUSTER_16(x) ((u16)(x)) -#define CLUSTER_32(x) ((u32)(x)) - -#define START_SECTOR(x) \ - ((((sector_t)((x) - 2)) << p_fs->sectors_per_clu_bits) + \ - p_fs->data_start_sector) - -#define IS_LAST_SECTOR_IN_CLUSTER(sec) \ - ((((sec) - p_fs->data_start_sector + 1) & \ - ((1 << p_fs->sectors_per_clu_bits) - 1)) == 0) - -#define GET_CLUSTER_FROM_SECTOR(sec) \ - ((u32)((((sec) - p_fs->data_start_sector) >> \ - p_fs->sectors_per_clu_bits) + 2)) - -#define GET16(p_src) \ - (((u16)(p_src)[0]) | (((u16)(p_src)[1]) << 8)) -#define GET32(p_src) \ - (((u32)(p_src)[0]) | (((u32)(p_src)[1]) << 8) | \ - (((u32)(p_src)[2]) << 16) | (((u32)(p_src)[3]) << 24)) -#define GET64(p_src) \ - (((u64)(p_src)[0]) | (((u64)(p_src)[1]) << 8) | \ - (((u64)(p_src)[2]) << 16) | (((u64)(p_src)[3]) << 24) | \ - (((u64)(p_src)[4]) << 32) | (((u64)(p_src)[5]) << 40) | \ - (((u64)(p_src)[6]) << 48) | (((u64)(p_src)[7]) << 56)) - -#define SET16(p_dst, src) \ - do { \ - (p_dst)[0] = (u8)(src); \ - (p_dst)[1] = (u8)(((u16)(src)) >> 8); \ - } while (0) -#define SET32(p_dst, src) \ - do { \ - (p_dst)[0] = (u8)(src); \ - (p_dst)[1] = (u8)(((u32)(src)) >> 8); \ - (p_dst)[2] = (u8)(((u32)(src)) >> 16); \ - (p_dst)[3] = (u8)(((u32)(src)) >> 
24); \ - } while (0) -#define SET64(p_dst, src) \ - do { \ - (p_dst)[0] = (u8)(src); \ - (p_dst)[1] = (u8)(((u64)(src)) >> 8); \ - (p_dst)[2] = (u8)(((u64)(src)) >> 16); \ - (p_dst)[3] = (u8)(((u64)(src)) >> 24); \ - (p_dst)[4] = (u8)(((u64)(src)) >> 32); \ - (p_dst)[5] = (u8)(((u64)(src)) >> 40); \ - (p_dst)[6] = (u8)(((u64)(src)) >> 48); \ - (p_dst)[7] = (u8)(((u64)(src)) >> 56); \ - } while (0) - -#ifdef __LITTLE_ENDIAN -#define GET16_A(p_src) (*((u16 *)(p_src))) -#define GET32_A(p_src) (*((u32 *)(p_src))) -#define GET64_A(p_src) (*((u64 *)(p_src))) -#define SET16_A(p_dst, src) (*((u16 *)(p_dst)) = (u16)(src)) -#define SET32_A(p_dst, src) (*((u32 *)(p_dst)) = (u32)(src)) -#define SET64_A(p_dst, src) (*((u64 *)(p_dst)) = (u64)(src)) -#else /* BIG_ENDIAN */ -#define GET16_A(p_src) GET16(p_src) -#define GET32_A(p_src) GET32(p_src) -#define GET64_A(p_src) GET64(p_src) -#define SET16_A(p_dst, src) SET16(p_dst, src) -#define SET32_A(p_dst, src) SET32(p_dst, src) -#define SET64_A(p_dst, src) SET64(p_dst, src) -#endif - -/* cache size (in number of sectors) */ -/* (should be an exponential value of 2) */ -#define FAT_CACHE_SIZE 128 -#define FAT_CACHE_HASH_SIZE 64 -#define BUF_CACHE_SIZE 256 -#define BUF_CACHE_HASH_SIZE 64 - -/* Upcase table macro */ -#define HIGH_INDEX_BIT (8) -#define HIGH_INDEX_MASK (0xFF00) -#define LOW_INDEX_BIT (16 - HIGH_INDEX_BIT) -#define UTBL_ROW_COUNT BIT(LOW_INDEX_BIT) -#define UTBL_COL_COUNT BIT(HIGH_INDEX_BIT) - -static inline u16 get_col_index(u16 i) -{ - return i >> LOW_INDEX_BIT; -} - -static inline u16 get_row_index(u16 i) -{ - return i & ~HIGH_INDEX_MASK; -} - -#define EXFAT_SUPER_MAGIC (0x2011BAB0L) -#define EXFAT_ROOT_INO 1 - -/* FAT types */ -#define FAT12 0x01 /* FAT12 */ -#define FAT16 0x0E /* Win95 FAT16 (LBA) */ -#define FAT32 0x0C /* Win95 FAT32 (LBA) */ -#define EXFAT 0x07 /* exFAT */ - -/* file name lengths */ -#define MAX_CHARSET_SIZE 3 /* max size of multi-byte character */ -#define MAX_PATH_DEPTH 15 /* max depth of path name */ -#define MAX_NAME_LENGTH 256 /* max len of filename including NULL */ -#define MAX_PATH_LENGTH 260 /* max len of pathname including NULL */ -#define DOS_NAME_LENGTH 11 /* DOS filename length excluding NULL */ -#define DOS_PATH_LENGTH 80 /* DOS pathname length excluding NULL */ - -/* file attributes */ -#define ATTR_NORMAL 0x0000 -#define ATTR_READONLY 0x0001 -#define ATTR_HIDDEN 0x0002 -#define ATTR_SYSTEM 0x0004 -#define ATTR_VOLUME 0x0008 -#define ATTR_SUBDIR 0x0010 -#define ATTR_ARCHIVE 0x0020 -#define ATTR_SYMLINK 0x0040 -#define ATTR_EXTEND 0x000F -#define ATTR_RWMASK 0x007E - -/* file creation modes */ -#define FM_REGULAR 0x00 -#define FM_SYMLINK 0x40 - -#define NUM_UPCASE 2918 - -#define DOS_CUR_DIR_NAME ". " -#define DOS_PAR_DIR_NAME ".. " - -#ifdef __LITTLE_ENDIAN -#define UNI_CUR_DIR_NAME ".\0" -#define UNI_PAR_DIR_NAME ".\0.\0" -#else -#define UNI_CUR_DIR_NAME "\0." -#define UNI_PAR_DIR_NAME "\0.\0." 
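The byte-wise GET/SET macros above always store the least-significant byte first, so on-disk fields read back the same value regardless of host endianness (the __LITTLE_ENDIAN *_A variants are merely a fast path). A small standalone round-trip sketch, not part of the patch, showing the same layout with plain functions:

#include <assert.h>
#include <stdint.h>

/* Same layout as the SET32/GET32 macros above: LSB stored first. */
static void set32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

static uint32_t get32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	uint8_t raw[4];

	set32(raw, 0x12345678);
	/* raw[] is now {0x78, 0x56, 0x34, 0x12} on any host. */
	assert(get32(raw) == 0x12345678);
	return 0;
}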
-#endif - -struct date_time_t { - u16 Year; - u16 Month; - u16 Day; - u16 Hour; - u16 Minute; - u16 Second; - u16 MilliSecond; -}; - -struct part_info_t { - u32 Offset; /* start sector number of the partition */ - u32 Size; /* in sectors */ -}; - -struct dev_info_t { - u32 SecSize; /* sector size in bytes */ - u32 DevSize; /* block device size in sectors */ -}; - -struct vol_info_t { - u32 FatType; - u32 ClusterSize; - u32 NumClusters; - u32 FreeClusters; - u32 UsedClusters; -}; - -/* directory structure */ -struct chain_t { - u32 dir; - s32 size; - u8 flags; -}; - -struct file_id_t { - struct chain_t dir; - s32 entry; - u32 type; - u32 attr; - u32 start_clu; - u64 size; - u8 flags; - s64 rwoffset; - s32 hint_last_off; - u32 hint_last_clu; -}; - -struct dir_entry_t { - char Name[MAX_NAME_LENGTH * MAX_CHARSET_SIZE]; - - /* used only for FAT12/16/32, not used for exFAT */ - char ShortName[DOS_NAME_LENGTH + 2]; - - u32 Attr; - u64 Size; - u32 NumSubdirs; - struct date_time_t CreateTimestamp; - struct date_time_t ModifyTimestamp; - struct date_time_t AccessTimestamp; -}; - -struct timestamp_t { - u16 sec; /* 0 ~ 59 */ - u16 min; /* 0 ~ 59 */ - u16 hour; /* 0 ~ 23 */ - u16 day; /* 1 ~ 31 */ - u16 mon; /* 1 ~ 12 */ - u16 year; /* 0 ~ 127 (since 1980) */ -}; - -/* MS_DOS FAT partition boot record (512 bytes) */ -struct pbr_sector_t { - u8 jmp_boot[3]; - u8 oem_name[8]; - u8 bpb[109]; - u8 boot_code[390]; - u8 signature[2]; -}; - -/* MS-DOS FAT12/16 BIOS parameter block (51 bytes) */ -struct bpb16_t { - u8 sector_size[2]; - u8 sectors_per_clu; - u8 num_reserved[2]; - u8 num_fats; - u8 num_root_entries[2]; - u8 num_sectors[2]; - u8 media_type; - u8 num_fat_sectors[2]; - u8 sectors_in_track[2]; - u8 num_heads[2]; - u8 num_hid_sectors[4]; - u8 num_huge_sectors[4]; - - u8 phy_drv_no; - u8 reserved; - u8 ext_signature; - u8 vol_serial[4]; - u8 vol_label[11]; - u8 vol_type[8]; -}; - -/* MS-DOS FAT32 BIOS parameter block (79 bytes) */ -struct bpb32_t { - u8 sector_size[2]; - u8 sectors_per_clu; - u8 num_reserved[2]; - u8 num_fats; - u8 num_root_entries[2]; - u8 num_sectors[2]; - u8 media_type; - u8 num_fat_sectors[2]; - u8 sectors_in_track[2]; - u8 num_heads[2]; - u8 num_hid_sectors[4]; - u8 num_huge_sectors[4]; - u8 num_fat32_sectors[4]; - u8 ext_flags[2]; - u8 fs_version[2]; - u8 root_cluster[4]; - u8 fsinfo_sector[2]; - u8 backup_sector[2]; - u8 reserved[12]; - - u8 phy_drv_no; - u8 ext_reserved; - u8 ext_signature; - u8 vol_serial[4]; - u8 vol_label[11]; - u8 vol_type[8]; -}; - -/* MS-DOS EXFAT BIOS parameter block (109 bytes) */ -struct bpbex_t { - u8 reserved1[53]; - u8 vol_offset[8]; - u8 vol_length[8]; - u8 fat_offset[4]; - u8 fat_length[4]; - u8 clu_offset[4]; - u8 clu_count[4]; - u8 root_cluster[4]; - u8 vol_serial[4]; - u8 fs_version[2]; - u8 vol_flags[2]; - u8 sector_size_bits; - u8 sectors_per_clu_bits; - u8 num_fats; - u8 phy_drv_no; - u8 perc_in_use; - u8 reserved2[7]; -}; - -/* MS-DOS FAT file system information sector (512 bytes) */ -struct fsi_sector_t { - u8 signature1[4]; - u8 reserved1[480]; - u8 signature2[4]; - u8 free_cluster[4]; - u8 next_cluster[4]; - u8 reserved2[14]; - u8 signature3[2]; -}; - -/* MS-DOS FAT directory entry (32 bytes) */ -struct dentry_t { - u8 dummy[32]; -}; - -struct dos_dentry_t { - u8 name[DOS_NAME_LENGTH]; - u8 attr; - u8 lcase; - u8 create_time_ms; - u8 create_time[2]; - u8 create_date[2]; - u8 access_date[2]; - u8 start_clu_hi[2]; - u8 modify_time[2]; - u8 modify_date[2]; - u8 start_clu_lo[2]; - u8 size[4]; -}; - -/* MS-DOS FAT extended directory entry 
(32 bytes) */ -struct ext_dentry_t { - u8 order; - u8 unicode_0_4[10]; - u8 attr; - u8 sysid; - u8 checksum; - u8 unicode_5_10[12]; - u8 start_clu[2]; - u8 unicode_11_12[4]; -}; - -/* MS-DOS EXFAT file directory entry (32 bytes) */ -struct file_dentry_t { - u8 type; - u8 num_ext; - u8 checksum[2]; - u8 attr[2]; - u8 reserved1[2]; - u8 create_time[2]; - u8 create_date[2]; - u8 modify_time[2]; - u8 modify_date[2]; - u8 access_time[2]; - u8 access_date[2]; - u8 create_time_ms; - u8 modify_time_ms; - u8 access_time_ms; - u8 reserved2[9]; -}; - -/* MS-DOS EXFAT stream extension directory entry (32 bytes) */ -struct strm_dentry_t { - u8 type; - u8 flags; - u8 reserved1; - u8 name_len; - u8 name_hash[2]; - u8 reserved2[2]; - u8 valid_size[8]; - u8 reserved3[4]; - u8 start_clu[4]; - u8 size[8]; -}; - -/* MS-DOS EXFAT file name directory entry (32 bytes) */ -struct name_dentry_t { - u8 type; - u8 flags; - u8 unicode_0_14[30]; -}; - -/* MS-DOS EXFAT allocation bitmap directory entry (32 bytes) */ -struct bmap_dentry_t { - u8 type; - u8 flags; - u8 reserved[18]; - u8 start_clu[4]; - u8 size[8]; -}; - -/* MS-DOS EXFAT up-case table directory entry (32 bytes) */ -struct case_dentry_t { - u8 type; - u8 reserved1[3]; - u8 checksum[4]; - u8 reserved2[12]; - u8 start_clu[4]; - u8 size[8]; -}; - -/* MS-DOS EXFAT volume label directory entry (32 bytes) */ -struct volm_dentry_t { - u8 type; - u8 label_len; - u8 unicode_0_10[22]; - u8 reserved[8]; -}; - -/* unused entry hint information */ -struct uentry_t { - u32 dir; - s32 entry; - struct chain_t clu; -}; - -/* DOS name structure */ -struct dos_name_t { - u8 name[DOS_NAME_LENGTH]; - u8 name_case; -}; - -/* unicode name structure */ -struct uni_name_t { - u16 name[MAX_NAME_LENGTH]; - u16 name_hash; - u8 name_len; -}; - -struct buf_cache_t { - struct buf_cache_t *next; - struct buf_cache_t *prev; - struct buf_cache_t *hash_next; - struct buf_cache_t *hash_prev; - s32 drv; - sector_t sec; - u32 flag; - struct buffer_head *buf_bh; -}; - -struct fs_info_t { - u32 drv; /* drive ID */ - u32 vol_type; /* volume FAT type */ - u32 vol_id; /* volume serial number */ - - u64 num_sectors; /* num of sectors in volume */ - u32 num_clusters; /* num of clusters in volume */ - u32 cluster_size; /* cluster size in bytes */ - u32 cluster_size_bits; - u32 sectors_per_clu; /* cluster size in sectors */ - u32 sectors_per_clu_bits; - - u32 PBR_sector; /* PBR sector */ - u32 FAT1_start_sector; /* FAT1 start sector */ - u32 FAT2_start_sector; /* FAT2 start sector */ - u32 root_start_sector; /* root dir start sector */ - u32 data_start_sector; /* data area start sector */ - u32 num_FAT_sectors; /* num of FAT sectors */ - - u32 root_dir; /* root dir cluster */ - u32 dentries_in_root; /* num of dentries in root dir */ - u32 dentries_per_clu; /* num of dentries per cluster */ - - u32 vol_flag; /* volume dirty flag */ - struct buffer_head *pbr_bh; /* PBR sector */ - - u32 map_clu; /* allocation bitmap start cluster */ - u32 map_sectors; /* num of allocation bitmap sectors */ - struct buffer_head **vol_amap; /* allocation bitmap */ - - u16 **vol_utbl; /* upcase table */ - - u32 clu_srch_ptr; /* cluster search pointer */ - u32 used_clusters; /* number of used clusters */ - struct uentry_t hint_uentry; /* unused entry hint information */ - - u32 dev_ejected; /* block device operation error flag */ - - struct mutex v_mutex; - - /* FAT cache */ - struct buf_cache_t FAT_cache_array[FAT_CACHE_SIZE]; - struct buf_cache_t FAT_cache_lru_list; - struct buf_cache_t 
FAT_cache_hash_list[FAT_CACHE_HASH_SIZE]; - - /* buf cache */ - struct buf_cache_t buf_cache_array[BUF_CACHE_SIZE]; - struct buf_cache_t buf_cache_lru_list; - struct buf_cache_t buf_cache_hash_list[BUF_CACHE_HASH_SIZE]; -}; - -#define ES_2_ENTRIES 2 -#define ES_3_ENTRIES 3 -#define ES_ALL_ENTRIES 0 - -struct entry_set_cache_t { - /* sector number that contains file_entry */ - sector_t sector; - - /* byte offset in the sector */ - s32 offset; - - /* - * flag in stream entry. - * 01 for cluster chain, - * 03 for contig. clusteres. - */ - s32 alloc_flag; - - u32 num_entries; - - /* __buf should be the last member */ - void *__buf; -}; - -#define EXFAT_ERRORS_CONT 1 /* ignore error and continue */ -#define EXFAT_ERRORS_PANIC 2 /* panic on error */ -#define EXFAT_ERRORS_RO 3 /* remount r/o on error */ - -/* ioctl command */ -#define EXFAT_IOCTL_GET_VOLUME_ID _IOR('r', 0x12, __u32) - -struct exfat_mount_options { - kuid_t fs_uid; - kgid_t fs_gid; - unsigned short fs_fmask; - unsigned short fs_dmask; - - /* permission for setting the [am]time */ - unsigned short allow_utime; - - /* codepage for shortname conversions */ - unsigned short codepage; - - /* charset for filename input/display */ - char *iocharset; - - unsigned char casesensitive; - - /* on error: continue, panic, remount-ro */ - unsigned char errors; -#ifdef CONFIG_STAGING_EXFAT_DISCARD - /* flag on if -o dicard specified and device support discard() */ - unsigned char discard; -#endif /* CONFIG_STAGING_EXFAT_DISCARD */ -}; - -#define EXFAT_HASH_BITS 8 -#define EXFAT_HASH_SIZE BIT(EXFAT_HASH_BITS) - -/* - * EXFAT file system in-core superblock data - */ -struct bd_info_t { - s32 sector_size; /* in bytes */ - s32 sector_size_bits; - s32 sector_size_mask; - - /* total number of sectors in this block device */ - s32 num_sectors; - - /* opened or not */ - bool opened; -}; - -struct exfat_sb_info { - struct fs_info_t fs_info; - struct bd_info_t bd_info; - - struct exfat_mount_options options; - - int s_dirt; - struct mutex s_lock; - struct nls_table *nls_disk; /* Codepage used on disk */ - struct nls_table *nls_io; /* Charset used for input and display */ - - struct inode *fat_inode; - - spinlock_t inode_hash_lock; - struct hlist_head inode_hashtable[EXFAT_HASH_SIZE]; -#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG - long debug_flags; -#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */ -}; - -/* - * EXFAT file system inode data in memory - */ -struct exfat_inode_info { - struct file_id_t fid; - char *target; - /* NOTE: mmu_private is 64bits, so must hold ->i_mutex to access */ - loff_t mmu_private; /* physically allocated size */ - loff_t i_pos; /* on-disk position of directory entry or 0 */ - struct hlist_node i_hash_fat; /* hash by i_location */ - struct rw_semaphore truncate_lock; - struct inode vfs_inode; - struct rw_semaphore i_alloc_sem; /* protect bmap against truncate */ -}; - -#define EXFAT_SB(sb) ((struct exfat_sb_info *)((sb)->s_fs_info)) - -static inline struct exfat_inode_info *EXFAT_I(struct inode *inode) -{ - return container_of(inode, struct exfat_inode_info, vfs_inode); -} - -/* NLS management function */ -u16 nls_upper(struct super_block *sb, u16 a); -int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b); -void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring, - struct uni_name_t *p_uniname); -void nls_cstring_to_uniname(struct super_block *sb, - struct uni_name_t *p_uniname, u8 *p_cstring, - bool *p_lossy); - -/* buffer cache management */ -void exfat_buf_init(struct super_block *sb); -void 
exfat_buf_shutdown(struct super_block *sb); -int exfat_fat_read(struct super_block *sb, u32 loc, u32 *content); -s32 exfat_fat_write(struct super_block *sb, u32 loc, u32 content); -u8 *exfat_fat_getblk(struct super_block *sb, sector_t sec); -void exfat_fat_modify(struct super_block *sb, sector_t sec); -void exfat_fat_release_all(struct super_block *sb); -void exfat_fat_sync(struct super_block *sb); -u8 *exfat_buf_getblk(struct super_block *sb, sector_t sec); -void exfat_buf_modify(struct super_block *sb, sector_t sec); -void exfat_buf_lock(struct super_block *sb, sector_t sec); -void exfat_buf_unlock(struct super_block *sb, sector_t sec); -void exfat_buf_release(struct super_block *sb, sector_t sec); -void exfat_buf_release_all(struct super_block *sb); -void exfat_buf_sync(struct super_block *sb); - -/* fs management functions */ -void fs_set_vol_flags(struct super_block *sb, u32 new_flag); -void fs_error(struct super_block *sb); - -/* cluster management functions */ -s32 count_num_clusters(struct super_block *sb, struct chain_t *dir); -void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len); - -/* allocation bitmap management functions */ -s32 load_alloc_bitmap(struct super_block *sb); -void free_alloc_bitmap(struct super_block *sb); -void sync_alloc_bitmap(struct super_block *sb); - -/* upcase table management functions */ -s32 load_upcase_table(struct super_block *sb); -void free_upcase_table(struct super_block *sb); - -/* dir entry management functions */ -struct timestamp_t *tm_current(struct timestamp_t *tm); - -struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir, - s32 entry, sector_t *sector); -struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb, - struct chain_t *p_dir, s32 entry, - u32 type, - struct dentry_t **file_ep); -void release_entry_set(struct entry_set_cache_t *es); -s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir, - u32 type); -void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir, - s32 entry); -void update_dir_checksum_with_entry_set(struct super_block *sb, - struct entry_set_cache_t *es); -bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir); - -/* name conversion functions */ -s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir, - struct uni_name_t *p_uniname, s32 *entries, - struct dos_name_t *p_dosname); -u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type); - -/* name resolution functions */ -s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir, - struct uni_name_t *p_uniname); - -/* file operation functions */ -s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr); -s32 create_dir(struct inode *inode, struct chain_t *p_dir, - struct uni_name_t *p_uniname, struct file_id_t *fid); -s32 create_file(struct inode *inode, struct chain_t *p_dir, - struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid); -void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry); -s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 old_entry, - struct uni_name_t *p_uniname, struct file_id_t *fid); -s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry, - struct chain_t *p_newdir, struct uni_name_t *p_uniname, - struct file_id_t *fid); - -/* sector read/write functions */ -int sector_read(struct super_block *sb, sector_t sec, - struct buffer_head **bh, bool read); -int sector_write(struct super_block *sb, sector_t sec, - struct buffer_head 
*bh, bool sync); -int multi_sector_read(struct super_block *sb, sector_t sec, - struct buffer_head **bh, s32 num_secs, bool read); -int multi_sector_write(struct super_block *sb, sector_t sec, - struct buffer_head *bh, s32 num_secs, bool sync); - -void exfat_bdev_open(struct super_block *sb); -void exfat_bdev_close(struct super_block *sb); -int exfat_bdev_read(struct super_block *sb, sector_t secno, - struct buffer_head **bh, u32 num_secs, bool read); -int exfat_bdev_write(struct super_block *sb, sector_t secno, - struct buffer_head *bh, u32 num_secs, bool sync); -int exfat_bdev_sync(struct super_block *sb); - -/* cluster operation functions */ -s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc, - struct chain_t *p_chain); -void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain, - s32 do_relse); -s32 exfat_count_used_clusters(struct super_block *sb); - -/* dir operation functions */ -s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir, - struct uni_name_t *p_uniname, s32 num_entries, - struct dos_name_t *p_dosname, u32 type); -void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir, - s32 entry, s32 order, s32 num_entries); -void exfat_get_uni_name_from_ext_entry(struct super_block *sb, - struct chain_t *p_dir, s32 entry, - u16 *uniname); -s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir, - s32 entry, struct dentry_t *p_entry); -s32 exfat_calc_num_entries(struct uni_name_t *p_uniname); - -/* dir entry getter/setter */ -u32 exfat_get_entry_type(struct dentry_t *p_entry); -u32 exfat_get_entry_attr(struct dentry_t *p_entry); -void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr); -u8 exfat_get_entry_flag(struct dentry_t *p_entry); -void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags); -u32 exfat_get_entry_clu0(struct dentry_t *p_entry); -void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu); -u64 exfat_get_entry_size(struct dentry_t *p_entry); -void exfat_set_entry_size(struct dentry_t *p_entry, u64 size); -void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp, - u8 mode); -void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp, - u8 mode); - -extern const u8 uni_upcase[]; -#endif /* _EXFAT_H */ diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c deleted file mode 100644 index 0a3dc3568293..000000000000 --- a/drivers/staging/exfat/exfat_blkdev.c +++ /dev/null @@ -1,136 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. 
- */ - -#include <linux/blkdev.h> -#include <linux/buffer_head.h> -#include <linux/fs.h> -#include "exfat.h" - -void exfat_bdev_open(struct super_block *sb) -{ - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - if (p_bd->opened) - return; - - p_bd->sector_size = bdev_logical_block_size(sb->s_bdev); - p_bd->sector_size_bits = ilog2(p_bd->sector_size); - p_bd->sector_size_mask = p_bd->sector_size - 1; - p_bd->num_sectors = i_size_read(sb->s_bdev->bd_inode) >> - p_bd->sector_size_bits; - p_bd->opened = true; -} - -void exfat_bdev_close(struct super_block *sb) -{ - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - p_bd->opened = false; -} - -int exfat_bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh, - u32 num_secs, bool read) -{ - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); -#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG - struct exfat_sb_info *sbi = EXFAT_SB(sb); - long flags = sbi->debug_flags; - - if (flags & EXFAT_DEBUGFLAGS_ERROR_RW) - return -EIO; -#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */ - - if (!p_bd->opened) - return -ENODEV; - - if (*bh) - __brelse(*bh); - - if (read) - *bh = __bread(sb->s_bdev, secno, - num_secs << p_bd->sector_size_bits); - else - *bh = __getblk(sb->s_bdev, secno, - num_secs << p_bd->sector_size_bits); - - if (*bh) - return 0; - - WARN(!p_fs->dev_ejected, - "[EXFAT] No bh, device seems wrong or to be ejected.\n"); - - return -EIO; -} - -int exfat_bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh, - u32 num_secs, bool sync) -{ - s32 count; - struct buffer_head *bh2; - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); -#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG - struct exfat_sb_info *sbi = EXFAT_SB(sb); - long flags = sbi->debug_flags; - - if (flags & EXFAT_DEBUGFLAGS_ERROR_RW) - return -EIO; -#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */ - - if (!p_bd->opened) - return -ENODEV; - - if (secno == bh->b_blocknr) { - lock_buffer(bh); - set_buffer_uptodate(bh); - mark_buffer_dirty(bh); - unlock_buffer(bh); - if (sync && (sync_dirty_buffer(bh) != 0)) - return -EIO; - } else { - count = num_secs << p_bd->sector_size_bits; - - bh2 = __getblk(sb->s_bdev, secno, count); - if (!bh2) - goto no_bh; - - lock_buffer(bh2); - memcpy(bh2->b_data, bh->b_data, count); - set_buffer_uptodate(bh2); - mark_buffer_dirty(bh2); - unlock_buffer(bh2); - if (sync && (sync_dirty_buffer(bh2) != 0)) { - __brelse(bh2); - goto no_bh; - } - __brelse(bh2); - } - - return 0; - -no_bh: - WARN(!p_fs->dev_ejected, - "[EXFAT] No bh, device seems wrong or to be ejected.\n"); - - return -EIO; -} - -int exfat_bdev_sync(struct super_block *sb) -{ - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); -#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG - struct exfat_sb_info *sbi = EXFAT_SB(sb); - long flags = sbi->debug_flags; - - if (flags & EXFAT_DEBUGFLAGS_ERROR_RW) - return -EIO; -#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */ - - if (!p_bd->opened) - return -ENODEV; - - return sync_blockdev(sb->s_bdev); -} diff --git a/drivers/staging/exfat/exfat_cache.c b/drivers/staging/exfat/exfat_cache.c deleted file mode 100644 index 3fd5604058a9..000000000000 --- a/drivers/staging/exfat/exfat_cache.c +++ /dev/null @@ -1,555 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. 
- */ - -#include <linux/buffer_head.h> -#include <linux/fs.h> -#include <linux/mutex.h> -#include "exfat.h" - -#define LOCKBIT 0x01 -#define DIRTYBIT 0x02 - -/* Local variables */ -static DEFINE_MUTEX(f_mutex); -static DEFINE_MUTEX(b_mutex); - -static struct buf_cache_t *FAT_cache_find(struct super_block *sb, sector_t sec) -{ - s32 off; - struct buf_cache_t *bp, *hp; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - off = (sec + - (sec >> p_fs->sectors_per_clu_bits)) & (FAT_CACHE_HASH_SIZE - 1); - - hp = &p_fs->FAT_cache_hash_list[off]; - for (bp = hp->hash_next; bp != hp; bp = bp->hash_next) { - if ((bp->drv == p_fs->drv) && (bp->sec == sec)) { - WARN(!bp->buf_bh, - "[EXFAT] FAT_cache has no bh. It will make system panic.\n"); - - touch_buffer(bp->buf_bh); - return bp; - } - } - return NULL; -} - -static void push_to_mru(struct buf_cache_t *bp, struct buf_cache_t *list) -{ - bp->next = list->next; - bp->prev = list; - list->next->prev = bp; - list->next = bp; -} - -static void push_to_lru(struct buf_cache_t *bp, struct buf_cache_t *list) -{ - bp->prev = list->prev; - bp->next = list; - list->prev->next = bp; - list->prev = bp; -} - -static void move_to_mru(struct buf_cache_t *bp, struct buf_cache_t *list) -{ - bp->prev->next = bp->next; - bp->next->prev = bp->prev; - push_to_mru(bp, list); -} - -static void move_to_lru(struct buf_cache_t *bp, struct buf_cache_t *list) -{ - bp->prev->next = bp->next; - bp->next->prev = bp->prev; - push_to_lru(bp, list); -} - -static struct buf_cache_t *FAT_cache_get(struct super_block *sb, sector_t sec) -{ - struct buf_cache_t *bp; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - bp = p_fs->FAT_cache_lru_list.prev; - - move_to_mru(bp, &p_fs->FAT_cache_lru_list); - return bp; -} - -static void FAT_cache_insert_hash(struct super_block *sb, - struct buf_cache_t *bp) -{ - s32 off; - struct buf_cache_t *hp; - struct fs_info_t *p_fs; - - p_fs = &(EXFAT_SB(sb)->fs_info); - off = (bp->sec + - (bp->sec >> p_fs->sectors_per_clu_bits)) & - (FAT_CACHE_HASH_SIZE - 1); - - hp = &p_fs->FAT_cache_hash_list[off]; - bp->hash_next = hp->hash_next; - bp->hash_prev = hp; - hp->hash_next->hash_prev = bp; - hp->hash_next = bp; -} - -static void FAT_cache_remove_hash(struct buf_cache_t *bp) -{ - (bp->hash_prev)->hash_next = bp->hash_next; - (bp->hash_next)->hash_prev = bp->hash_prev; -} - -static void buf_cache_insert_hash(struct super_block *sb, - struct buf_cache_t *bp) -{ - s32 off; - struct buf_cache_t *hp; - struct fs_info_t *p_fs; - - p_fs = &(EXFAT_SB(sb)->fs_info); - off = (bp->sec + - (bp->sec >> p_fs->sectors_per_clu_bits)) & - (BUF_CACHE_HASH_SIZE - 1); - - hp = &p_fs->buf_cache_hash_list[off]; - bp->hash_next = hp->hash_next; - bp->hash_prev = hp; - hp->hash_next->hash_prev = bp; - hp->hash_next = bp; -} - -static void buf_cache_remove_hash(struct buf_cache_t *bp) -{ - (bp->hash_prev)->hash_next = bp->hash_next; - (bp->hash_next)->hash_prev = bp->hash_prev; -} - -void exfat_buf_init(struct super_block *sb) -{ - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - int i; - - /* LRU list */ - p_fs->FAT_cache_lru_list.next = &p_fs->FAT_cache_lru_list; - p_fs->FAT_cache_lru_list.prev = &p_fs->FAT_cache_lru_list; - - for (i = 0; i < FAT_CACHE_SIZE; i++) { - p_fs->FAT_cache_array[i].drv = -1; - p_fs->FAT_cache_array[i].sec = ~0; - p_fs->FAT_cache_array[i].flag = 0; - p_fs->FAT_cache_array[i].buf_bh = NULL; - p_fs->FAT_cache_array[i].prev = NULL; - p_fs->FAT_cache_array[i].next = NULL; - push_to_mru(&p_fs->FAT_cache_array[i], - &p_fs->FAT_cache_lru_list); - 
} - - p_fs->buf_cache_lru_list.next = &p_fs->buf_cache_lru_list; - p_fs->buf_cache_lru_list.prev = &p_fs->buf_cache_lru_list; - - for (i = 0; i < BUF_CACHE_SIZE; i++) { - p_fs->buf_cache_array[i].drv = -1; - p_fs->buf_cache_array[i].sec = ~0; - p_fs->buf_cache_array[i].flag = 0; - p_fs->buf_cache_array[i].buf_bh = NULL; - p_fs->buf_cache_array[i].prev = NULL; - p_fs->buf_cache_array[i].next = NULL; - push_to_mru(&p_fs->buf_cache_array[i], - &p_fs->buf_cache_lru_list); - } - - /* HASH list */ - for (i = 0; i < FAT_CACHE_HASH_SIZE; i++) { - p_fs->FAT_cache_hash_list[i].drv = -1; - p_fs->FAT_cache_hash_list[i].sec = ~0; - p_fs->FAT_cache_hash_list[i].hash_next = - &p_fs->FAT_cache_hash_list[i]; - p_fs->FAT_cache_hash_list[i].hash_prev = - &p_fs->FAT_cache_hash_list[i]; - } - - for (i = 0; i < FAT_CACHE_SIZE; i++) - FAT_cache_insert_hash(sb, &p_fs->FAT_cache_array[i]); - - for (i = 0; i < BUF_CACHE_HASH_SIZE; i++) { - p_fs->buf_cache_hash_list[i].drv = -1; - p_fs->buf_cache_hash_list[i].sec = ~0; - p_fs->buf_cache_hash_list[i].hash_next = - &p_fs->buf_cache_hash_list[i]; - p_fs->buf_cache_hash_list[i].hash_prev = - &p_fs->buf_cache_hash_list[i]; - } - - for (i = 0; i < BUF_CACHE_SIZE; i++) - buf_cache_insert_hash(sb, &p_fs->buf_cache_array[i]); -} - -void exfat_buf_shutdown(struct super_block *sb) -{ -} - -static int __exfat_fat_read(struct super_block *sb, u32 loc, u32 *content) -{ - s32 off; - u32 _content; - sector_t sec; - u8 *fat_sector, *fat_entry; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - sec = p_fs->FAT1_start_sector + - (loc >> (p_bd->sector_size_bits - 2)); - off = (loc << 2) & p_bd->sector_size_mask; - - fat_sector = exfat_fat_getblk(sb, sec); - if (!fat_sector) - return -1; - - fat_entry = &fat_sector[off]; - _content = GET32_A(fat_entry); - - if (_content >= CLUSTER_32(0xFFFFFFF8)) { - *content = CLUSTER_32(~0); - return 0; - } - *content = CLUSTER_32(_content); - return 0; -} - -/* in : sb, loc - * out: content - * returns 0 on success - * -1 on error - */ -int exfat_fat_read(struct super_block *sb, u32 loc, u32 *content) -{ - s32 ret; - - mutex_lock(&f_mutex); - ret = __exfat_fat_read(sb, loc, content); - mutex_unlock(&f_mutex); - - return ret; -} - -static s32 __exfat_fat_write(struct super_block *sb, u32 loc, u32 content) -{ - s32 off; - sector_t sec; - u8 *fat_sector, *fat_entry; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - sec = p_fs->FAT1_start_sector + (loc >> - (p_bd->sector_size_bits - 2)); - off = (loc << 2) & p_bd->sector_size_mask; - - fat_sector = exfat_fat_getblk(sb, sec); - if (!fat_sector) - return -1; - - fat_entry = &fat_sector[off]; - - SET32_A(fat_entry, content); - - exfat_fat_modify(sb, sec); - return 0; -} - -int exfat_fat_write(struct super_block *sb, u32 loc, u32 content) -{ - s32 ret; - - mutex_lock(&f_mutex); - ret = __exfat_fat_write(sb, loc, content); - mutex_unlock(&f_mutex); - - return ret; -} - -u8 *exfat_fat_getblk(struct super_block *sb, sector_t sec) -{ - struct buf_cache_t *bp; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - bp = FAT_cache_find(sb, sec); - if (bp) { - move_to_mru(bp, &p_fs->FAT_cache_lru_list); - return bp->buf_bh->b_data; - } - - bp = FAT_cache_get(sb, sec); - - FAT_cache_remove_hash(bp); - - bp->drv = p_fs->drv; - bp->sec = sec; - bp->flag = 0; - - FAT_cache_insert_hash(sb, bp); - - if (sector_read(sb, sec, &bp->buf_bh, 1) != 0) { - FAT_cache_remove_hash(bp); - bp->drv = -1; - bp->sec = 
~0; - bp->flag = 0; - bp->buf_bh = NULL; - - move_to_lru(bp, &p_fs->FAT_cache_lru_list); - return NULL; - } - - return bp->buf_bh->b_data; -} - -void exfat_fat_modify(struct super_block *sb, sector_t sec) -{ - struct buf_cache_t *bp; - - bp = FAT_cache_find(sb, sec); - if (bp) - sector_write(sb, sec, bp->buf_bh, 0); -} - -void exfat_fat_release_all(struct super_block *sb) -{ - struct buf_cache_t *bp; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - mutex_lock(&f_mutex); - - bp = p_fs->FAT_cache_lru_list.next; - while (bp != &p_fs->FAT_cache_lru_list) { - if (bp->drv == p_fs->drv) { - bp->drv = -1; - bp->sec = ~0; - bp->flag = 0; - - if (bp->buf_bh) { - __brelse(bp->buf_bh); - bp->buf_bh = NULL; - } - } - bp = bp->next; - } - - mutex_unlock(&f_mutex); -} - -void exfat_fat_sync(struct super_block *sb) -{ - struct buf_cache_t *bp; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - mutex_lock(&f_mutex); - - bp = p_fs->FAT_cache_lru_list.next; - while (bp != &p_fs->FAT_cache_lru_list) { - if ((bp->drv == p_fs->drv) && (bp->flag & DIRTYBIT)) { - sync_dirty_buffer(bp->buf_bh); - bp->flag &= ~(DIRTYBIT); - } - bp = bp->next; - } - - mutex_unlock(&f_mutex); -} - -static struct buf_cache_t *buf_cache_find(struct super_block *sb, sector_t sec) -{ - s32 off; - struct buf_cache_t *bp, *hp; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - off = (sec + (sec >> p_fs->sectors_per_clu_bits)) & - (BUF_CACHE_HASH_SIZE - 1); - - hp = &p_fs->buf_cache_hash_list[off]; - for (bp = hp->hash_next; bp != hp; bp = bp->hash_next) { - if ((bp->drv == p_fs->drv) && (bp->sec == sec)) { - touch_buffer(bp->buf_bh); - return bp; - } - } - return NULL; -} - -static struct buf_cache_t *buf_cache_get(struct super_block *sb, sector_t sec) -{ - struct buf_cache_t *bp; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - bp = p_fs->buf_cache_lru_list.prev; - while (bp->flag & LOCKBIT) - bp = bp->prev; - - move_to_mru(bp, &p_fs->buf_cache_lru_list); - return bp; -} - -static u8 *__exfat_buf_getblk(struct super_block *sb, sector_t sec) -{ - struct buf_cache_t *bp; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - bp = buf_cache_find(sb, sec); - if (bp) { - move_to_mru(bp, &p_fs->buf_cache_lru_list); - return bp->buf_bh->b_data; - } - - bp = buf_cache_get(sb, sec); - - buf_cache_remove_hash(bp); - - bp->drv = p_fs->drv; - bp->sec = sec; - bp->flag = 0; - - buf_cache_insert_hash(sb, bp); - - if (sector_read(sb, sec, &bp->buf_bh, 1) != 0) { - buf_cache_remove_hash(bp); - bp->drv = -1; - bp->sec = ~0; - bp->flag = 0; - bp->buf_bh = NULL; - - move_to_lru(bp, &p_fs->buf_cache_lru_list); - return NULL; - } - - return bp->buf_bh->b_data; -} - -u8 *exfat_buf_getblk(struct super_block *sb, sector_t sec) -{ - u8 *buf; - - mutex_lock(&b_mutex); - buf = __exfat_buf_getblk(sb, sec); - mutex_unlock(&b_mutex); - - return buf; -} - -void exfat_buf_modify(struct super_block *sb, sector_t sec) -{ - struct buf_cache_t *bp; - - mutex_lock(&b_mutex); - - bp = buf_cache_find(sb, sec); - if (likely(bp)) - sector_write(sb, sec, bp->buf_bh, 0); - - WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n", - (unsigned long long)sec); - - mutex_unlock(&b_mutex); -} - -void exfat_buf_lock(struct super_block *sb, sector_t sec) -{ - struct buf_cache_t *bp; - - mutex_lock(&b_mutex); - - bp = buf_cache_find(sb, sec); - if (likely(bp)) - bp->flag |= LOCKBIT; - - WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n", - (unsigned long long)sec); - - mutex_unlock(&b_mutex); -} - -void exfat_buf_unlock(struct 
super_block *sb, sector_t sec) -{ - struct buf_cache_t *bp; - - mutex_lock(&b_mutex); - - bp = buf_cache_find(sb, sec); - if (likely(bp)) - bp->flag &= ~(LOCKBIT); - - WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n", - (unsigned long long)sec); - - mutex_unlock(&b_mutex); -} - -void exfat_buf_release(struct super_block *sb, sector_t sec) -{ - struct buf_cache_t *bp; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - mutex_lock(&b_mutex); - - bp = buf_cache_find(sb, sec); - if (likely(bp)) { - bp->drv = -1; - bp->sec = ~0; - bp->flag = 0; - - if (bp->buf_bh) { - __brelse(bp->buf_bh); - bp->buf_bh = NULL; - } - - move_to_lru(bp, &p_fs->buf_cache_lru_list); - } - - mutex_unlock(&b_mutex); -} - -void exfat_buf_release_all(struct super_block *sb) -{ - struct buf_cache_t *bp; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - mutex_lock(&b_mutex); - - bp = p_fs->buf_cache_lru_list.next; - while (bp != &p_fs->buf_cache_lru_list) { - if (bp->drv == p_fs->drv) { - bp->drv = -1; - bp->sec = ~0; - bp->flag = 0; - - if (bp->buf_bh) { - __brelse(bp->buf_bh); - bp->buf_bh = NULL; - } - } - bp = bp->next; - } - - mutex_unlock(&b_mutex); -} - -void exfat_buf_sync(struct super_block *sb) -{ - struct buf_cache_t *bp; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - mutex_lock(&b_mutex); - - bp = p_fs->buf_cache_lru_list.next; - while (bp != &p_fs->buf_cache_lru_list) { - if ((bp->drv == p_fs->drv) && (bp->flag & DIRTYBIT)) { - sync_dirty_buffer(bp->buf_bh); - bp->flag &= ~(DIRTYBIT); - } - bp = bp->next; - } - - mutex_unlock(&b_mutex); -} diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c deleted file mode 100644 index 07b460d01334..000000000000 --- a/drivers/staging/exfat/exfat_core.c +++ /dev/null @@ -1,2582 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. 
- */ - -#include <linux/types.h> -#include <linux/buffer_head.h> -#include <linux/fs.h> -#include <linux/mutex.h> -#include <linux/blkdev.h> -#include <linux/slab.h> -#include "exfat.h" - -static void __set_sb_dirty(struct super_block *sb) -{ - struct exfat_sb_info *sbi = EXFAT_SB(sb); - - sbi->s_dirt = 1; -} - -static u8 name_buf[MAX_PATH_LENGTH * MAX_CHARSET_SIZE]; - -static u8 free_bit[] = { - 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 0 ~ 19 */ - 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, /* 20 ~ 39 */ - 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 40 ~ 59 */ - 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, /* 60 ~ 79 */ - 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, /* 80 ~ 99 */ - 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, /* 100 ~ 119 */ - 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 120 ~ 139 */ - 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, /* 140 ~ 159 */ - 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 160 ~ 179 */ - 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, /* 180 ~ 199 */ - 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 200 ~ 219 */ - 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, /* 220 ~ 239 */ - 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 /* 240 ~ 254 */ -}; - -static u8 used_bit[] = { - 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, /* 0 ~ 19 */ - 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, /* 20 ~ 39 */ - 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, /* 40 ~ 59 */ - 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, /* 60 ~ 79 */ - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, /* 80 ~ 99 */ - 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, /* 100 ~ 119 */ - 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, /* 120 ~ 139 */ - 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, /* 140 ~ 159 */ - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, /* 160 ~ 179 */ - 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, /* 180 ~ 199 */ - 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, /* 200 ~ 219 */ - 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, /* 220 ~ 239 */ - 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 /* 240 ~ 255 */ -}; - -#define BITMAP_LOC(v) ((v) >> 3) -#define BITMAP_SHIFT(v) ((v) & 0x07) - -static inline s32 exfat_bitmap_test(u8 *bitmap, int i) -{ - u8 data; - - data = bitmap[BITMAP_LOC(i)]; - if ((data >> BITMAP_SHIFT(i)) & 0x01) - return 1; - return 0; -} - -static inline void exfat_bitmap_set(u8 *bitmap, int i) -{ - bitmap[BITMAP_LOC(i)] |= (0x01 << BITMAP_SHIFT(i)); -} - -static inline void exfat_bitmap_clear(u8 *bitmap, int i) -{ - bitmap[BITMAP_LOC(i)] &= ~(0x01 << BITMAP_SHIFT(i)); -} - -/* - * File System Management Functions - */ - -void fs_set_vol_flags(struct super_block *sb, u32 new_flag) -{ - struct pbr_sector_t *p_pbr; - struct bpbex_t *p_bpb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - if (p_fs->vol_flag == new_flag) - return; - - p_fs->vol_flag = new_flag; - - if (!p_fs->pbr_bh) { - if (sector_read(sb, p_fs->PBR_sector, - &p_fs->pbr_bh, 1) != 0) - return; - } - - p_pbr = (struct pbr_sector_t *)p_fs->pbr_bh->b_data; - p_bpb = (struct bpbex_t *)p_pbr->bpb; - SET16(p_bpb->vol_flags, (u16)new_flag); - - /* XXX duyoung - * what can we do here? 
(cuz fs_set_vol_flags() is void) - */ - if ((new_flag == VOL_DIRTY) && (!buffer_dirty(p_fs->pbr_bh))) - sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 1); - else - sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 0); -} - -void fs_error(struct super_block *sb) -{ - struct exfat_mount_options *opts = &EXFAT_SB(sb)->options; - - if (opts->errors == EXFAT_ERRORS_PANIC) { - panic("[EXFAT] Filesystem panic from previous error\n"); - } else if ((opts->errors == EXFAT_ERRORS_RO) && !sb_rdonly(sb)) { - sb->s_flags |= SB_RDONLY; - pr_err("[EXFAT] Filesystem has been set read-only\n"); - } -} - -/* - * Cluster Management Functions - */ - -static s32 clear_cluster(struct super_block *sb, u32 clu) -{ - sector_t s, n; - s32 ret = 0; - struct buffer_head *tmp_bh = NULL; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - if (clu == CLUSTER_32(0)) { /* FAT16 root_dir */ - s = p_fs->root_start_sector; - n = p_fs->data_start_sector; - } else { - s = START_SECTOR(clu); - n = s + p_fs->sectors_per_clu; - } - - for (; s < n; s++) { - ret = sector_read(sb, s, &tmp_bh, 0); - if (ret != 0) - return ret; - - memset((char *)tmp_bh->b_data, 0x0, p_bd->sector_size); - ret = sector_write(sb, s, tmp_bh, 0); - if (ret != 0) - break; - } - - brelse(tmp_bh); - return ret; -} - -static s32 set_alloc_bitmap(struct super_block *sb, u32 clu) -{ - int i, b; - sector_t sector; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - i = clu >> (p_bd->sector_size_bits + 3); - b = clu & ((p_bd->sector_size << 3) - 1); - - sector = START_SECTOR(p_fs->map_clu) + i; - - exfat_bitmap_set((u8 *)p_fs->vol_amap[i]->b_data, b); - - return sector_write(sb, sector, p_fs->vol_amap[i], 0); -} - -static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu) -{ - int i, b; - sector_t sector; -#ifdef CONFIG_STAGING_EXFAT_DISCARD - struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct exfat_mount_options *opts = &sbi->options; - int ret; -#endif /* CONFIG_STAGING_EXFAT_DISCARD */ - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - i = clu >> (p_bd->sector_size_bits + 3); - b = clu & ((p_bd->sector_size << 3) - 1); - - sector = START_SECTOR(p_fs->map_clu) + i; - - exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b); - -#ifdef CONFIG_STAGING_EXFAT_DISCARD - if (opts->discard) { - ret = sb_issue_discard(sb, START_SECTOR(clu), - (1 << p_fs->sectors_per_clu_bits), - GFP_NOFS, 0); - if (ret == -EOPNOTSUPP) { - pr_warn("discard not supported by device, disabling"); - opts->discard = 0; - } else { - return ret; - } - } -#endif /* CONFIG_STAGING_EXFAT_DISCARD */ - - return sector_write(sb, sector, p_fs->vol_amap[i], 0); -} - -static u32 test_alloc_bitmap(struct super_block *sb, u32 clu) -{ - int i, map_i, map_b; - u32 clu_base, clu_free; - u8 k, clu_mask; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - clu_base = (clu & ~(0x7)) + 2; - clu_mask = (1 << (clu - clu_base + 2)) - 1; - - map_i = clu >> (p_bd->sector_size_bits + 3); - map_b = (clu >> 3) & p_bd->sector_size_mask; - - for (i = 2; i < p_fs->num_clusters; i += 8) { - k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b); - if (clu_mask > 0) { - k |= clu_mask; - clu_mask = 0; - } - if (k < 0xFF) { - clu_free = clu_base + free_bit[k]; - if (clu_free < p_fs->num_clusters) - return clu_free; - } - clu_base += 8; - - if (((++map_b) >= p_bd->sector_size) || - (clu_base >= 
p_fs->num_clusters)) { - if ((++map_i) >= p_fs->map_sectors) { - clu_base = 2; - map_i = 0; - } - map_b = 0; - } - } - - return CLUSTER_32(~0); -} - -s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc, - struct chain_t *p_chain) -{ - s32 num_clusters = 0; - u32 hint_clu, new_clu, last_clu = CLUSTER_32(~0); - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - hint_clu = p_chain->dir; - if (hint_clu == CLUSTER_32(~0)) { - hint_clu = test_alloc_bitmap(sb, p_fs->clu_srch_ptr - 2); - if (hint_clu == CLUSTER_32(~0)) - return 0; - } else if (hint_clu >= p_fs->num_clusters) { - hint_clu = 2; - p_chain->flags = 0x01; - } - - __set_sb_dirty(sb); - - p_chain->dir = CLUSTER_32(~0); - - while ((new_clu = test_alloc_bitmap(sb, hint_clu - 2)) != CLUSTER_32(~0)) { - if (new_clu != hint_clu) { - if (p_chain->flags == 0x03) { - exfat_chain_cont_cluster(sb, p_chain->dir, - num_clusters); - p_chain->flags = 0x01; - } - } - - if (set_alloc_bitmap(sb, new_clu - 2) != 0) - return -EIO; - - num_clusters++; - - if (p_chain->flags == 0x01) { - if (exfat_fat_write(sb, new_clu, CLUSTER_32(~0)) < 0) - return -EIO; - } - - if (p_chain->dir == CLUSTER_32(~0)) { - p_chain->dir = new_clu; - } else { - if (p_chain->flags == 0x01) { - if (exfat_fat_write(sb, last_clu, new_clu) < 0) - return -EIO; - } - } - last_clu = new_clu; - - if ((--num_alloc) == 0) { - p_fs->clu_srch_ptr = hint_clu; - if (p_fs->used_clusters != UINT_MAX) - p_fs->used_clusters += num_clusters; - - p_chain->size += num_clusters; - return num_clusters; - } - - hint_clu = new_clu + 1; - if (hint_clu >= p_fs->num_clusters) { - hint_clu = 2; - - if (p_chain->flags == 0x03) { - exfat_chain_cont_cluster(sb, p_chain->dir, - num_clusters); - p_chain->flags = 0x01; - } - } - } - - p_fs->clu_srch_ptr = hint_clu; - if (p_fs->used_clusters != UINT_MAX) - p_fs->used_clusters += num_clusters; - - p_chain->size += num_clusters; - return num_clusters; -} - -void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain, - s32 do_relse) -{ - s32 num_clusters = 0; - u32 clu; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - int i; - sector_t sector; - - if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0))) - return; - - if (p_chain->size <= 0) { - pr_err("[EXFAT] free_cluster : skip free-req clu:%u, because of zero-size truncation\n", - p_chain->dir); - return; - } - - __set_sb_dirty(sb); - clu = p_chain->dir; - - if (p_chain->flags == 0x03) { - do { - if (do_relse) { - sector = START_SECTOR(clu); - for (i = 0; i < p_fs->sectors_per_clu; i++) - exfat_buf_release(sb, sector + i); - } - - if (clr_alloc_bitmap(sb, clu - 2) != 0) - break; - clu++; - - num_clusters++; - } while (num_clusters < p_chain->size); - } else { - do { - if (p_fs->dev_ejected) - break; - - if (do_relse) { - sector = START_SECTOR(clu); - for (i = 0; i < p_fs->sectors_per_clu; i++) - exfat_buf_release(sb, sector + i); - } - - if (clr_alloc_bitmap(sb, clu - 2) != 0) - break; - - if (exfat_fat_read(sb, clu, &clu) == -1) - break; - num_clusters++; - } while ((clu != CLUSTER_32(0)) && (clu != CLUSTER_32(~0))); - } - - if (p_fs->used_clusters != UINT_MAX) - p_fs->used_clusters -= num_clusters; -} - -static u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain) -{ - u32 clu, next; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - clu = p_chain->dir; - - if (p_chain->flags == 0x03) { - clu += p_chain->size - 1; - } else { - while ((exfat_fat_read(sb, clu, &next) == 0) && - (next != CLUSTER_32(~0))) { - if (p_fs->dev_ejected) - break; - clu = 
next; - } - } - - return clu; -} - -s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain) -{ - int i, count = 0; - u32 clu; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0))) - return 0; - - clu = p_chain->dir; - - if (p_chain->flags == 0x03) { - count = p_chain->size; - } else { - for (i = 2; i < p_fs->num_clusters; i++) { - count++; - if (exfat_fat_read(sb, clu, &clu) != 0) - return 0; - if (clu == CLUSTER_32(~0)) - break; - } - } - - return count; -} - -s32 exfat_count_used_clusters(struct super_block *sb) -{ - int i, map_i, map_b, count = 0; - u8 k; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - map_i = 0; - map_b = 0; - - for (i = 2; i < p_fs->num_clusters; i += 8) { - k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b); - count += used_bit[k]; - - if ((++map_b) >= p_bd->sector_size) { - map_i++; - map_b = 0; - } - } - - return count; -} - -void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len) -{ - if (len == 0) - return; - - while (len > 1) { - if (exfat_fat_write(sb, chain, chain + 1) < 0) - break; - chain++; - len--; - } - exfat_fat_write(sb, chain, CLUSTER_32(~0)); -} - -/* - * Allocation Bitmap Management Functions - */ - -s32 load_alloc_bitmap(struct super_block *sb) -{ - int i, j, ret; - u32 map_size; - u32 type; - sector_t sector; - struct chain_t clu; - struct bmap_dentry_t *ep; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - clu.dir = p_fs->root_dir; - clu.flags = 0x01; - - while (clu.dir != CLUSTER_32(~0)) { - if (p_fs->dev_ejected) - break; - - for (i = 0; i < p_fs->dentries_per_clu; i++) { - ep = (struct bmap_dentry_t *)get_entry_in_dir(sb, &clu, - i, NULL); - if (!ep) - return -ENOENT; - - type = exfat_get_entry_type((struct dentry_t *)ep); - - if (type == TYPE_UNUSED) - break; - if (type != TYPE_BITMAP) - continue; - - if (ep->flags == 0x0) { - p_fs->map_clu = GET32_A(ep->start_clu); - map_size = (u32)GET64_A(ep->size); - - p_fs->map_sectors = ((map_size - 1) >> p_bd->sector_size_bits) + 1; - - p_fs->vol_amap = kmalloc_array(p_fs->map_sectors, - sizeof(struct buffer_head *), - GFP_KERNEL); - if (!p_fs->vol_amap) - return -ENOMEM; - - sector = START_SECTOR(p_fs->map_clu); - - for (j = 0; j < p_fs->map_sectors; j++) { - p_fs->vol_amap[j] = NULL; - ret = sector_read(sb, sector + j, &p_fs->vol_amap[j], 1); - if (ret != 0) { - /* release all buffers and free vol_amap */ - i = 0; - while (i < j) - brelse(p_fs->vol_amap[i++]); - - kfree(p_fs->vol_amap); - p_fs->vol_amap = NULL; - return ret; - } - } - - p_fs->pbr_bh = NULL; - return 0; - } - } - - if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0) - return -EIO; - } - - return -EFSCORRUPTED; -} - -void free_alloc_bitmap(struct super_block *sb) -{ - int i; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - brelse(p_fs->pbr_bh); - - for (i = 0; i < p_fs->map_sectors; i++) - __brelse(p_fs->vol_amap[i]); - - kfree(p_fs->vol_amap); - p_fs->vol_amap = NULL; -} - -void sync_alloc_bitmap(struct super_block *sb) -{ - int i; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - if (!p_fs->vol_amap) - return; - - for (i = 0; i < p_fs->map_sectors; i++) - sync_dirty_buffer(p_fs->vol_amap[i]); -} - -/* - * Upcase table Management Functions - */ -static s32 __load_upcase_table(struct super_block *sb, sector_t sector, - u32 num_sectors, u32 utbl_checksum) -{ - int i, ret = -EINVAL; - u32 j; - 
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - struct buffer_head *tmp_bh = NULL; - sector_t end_sector = num_sectors + sector; - - bool skip = false; - u32 index = 0; - u16 uni = 0; - u16 **upcase_table; - - u32 checksum = 0; - - upcase_table = kmalloc_array(UTBL_COL_COUNT, sizeof(u16 *), GFP_KERNEL); - p_fs->vol_utbl = upcase_table; - if (!upcase_table) - return -ENOMEM; - memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *)); - - while (sector < end_sector) { - ret = sector_read(sb, sector, &tmp_bh, 1); - if (ret != 0) { - pr_debug("sector read (0x%llX)fail\n", - (unsigned long long)sector); - goto error; - } - sector++; - - for (i = 0; i < p_bd->sector_size && index <= 0xFFFF; i += 2) { - uni = GET16(((u8 *)tmp_bh->b_data) + i); - - checksum = ((checksum & 1) ? 0x80000000 : 0) + - (checksum >> 1) + *(((u8 *)tmp_bh->b_data) + - i); - checksum = ((checksum & 1) ? 0x80000000 : 0) + - (checksum >> 1) + *(((u8 *)tmp_bh->b_data) + - (i + 1)); - - if (skip) { - pr_debug("skip from 0x%X ", index); - index += uni; - pr_debug("to 0x%X (amount of 0x%X)\n", - index, uni); - skip = false; - } else if (uni == index) { - index++; - } else if (uni == 0xFFFF) { - skip = true; - } else { /* uni != index , uni != 0xFFFF */ - u16 col_index = get_col_index(index); - - if (!upcase_table[col_index]) { - pr_debug("alloc = 0x%X\n", col_index); - upcase_table[col_index] = kmalloc_array(UTBL_ROW_COUNT, - sizeof(u16), GFP_KERNEL); - if (!upcase_table[col_index]) { - ret = -ENOMEM; - goto error; - } - - for (j = 0; j < UTBL_ROW_COUNT; j++) - upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j; - } - - upcase_table[col_index][get_row_index(index)] = uni; - index++; - } - } - } - if (index >= 0xFFFF && utbl_checksum == checksum) { - if (tmp_bh) - brelse(tmp_bh); - return 0; - } - ret = -EINVAL; -error: - if (tmp_bh) - brelse(tmp_bh); - free_upcase_table(sb); - return ret; -} - -static s32 __load_default_upcase_table(struct super_block *sb) -{ - int i, ret = -EINVAL; - u32 j; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - bool skip = false; - u32 index = 0; - u16 uni = 0; - u16 **upcase_table; - - upcase_table = kmalloc_array(UTBL_COL_COUNT, sizeof(u16 *), GFP_KERNEL); - p_fs->vol_utbl = upcase_table; - if (!upcase_table) - return -ENOMEM; - memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *)); - - for (i = 0; index <= 0xFFFF && i < NUM_UPCASE * 2; i += 2) { - uni = GET16(uni_upcase + i); - if (skip) { - pr_debug("skip from 0x%X ", index); - index += uni; - pr_debug("to 0x%X (amount of 0x%X)\n", index, uni); - skip = false; - } else if (uni == index) { - index++; - } else if (uni == 0xFFFF) { - skip = true; - } else { /* uni != index , uni != 0xFFFF */ - u16 col_index = get_col_index(index); - - if (!upcase_table[col_index]) { - pr_debug("alloc = 0x%X\n", col_index); - upcase_table[col_index] = kmalloc_array(UTBL_ROW_COUNT, - sizeof(u16), - GFP_KERNEL); - if (!upcase_table[col_index]) { - ret = -ENOMEM; - goto error; - } - - for (j = 0; j < UTBL_ROW_COUNT; j++) - upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j; - } - - upcase_table[col_index][get_row_index(index)] = uni; - index++; - } - } - - if (index >= 0xFFFF) - return 0; - -error: - /* FATAL error: default upcase table has error */ - free_upcase_table(sb); - return ret; -} - -s32 load_upcase_table(struct super_block *sb) -{ - int i; - u32 tbl_clu, tbl_size; - sector_t sector; - u32 type, num_sectors; - struct chain_t clu; - struct case_dentry_t *ep; - struct 
fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - clu.dir = p_fs->root_dir; - clu.flags = 0x01; - - if (p_fs->dev_ejected) - return -EIO; - - while (clu.dir != CLUSTER_32(~0)) { - for (i = 0; i < p_fs->dentries_per_clu; i++) { - ep = (struct case_dentry_t *)get_entry_in_dir(sb, &clu, - i, NULL); - if (!ep) - return -ENOENT; - - type = exfat_get_entry_type((struct dentry_t *)ep); - - if (type == TYPE_UNUSED) - break; - if (type != TYPE_UPCASE) - continue; - - tbl_clu = GET32_A(ep->start_clu); - tbl_size = (u32)GET64_A(ep->size); - - sector = START_SECTOR(tbl_clu); - num_sectors = ((tbl_size - 1) >> p_bd->sector_size_bits) + 1; - if (__load_upcase_table(sb, sector, num_sectors, - GET32_A(ep->checksum)) != 0) - break; - return 0; - } - if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0) - return -EIO; - } - /* load default upcase table */ - return __load_default_upcase_table(sb); -} - -void free_upcase_table(struct super_block *sb) -{ - u32 i; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - u16 **upcase_table; - - upcase_table = p_fs->vol_utbl; - for (i = 0; i < UTBL_COL_COUNT; i++) - kfree(upcase_table[i]); - - kfree(p_fs->vol_utbl); - p_fs->vol_utbl = NULL; -} - -/* - * Directory Entry Management Functions - */ - -u32 exfat_get_entry_type(struct dentry_t *p_entry) -{ - struct file_dentry_t *ep = (struct file_dentry_t *)p_entry; - - if (ep->type == 0x0) { - return TYPE_UNUSED; - } else if (ep->type < 0x80) { - return TYPE_DELETED; - } else if (ep->type == 0x80) { - return TYPE_INVALID; - } else if (ep->type < 0xA0) { - if (ep->type == 0x81) { - return TYPE_BITMAP; - } else if (ep->type == 0x82) { - return TYPE_UPCASE; - } else if (ep->type == 0x83) { - return TYPE_VOLUME; - } else if (ep->type == 0x85) { - if (GET16_A(ep->attr) & ATTR_SUBDIR) - return TYPE_DIR; - else - return TYPE_FILE; - } - return TYPE_CRITICAL_PRI; - } else if (ep->type < 0xC0) { - if (ep->type == 0xA0) - return TYPE_GUID; - else if (ep->type == 0xA1) - return TYPE_PADDING; - else if (ep->type == 0xA2) - return TYPE_ACLTAB; - return TYPE_BENIGN_PRI; - } else if (ep->type < 0xE0) { - if (ep->type == 0xC0) - return TYPE_STREAM; - else if (ep->type == 0xC1) - return TYPE_EXTEND; - else if (ep->type == 0xC2) - return TYPE_ACL; - return TYPE_CRITICAL_SEC; - } - - return TYPE_BENIGN_SEC; -} - -static void exfat_set_entry_type(struct dentry_t *p_entry, u32 type) -{ - struct file_dentry_t *ep = (struct file_dentry_t *)p_entry; - - if (type == TYPE_UNUSED) { - ep->type = 0x0; - } else if (type == TYPE_DELETED) { - ep->type &= ~0x80; - } else if (type == TYPE_STREAM) { - ep->type = 0xC0; - } else if (type == TYPE_EXTEND) { - ep->type = 0xC1; - } else if (type == TYPE_BITMAP) { - ep->type = 0x81; - } else if (type == TYPE_UPCASE) { - ep->type = 0x82; - } else if (type == TYPE_VOLUME) { - ep->type = 0x83; - } else if (type == TYPE_DIR) { - ep->type = 0x85; - SET16_A(ep->attr, ATTR_SUBDIR); - } else if (type == TYPE_FILE) { - ep->type = 0x85; - SET16_A(ep->attr, ATTR_ARCHIVE); - } else if (type == TYPE_SYMLINK) { - ep->type = 0x85; - SET16_A(ep->attr, ATTR_ARCHIVE | ATTR_SYMLINK); - } -} - -u32 exfat_get_entry_attr(struct dentry_t *p_entry) -{ - struct file_dentry_t *ep = (struct file_dentry_t *)p_entry; - - return (u32)GET16_A(ep->attr); -} - -void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr) -{ - struct file_dentry_t *ep = (struct file_dentry_t *)p_entry; - - SET16_A(ep->attr, (u16)attr); -} - -u8 exfat_get_entry_flag(struct dentry_t *p_entry) -{ - struct 
strm_dentry_t *ep = (struct strm_dentry_t *)p_entry; - - return ep->flags; -} - -void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags) -{ - struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry; - - ep->flags = flags; -} - -u32 exfat_get_entry_clu0(struct dentry_t *p_entry) -{ - struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry; - - return GET32_A(ep->start_clu); -} - -void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu) -{ - struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry; - - SET32_A(ep->start_clu, start_clu); -} - -u64 exfat_get_entry_size(struct dentry_t *p_entry) -{ - struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry; - - return GET64_A(ep->valid_size); -} - -void exfat_set_entry_size(struct dentry_t *p_entry, u64 size) -{ - struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry; - - SET64_A(ep->valid_size, size); - SET64_A(ep->size, size); -} - -void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp, - u8 mode) -{ - u16 t = 0x00, d = 0x21; - struct file_dentry_t *ep = (struct file_dentry_t *)p_entry; - - switch (mode) { - case TM_CREATE: - t = GET16_A(ep->create_time); - d = GET16_A(ep->create_date); - break; - case TM_MODIFY: - t = GET16_A(ep->modify_time); - d = GET16_A(ep->modify_date); - break; - case TM_ACCESS: - t = GET16_A(ep->access_time); - d = GET16_A(ep->access_date); - break; - } - - tp->sec = (t & 0x001F) << 1; - tp->min = (t >> 5) & 0x003F; - tp->hour = (t >> 11); - tp->day = (d & 0x001F); - tp->mon = (d >> 5) & 0x000F; - tp->year = (d >> 9); -} - -void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp, - u8 mode) -{ - u16 t, d; - struct file_dentry_t *ep = (struct file_dentry_t *)p_entry; - - t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1); - d = (tp->year << 9) | (tp->mon << 5) | tp->day; - - switch (mode) { - case TM_CREATE: - SET16_A(ep->create_time, t); - SET16_A(ep->create_date, d); - break; - case TM_MODIFY: - SET16_A(ep->modify_time, t); - SET16_A(ep->modify_date, d); - break; - case TM_ACCESS: - SET16_A(ep->access_time, t); - SET16_A(ep->access_date, d); - break; - } -} - -static void init_file_entry(struct file_dentry_t *ep, u32 type) -{ - struct timestamp_t tm, *tp; - - exfat_set_entry_type((struct dentry_t *)ep, type); - - tp = tm_current(&tm); - exfat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE); - exfat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY); - exfat_set_entry_time((struct dentry_t *)ep, tp, TM_ACCESS); - ep->create_time_ms = 0; - ep->modify_time_ms = 0; - ep->access_time_ms = 0; -} - -static void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu, u64 size) -{ - exfat_set_entry_type((struct dentry_t *)ep, TYPE_STREAM); - ep->flags = flags; - SET32_A(ep->start_clu, start_clu); - SET64_A(ep->valid_size, size); - SET64_A(ep->size, size); -} - -static void init_name_entry(struct name_dentry_t *ep, u16 *uniname) -{ - int i; - - exfat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND); - ep->flags = 0x0; - - for (i = 0; i < 30; i++, i++) { - SET16_A(ep->unicode_0_14 + i, *uniname); - if (*uniname == 0x0) - break; - uniname++; - } -} - -static s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, - s32 entry, u32 type, u32 start_clu, u64 size) -{ - sector_t sector; - u8 flags; - struct file_dentry_t *file_ep; - struct strm_dentry_t *strm_ep; - - flags = (type == TYPE_FILE) ? 
0x01 : 0x03;
-
- /* we cannot use get_entry_set_in_dir here because file ep is not initialized yet */
- file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- &sector);
- if (!file_ep)
- return -ENOENT;
-
- strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry + 1,
- &sector);
- if (!strm_ep)
- return -ENOENT;
-
- init_file_entry(file_ep, type);
- exfat_buf_modify(sb, sector);
-
- init_strm_entry(strm_ep, flags, start_clu, size);
- exfat_buf_modify(sb, sector);
-
- return 0;
-}
-
-static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 num_entries,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname)
-{
- int i;
- sector_t sector;
- u16 *uniname = p_uniname->name;
- struct file_dentry_t *file_ep;
- struct strm_dentry_t *strm_ep;
- struct name_dentry_t *name_ep;
-
- file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- &sector);
- if (!file_ep)
- return -ENOENT;
-
- file_ep->num_ext = (u8)(num_entries - 1);
- exfat_buf_modify(sb, sector);
-
- strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry + 1,
- &sector);
- if (!strm_ep)
- return -ENOENT;
-
- strm_ep->name_len = p_uniname->name_len;
- SET16_A(strm_ep->name_hash, p_uniname->name_hash);
- exfat_buf_modify(sb, sector);
-
- for (i = 2; i < num_entries; i++) {
- name_ep = (struct name_dentry_t *)get_entry_in_dir(sb, p_dir,
- entry + i,
- &sector);
- if (!name_ep)
- return -ENOENT;
-
- init_name_entry(name_ep, uniname);
- exfat_buf_modify(sb, sector);
- uniname += 15;
- }
-
- update_dir_checksum(sb, p_dir, entry);
-
- return 0;
-}
-
-void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries)
-{
- int i;
- sector_t sector;
- struct dentry_t *ep;
-
- for (i = order; i < num_entries; i++) {
- ep = get_entry_in_dir(sb, p_dir, entry + i, &sector);
- if (!ep)
- return;
-
- exfat_set_entry_type(ep, TYPE_DELETED);
- exfat_buf_modify(sb, sector);
- }
-}
-
-void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
- s32 entry)
-{
- int i, num_entries;
- sector_t sector;
- u16 chksum;
- struct file_dentry_t *file_ep;
- struct dentry_t *ep;
-
- file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- &sector);
- if (!file_ep)
- return;
-
- exfat_buf_lock(sb, sector);
-
- num_entries = (s32)file_ep->num_ext + 1;
- chksum = calc_checksum_2byte((void *)file_ep, DENTRY_SIZE, 0,
- CS_DIR_ENTRY);
-
- for (i = 1; i < num_entries; i++) {
- ep = get_entry_in_dir(sb, p_dir, entry + i, NULL);
- if (!ep) {
- exfat_buf_unlock(sb, sector);
- return;
- }
-
- chksum = calc_checksum_2byte((void *)ep, DENTRY_SIZE, chksum,
- CS_DEFAULT);
- }
-
- SET16_A(file_ep->checksum, chksum);
- exfat_buf_modify(sb, sector);
- exfat_buf_unlock(sb, sector);
-}
-
-static s32 __write_partial_entries_in_entry_set(struct super_block *sb,
- struct entry_set_cache_t *es,
- sector_t sec, s32 off, u32 count)
-{
- s32 num_entries, buf_off = (off - es->offset);
- u32 remaining_byte_in_sector, copy_entries;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- u32 clu;
- u8 *buf, *esbuf = (u8 *)&es->__buf;
-
- pr_debug("%s entered es %p sec %llu off %d count %d\n",
- __func__, es, (unsigned long long)sec, off, count);
- num_entries = count;
-
- while (num_entries) {
- /* white per sector base */
- remaining_byte_in_sector = (1 << p_bd->sector_size_bits) - off;
- copy_entries = min_t(s32,
- remaining_byte_in_sector >> DENTRY_SIZE_BITS,
- num_entries);
- buf =
exfat_buf_getblk(sb, sec); - if (!buf) - goto err_out; - pr_debug("es->buf %p buf_off %u\n", esbuf, buf_off); - pr_debug("copying %d entries from %p to sector %llu\n", - copy_entries, (esbuf + buf_off), - (unsigned long long)sec); - memcpy(buf + off, esbuf + buf_off, - copy_entries << DENTRY_SIZE_BITS); - exfat_buf_modify(sb, sec); - num_entries -= copy_entries; - - if (num_entries) { - /* get next sector */ - if (IS_LAST_SECTOR_IN_CLUSTER(sec)) { - clu = GET_CLUSTER_FROM_SECTOR(sec); - if (es->alloc_flag == 0x03) { - clu++; - } else { - if (exfat_fat_read(sb, clu, &clu) == -1) - goto err_out; - } - sec = START_SECTOR(clu); - } else { - sec++; - } - off = 0; - buf_off += copy_entries << DENTRY_SIZE_BITS; - } - } - - pr_debug("%s exited successfully\n", __func__); - return 0; -err_out: - pr_debug("%s failed\n", __func__); - return -EINVAL; -} - -/* write back all entries in entry set */ -static s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es) -{ - return __write_partial_entries_in_entry_set(sb, es, es->sector, - es->offset, - es->num_entries); -} - -void update_dir_checksum_with_entry_set(struct super_block *sb, - struct entry_set_cache_t *es) -{ - struct dentry_t *ep; - u16 chksum = 0; - s32 chksum_type = CS_DIR_ENTRY, i; - - ep = (struct dentry_t *)&es->__buf; - for (i = 0; i < es->num_entries; i++) { - pr_debug("%s ep %p\n", __func__, ep); - chksum = calc_checksum_2byte((void *)ep, DENTRY_SIZE, chksum, - chksum_type); - ep++; - chksum_type = CS_DEFAULT; - } - - ep = (struct dentry_t *)&es->__buf; - SET16_A(((struct file_dentry_t *)ep)->checksum, chksum); - write_whole_entry_set(sb, es); -} - -static s32 _walk_fat_chain(struct super_block *sb, struct chain_t *p_dir, - s32 byte_offset, u32 *clu) -{ - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - s32 clu_offset; - u32 cur_clu; - - clu_offset = byte_offset >> p_fs->cluster_size_bits; - cur_clu = p_dir->dir; - - if (p_dir->flags == 0x03) { - cur_clu += clu_offset; - } else { - while (clu_offset > 0) { - if (exfat_fat_read(sb, cur_clu, &cur_clu) == -1) - return -EIO; - clu_offset--; - } - } - - if (clu) - *clu = cur_clu; - return 0; -} - -static s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry, - sector_t *sector, s32 *offset) -{ - s32 off, ret; - u32 clu = 0; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - off = entry << DENTRY_SIZE_BITS; - - if (p_dir->dir == CLUSTER_32(0)) { /* FAT16 root_dir */ - *offset = off & p_bd->sector_size_mask; - *sector = off >> p_bd->sector_size_bits; - *sector += p_fs->root_start_sector; - } else { - ret = _walk_fat_chain(sb, p_dir, off, &clu); - if (ret != 0) - return ret; - - /* byte offset in cluster */ - off &= p_fs->cluster_size - 1; - - /* byte offset in sector */ - *offset = off & p_bd->sector_size_mask; - - /* sector offset in cluster */ - *sector = off >> p_bd->sector_size_bits; - *sector += START_SECTOR(clu); - } - return 0; -} - -struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir, - s32 entry, sector_t *sector) -{ - s32 off; - sector_t sec; - u8 *buf; - - if (find_location(sb, p_dir, entry, &sec, &off) != 0) - return NULL; - - buf = exfat_buf_getblk(sb, sec); - - if (!buf) - return NULL; - - if (sector) - *sector = sec; - return (struct dentry_t *)(buf + off); -} - -/* returns a set of dentries for a file or dir. 
- * Note that this is a copy (dump) of dentries so that user should call write_entry_set() - * to apply changes made in this entry set to the real device. - * in: - * sb+p_dir+entry: indicates a file/dir - * type: specifies how many dentries should be included. - * out: - * file_ep: will point the first dentry(= file dentry) on success - * return: - * pointer of entry set on success, - * NULL on failure. - */ - -#define ES_MODE_STARTED 0 -#define ES_MODE_GET_FILE_ENTRY 1 -#define ES_MODE_GET_STRM_ENTRY 2 -#define ES_MODE_GET_NAME_ENTRY 3 -#define ES_MODE_GET_CRITICAL_SEC_ENTRY 4 -struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb, - struct chain_t *p_dir, s32 entry, - u32 type, - struct dentry_t **file_ep) -{ - s32 off, ret, byte_offset; - u32 clu = 0; - sector_t sec; - u32 entry_type; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - struct entry_set_cache_t *es = NULL; - struct dentry_t *ep, *pos; - u8 *buf; - u8 num_entries; - s32 mode = ES_MODE_STARTED; - size_t bufsize; - - pr_debug("%s entered p_dir dir %u flags %x size %d\n", - __func__, p_dir->dir, p_dir->flags, p_dir->size); - - byte_offset = entry << DENTRY_SIZE_BITS; - ret = _walk_fat_chain(sb, p_dir, byte_offset, &clu); - if (ret != 0) - return NULL; - - /* byte offset in cluster */ - byte_offset &= p_fs->cluster_size - 1; - - /* byte offset in sector */ - off = byte_offset & p_bd->sector_size_mask; - - /* sector offset in cluster */ - sec = byte_offset >> p_bd->sector_size_bits; - sec += START_SECTOR(clu); - - buf = exfat_buf_getblk(sb, sec); - if (!buf) - goto err_out; - - ep = (struct dentry_t *)(buf + off); - entry_type = exfat_get_entry_type(ep); - - if ((entry_type != TYPE_FILE) && (entry_type != TYPE_DIR)) - goto err_out; - - if (type == ES_ALL_ENTRIES) - num_entries = ((struct file_dentry_t *)ep)->num_ext + 1; - else - num_entries = type; - - bufsize = offsetof(struct entry_set_cache_t, __buf) + (num_entries) * - sizeof(struct dentry_t); - pr_debug("%s: trying to kmalloc %zx bytes for %d entries\n", __func__, - bufsize, num_entries); - es = kmalloc(bufsize, GFP_KERNEL); - if (!es) - goto err_out; - - es->num_entries = num_entries; - es->sector = sec; - es->offset = off; - es->alloc_flag = p_dir->flags; - - pos = (struct dentry_t *)&es->__buf; - - while (num_entries) { - /* - * instead of copying whole sector, we will check every entry. - * this will provide minimum stablity and consistency. 
- */ - entry_type = exfat_get_entry_type(ep); - - if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) - goto err_out; - - switch (mode) { - case ES_MODE_STARTED: - if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) - mode = ES_MODE_GET_FILE_ENTRY; - else - goto err_out; - break; - case ES_MODE_GET_FILE_ENTRY: - if (entry_type == TYPE_STREAM) - mode = ES_MODE_GET_STRM_ENTRY; - else - goto err_out; - break; - case ES_MODE_GET_STRM_ENTRY: - if (entry_type == TYPE_EXTEND) - mode = ES_MODE_GET_NAME_ENTRY; - else - goto err_out; - break; - case ES_MODE_GET_NAME_ENTRY: - if (entry_type == TYPE_EXTEND) - break; - else if (entry_type == TYPE_STREAM) - goto err_out; - else if (entry_type & TYPE_CRITICAL_SEC) - mode = ES_MODE_GET_CRITICAL_SEC_ENTRY; - else - goto err_out; - break; - case ES_MODE_GET_CRITICAL_SEC_ENTRY: - if ((entry_type == TYPE_EXTEND) || - (entry_type == TYPE_STREAM)) - goto err_out; - else if ((entry_type & TYPE_CRITICAL_SEC) != - TYPE_CRITICAL_SEC) - goto err_out; - break; - } - - memcpy(pos, ep, sizeof(struct dentry_t)); - - if (--num_entries == 0) - break; - - if (((off + DENTRY_SIZE) & p_bd->sector_size_mask) < - (off & p_bd->sector_size_mask)) { - /* get the next sector */ - if (IS_LAST_SECTOR_IN_CLUSTER(sec)) { - if (es->alloc_flag == 0x03) { - clu++; - } else { - if (exfat_fat_read(sb, clu, &clu) == -1) - goto err_out; - } - sec = START_SECTOR(clu); - } else { - sec++; - } - buf = exfat_buf_getblk(sb, sec); - if (!buf) - goto err_out; - off = 0; - ep = (struct dentry_t *)(buf); - } else { - ep++; - off += DENTRY_SIZE; - } - pos++; - } - - if (file_ep) - *file_ep = (struct dentry_t *)&es->__buf; - - pr_debug("%s exiting es %p sec %llu offset %d flags %d, num_entries %u buf ptr %p\n", - __func__, es, (unsigned long long)es->sector, es->offset, - es->alloc_flag, es->num_entries, &es->__buf); - return es; -err_out: - pr_debug("%s exited NULL (es %p)\n", __func__, es); - kfree(es); - return NULL; -} - -void release_entry_set(struct entry_set_cache_t *es) -{ - pr_debug("%s es=%p\n", __func__, es); - kfree(es); -} - -/* search EMPTY CONTINUOUS "num_entries" entries */ -static s32 search_deleted_or_unused_entry(struct super_block *sb, - struct chain_t *p_dir, - s32 num_entries) -{ - int i, dentry, num_empty = 0; - s32 dentries_per_clu; - u32 type; - struct chain_t clu; - struct dentry_t *ep; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ - dentries_per_clu = p_fs->dentries_in_root; - else - dentries_per_clu = p_fs->dentries_per_clu; - - if (p_fs->hint_uentry.dir == p_dir->dir) { - if (p_fs->hint_uentry.entry == -1) - return -1; - - clu.dir = p_fs->hint_uentry.clu.dir; - clu.size = p_fs->hint_uentry.clu.size; - clu.flags = p_fs->hint_uentry.clu.flags; - - dentry = p_fs->hint_uentry.entry; - } else { - p_fs->hint_uentry.entry = -1; - - clu.dir = p_dir->dir; - clu.size = p_dir->size; - clu.flags = p_dir->flags; - - dentry = 0; - } - - while (clu.dir != CLUSTER_32(~0)) { - if (p_fs->dev_ejected) - break; - - if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ - i = dentry % dentries_per_clu; - else - i = dentry & (dentries_per_clu - 1); - - for (; i < dentries_per_clu; i++, dentry++) { - ep = get_entry_in_dir(sb, &clu, i, NULL); - if (!ep) - return -1; - - type = exfat_get_entry_type(ep); - - if (type == TYPE_UNUSED) { - num_empty++; - if (p_fs->hint_uentry.entry == -1) { - p_fs->hint_uentry.dir = p_dir->dir; - p_fs->hint_uentry.entry = dentry; - - p_fs->hint_uentry.clu.dir = clu.dir; - 
p_fs->hint_uentry.clu.size = clu.size;
- p_fs->hint_uentry.clu.flags = clu.flags;
- }
- } else if (type == TYPE_DELETED) {
- num_empty++;
- } else {
- num_empty = 0;
- }
-
- if (num_empty >= num_entries) {
- p_fs->hint_uentry.dir = CLUSTER_32(~0);
- p_fs->hint_uentry.entry = -1;
-
- if (p_fs->vol_type == EXFAT)
- return dentry - (num_entries - 1);
- else
- return dentry;
- }
- }
-
- if (p_dir->dir == CLUSTER_32(0))
- break; /* FAT16 root_dir */
-
- if (clu.flags == 0x03) {
- if ((--clu.size) > 0)
- clu.dir++;
- else
- clu.dir = CLUSTER_32(~0);
- } else {
- if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
- return -1;
- }
- }
-
- return -1;
-}
-
-static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries)
-{
- s32 ret, dentry;
- u32 last_clu;
- sector_t sector;
- u64 size = 0;
- struct chain_t clu;
- struct dentry_t *ep = NULL;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct file_id_t *fid = &(EXFAT_I(inode)->fid);
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- return search_deleted_or_unused_entry(sb, p_dir, num_entries);
-
- while ((dentry = search_deleted_or_unused_entry(sb, p_dir, num_entries)) < 0) {
- if (p_fs->dev_ejected)
- break;
-
- if (p_dir->dir != p_fs->root_dir)
- size = i_size_read(inode);
-
- last_clu = find_last_cluster(sb, p_dir);
- clu.dir = last_clu + 1;
- clu.size = 0;
- clu.flags = p_dir->flags;
-
- /* (1) allocate a cluster */
- ret = exfat_alloc_cluster(sb, 1, &clu);
- if (ret < 1)
- return -EIO;
-
- if (clear_cluster(sb, clu.dir) != 0)
- return -EIO;
-
- /* (2) append to the FAT chain */
- if (clu.flags != p_dir->flags) {
- exfat_chain_cont_cluster(sb, p_dir->dir, p_dir->size);
- p_dir->flags = 0x01;
- p_fs->hint_uentry.clu.flags = 0x01;
- }
- if (clu.flags == 0x01)
- if (exfat_fat_write(sb, last_clu, clu.dir) < 0)
- return -EIO;
-
- if (p_fs->hint_uentry.entry == -1) {
- p_fs->hint_uentry.dir = p_dir->dir;
- p_fs->hint_uentry.entry = p_dir->size << (p_fs->cluster_size_bits - DENTRY_SIZE_BITS);
-
- p_fs->hint_uentry.clu.dir = clu.dir;
- p_fs->hint_uentry.clu.size = 0;
- p_fs->hint_uentry.clu.flags = clu.flags;
- }
- p_fs->hint_uentry.clu.size++;
- p_dir->size++;
-
- /* (3) update the directory entry */
- if (p_dir->dir != p_fs->root_dir) {
- size += p_fs->cluster_size;
-
- ep = get_entry_in_dir(sb, &fid->dir,
- fid->entry + 1, &sector);
- if (!ep)
- return -ENOENT;
- exfat_set_entry_size(ep, size);
- exfat_set_entry_flag(ep, p_dir->flags);
- exfat_buf_modify(sb, sector);
-
- update_dir_checksum(sb, &fid->dir,
- fid->entry);
- }
-
- i_size_write(inode, i_size_read(inode) + p_fs->cluster_size);
- EXFAT_I(inode)->mmu_private += p_fs->cluster_size;
- EXFAT_I(inode)->fid.size += p_fs->cluster_size;
- EXFAT_I(inode)->fid.flags = p_dir->flags;
- inode->i_blocks += 1 << (p_fs->cluster_size_bits - 9);
- }
-
- return dentry;
-}
-
-static s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname,
- s32 order)
-{
- int i, len = 0;
-
- for (i = 0; i < 30; i += 2) {
- *uniname = GET16_A(ep->unicode_0_14 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
-
- *uniname = 0x0;
- return len;
-}
-
-/* return values of exfat_find_dir_entry()
- * >= 0 : return dir entiry position with the name in dir
- * -1 : (root dir, ".") it is the root dir itself
- * -2 : entry with the name does not exist
- */
-s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type)
-{
- int i = 0, dentry = 0, num_ext_entries = 0, len, step; - s32 order = 0; - bool is_feasible_entry = false; - s32 dentries_per_clu, num_empty = 0; - u32 entry_type; - u16 entry_uniname[16], *uniname = NULL, unichar; - struct chain_t clu; - struct dentry_t *ep; - struct file_dentry_t *file_ep; - struct strm_dentry_t *strm_ep; - struct name_dentry_t *name_ep; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - if (p_dir->dir == p_fs->root_dir) { - if ((!nls_uniname_cmp(sb, p_uniname->name, - (u16 *)UNI_CUR_DIR_NAME)) || - (!nls_uniname_cmp(sb, p_uniname->name, - (u16 *)UNI_PAR_DIR_NAME))) - return -1; // special case, root directory itself - } - - if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ - dentries_per_clu = p_fs->dentries_in_root; - else - dentries_per_clu = p_fs->dentries_per_clu; - - clu.dir = p_dir->dir; - clu.size = p_dir->size; - clu.flags = p_dir->flags; - - p_fs->hint_uentry.dir = p_dir->dir; - p_fs->hint_uentry.entry = -1; - - while (clu.dir != CLUSTER_32(~0)) { - if (p_fs->dev_ejected) - break; - - while (i < dentries_per_clu) { - ep = get_entry_in_dir(sb, &clu, i, NULL); - if (!ep) - return -2; - - entry_type = exfat_get_entry_type(ep); - step = 1; - - if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) { - is_feasible_entry = false; - - if (p_fs->hint_uentry.entry == -1) { - num_empty++; - - if (num_empty == 1) { - p_fs->hint_uentry.clu.dir = clu.dir; - p_fs->hint_uentry.clu.size = clu.size; - p_fs->hint_uentry.clu.flags = clu.flags; - } - if ((num_empty >= num_entries) || (entry_type == TYPE_UNUSED)) - p_fs->hint_uentry.entry = dentry - (num_empty - 1); - } - - if (entry_type == TYPE_UNUSED) - return -2; - } else { - num_empty = 0; - - if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) { - file_ep = (struct file_dentry_t *)ep; - if ((type == TYPE_ALL) || (type == entry_type)) { - num_ext_entries = file_ep->num_ext; - is_feasible_entry = true; - } else { - is_feasible_entry = false; - step = file_ep->num_ext + 1; - } - } else if (entry_type == TYPE_STREAM) { - if (is_feasible_entry) { - strm_ep = (struct strm_dentry_t *)ep; - if (p_uniname->name_hash == GET16_A(strm_ep->name_hash) && - p_uniname->name_len == strm_ep->name_len) { - order = 1; - } else { - is_feasible_entry = false; - step = num_ext_entries; - } - } - } else if (entry_type == TYPE_EXTEND) { - if (is_feasible_entry) { - name_ep = (struct name_dentry_t *)ep; - - if ((++order) == 2) - uniname = p_uniname->name; - else - uniname += 15; - - len = extract_uni_name_from_name_entry(name_ep, - entry_uniname, order); - - unichar = *(uniname + len); - *(uniname + len) = 0x0; - - if (nls_uniname_cmp(sb, uniname, entry_uniname)) { - is_feasible_entry = false; - step = num_ext_entries - order + 1; - } else if (order == num_ext_entries) { - p_fs->hint_uentry.dir = CLUSTER_32(~0); - p_fs->hint_uentry.entry = -1; - return dentry - (num_ext_entries); - } - - *(uniname + len) = unichar; - } - } else { - is_feasible_entry = false; - } - } - - i += step; - dentry += step; - } - - i -= dentries_per_clu; - - if (p_dir->dir == CLUSTER_32(0)) - break; /* FAT16 root_dir */ - - if (clu.flags == 0x03) { - if ((--clu.size) > 0) - clu.dir++; - else - clu.dir = CLUSTER_32(~0); - } else { - if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0) - return -2; - } - } - - return -2; -} - -s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir, - s32 entry, struct dentry_t *p_entry) -{ - int i, count = 0; - u32 type; - struct file_dentry_t *file_ep = (struct file_dentry_t *)p_entry; - struct dentry_t 
*ext_ep; - - for (i = 0, entry++; i < file_ep->num_ext; i++, entry++) { - ext_ep = get_entry_in_dir(sb, p_dir, entry, NULL); - if (!ext_ep) - return -1; - - type = exfat_get_entry_type(ext_ep); - if ((type == TYPE_EXTEND) || (type == TYPE_STREAM)) - count++; - else - return count; - } - - return count; -} - -s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir, - u32 type) -{ - int i, count = 0; - s32 dentries_per_clu; - u32 entry_type; - struct chain_t clu; - struct dentry_t *ep; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ - dentries_per_clu = p_fs->dentries_in_root; - else - dentries_per_clu = p_fs->dentries_per_clu; - - clu.dir = p_dir->dir; - clu.size = p_dir->size; - clu.flags = p_dir->flags; - - while (clu.dir != CLUSTER_32(~0)) { - if (p_fs->dev_ejected) - break; - - for (i = 0; i < dentries_per_clu; i++) { - ep = get_entry_in_dir(sb, &clu, i, NULL); - if (!ep) - return -ENOENT; - - entry_type = exfat_get_entry_type(ep); - - if (entry_type == TYPE_UNUSED) - return count; - if (!(type & TYPE_CRITICAL_PRI) && - !(type & TYPE_BENIGN_PRI)) - continue; - - if ((type == TYPE_ALL) || (type == entry_type)) - count++; - } - - if (p_dir->dir == CLUSTER_32(0)) - break; /* FAT16 root_dir */ - - if (clu.flags == 0x03) { - if ((--clu.size) > 0) - clu.dir++; - else - clu.dir = CLUSTER_32(~0); - } else { - if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0) - return -EIO; - } - } - - return count; -} - -bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir) -{ - int i, count = 0; - s32 dentries_per_clu; - u32 type; - struct chain_t clu; - struct dentry_t *ep; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ - dentries_per_clu = p_fs->dentries_in_root; - else - dentries_per_clu = p_fs->dentries_per_clu; - - clu.dir = p_dir->dir; - clu.size = p_dir->size; - clu.flags = p_dir->flags; - - while (clu.dir != CLUSTER_32(~0)) { - if (p_fs->dev_ejected) - break; - - for (i = 0; i < dentries_per_clu; i++) { - ep = get_entry_in_dir(sb, &clu, i, NULL); - if (!ep) - break; - - type = exfat_get_entry_type(ep); - - if (type == TYPE_UNUSED) - return true; - if ((type != TYPE_FILE) && (type != TYPE_DIR)) - continue; - - if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ - return false; - - if (p_fs->vol_type == EXFAT) - return false; - if ((p_dir->dir == p_fs->root_dir) || ((++count) > 2)) - return false; - } - - if (p_dir->dir == CLUSTER_32(0)) - break; /* FAT16 root_dir */ - - if (clu.flags == 0x03) { - if ((--clu.size) > 0) - clu.dir++; - else - clu.dir = CLUSTER_32(~0); - } - if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0) - break; - } - - return true; -} - -/* - * Name Conversion Functions - */ - -/* input : dir, uni_name - * output : num_of_entry, dos_name(format : aaaaaa~1.bbb) - */ -s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir, - struct uni_name_t *p_uniname, s32 *entries, - struct dos_name_t *p_dosname) -{ - s32 num_entries; - - num_entries = exfat_calc_num_entries(p_uniname); - if (num_entries == 0) - return -EINVAL; - - *entries = num_entries; - - return 0; -} - -void exfat_get_uni_name_from_ext_entry(struct super_block *sb, - struct chain_t *p_dir, s32 entry, - u16 *uniname) -{ - int i; - struct dentry_t *ep; - struct entry_set_cache_t *es; - - es = get_entry_set_in_dir(sb, p_dir, entry, ES_ALL_ENTRIES, &ep); - if (!es || es->num_entries < 3) { - if (es) - release_entry_set(es); - return; - } - - ep += 2; - - /* - 
* First entry : file entry - * Second entry : stream-extension entry - * Third entry : first file-name entry - * So, the index of first file-name dentry should start from 2. - */ - for (i = 2; i < es->num_entries; i++, ep++) { - if (exfat_get_entry_type(ep) == TYPE_EXTEND) - extract_uni_name_from_name_entry((struct name_dentry_t *) - ep, uniname, i); - else - goto out; - uniname += 15; - } - -out: - release_entry_set(es); -} - -s32 exfat_calc_num_entries(struct uni_name_t *p_uniname) -{ - s32 len; - - len = p_uniname->name_len; - if (len == 0) - return 0; - - /* 1 file entry + 1 stream entry + name entries */ - return (len - 1) / 15 + 3; -} - -u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type) -{ - int i; - u8 *c = (u8 *)data; - - switch (type) { - case CS_DIR_ENTRY: - for (i = 0; i < len; i++, c++) { - if ((i == 2) || (i == 3)) - continue; - chksum = (((chksum & 1) << 15) | - ((chksum & 0xFFFE) >> 1)) + (u16)*c; - } - break; - default - : - for (i = 0; i < len; i++, c++) - chksum = (((chksum & 1) << 15) | - ((chksum & 0xFFFE) >> 1)) + (u16)*c; - } - - return chksum; -} - -/* - * Name Resolution Functions - */ - -/* return values of resolve_path() - * > 0 : return the length of the path - * < 0 : return error - */ -s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir, - struct uni_name_t *p_uniname) -{ - bool lossy = false; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct file_id_t *fid = &(EXFAT_I(inode)->fid); - - if (strscpy(name_buf, path, sizeof(name_buf)) < 0) - return -EINVAL; - - nls_cstring_to_uniname(sb, p_uniname, name_buf, &lossy); - if (lossy) - return -EINVAL; - - fid->size = i_size_read(inode); - - p_dir->dir = fid->start_clu; - p_dir->size = (s32)(fid->size >> p_fs->cluster_size_bits); - p_dir->flags = fid->flags; - - return 0; -} - -s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr) -{ - struct bpbex_t *p_bpb = (struct bpbex_t *)p_pbr->bpb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - if (p_bpb->num_fats == 0) - return -EFSCORRUPTED; - - p_fs->sectors_per_clu = 1 << p_bpb->sectors_per_clu_bits; - p_fs->sectors_per_clu_bits = p_bpb->sectors_per_clu_bits; - p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits + - p_bd->sector_size_bits; - p_fs->cluster_size = 1 << p_fs->cluster_size_bits; - - p_fs->num_FAT_sectors = GET32(p_bpb->fat_length); - - p_fs->FAT1_start_sector = p_fs->PBR_sector + GET32(p_bpb->fat_offset); - if (p_bpb->num_fats == 1) - p_fs->FAT2_start_sector = p_fs->FAT1_start_sector; - else - p_fs->FAT2_start_sector = p_fs->FAT1_start_sector + - p_fs->num_FAT_sectors; - - p_fs->root_start_sector = p_fs->PBR_sector + GET32(p_bpb->clu_offset); - p_fs->data_start_sector = p_fs->root_start_sector; - - p_fs->num_sectors = GET64(p_bpb->vol_length); - p_fs->num_clusters = GET32(p_bpb->clu_count) + 2; - /* because the cluster index starts with 2 */ - - p_fs->vol_type = EXFAT; - p_fs->vol_id = GET32(p_bpb->vol_serial); - - p_fs->root_dir = GET32(p_bpb->root_cluster); - p_fs->dentries_in_root = 0; - p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits - - DENTRY_SIZE_BITS); - - p_fs->vol_flag = (u32)GET16(p_bpb->vol_flags); - p_fs->clu_srch_ptr = 2; - p_fs->used_clusters = UINT_MAX; - - return 0; -} - -s32 create_dir(struct inode *inode, struct chain_t *p_dir, - struct uni_name_t *p_uniname, struct file_id_t *fid) -{ - s32 ret, dentry, num_entries; - u64 size; - struct chain_t clu; - struct dos_name_t 
dos_name; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries, - &dos_name); - if (ret) - return ret; - - /* find_empty_entry must be called before alloc_cluster */ - dentry = find_empty_entry(inode, p_dir, num_entries); - if (dentry < 0) - return -ENOSPC; - - clu.dir = CLUSTER_32(~0); - clu.size = 0; - clu.flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01; - - /* (1) allocate a cluster */ - ret = exfat_alloc_cluster(sb, 1, &clu); - if (ret < 0) - return ret; - else if (ret == 0) - return -ENOSPC; - - ret = clear_cluster(sb, clu.dir); - if (ret != 0) - return ret; - - size = p_fs->cluster_size; - - /* (2) update the directory entry */ - /* make sub-dir entry in parent directory */ - ret = exfat_init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir, - size); - if (ret != 0) - return ret; - - ret = exfat_init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname, - &dos_name); - if (ret != 0) - return ret; - - fid->dir.dir = p_dir->dir; - fid->dir.size = p_dir->size; - fid->dir.flags = p_dir->flags; - fid->entry = dentry; - - fid->attr = ATTR_SUBDIR; - fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01; - fid->size = size; - fid->start_clu = clu.dir; - - fid->type = TYPE_DIR; - fid->rwoffset = 0; - fid->hint_last_off = -1; - - return 0; -} - -s32 create_file(struct inode *inode, struct chain_t *p_dir, - struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid) -{ - s32 ret, dentry, num_entries; - struct dos_name_t dos_name; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries, - &dos_name); - if (ret) - return ret; - - /* find_empty_entry must be called before alloc_cluster() */ - dentry = find_empty_entry(inode, p_dir, num_entries); - if (dentry < 0) - return -ENOSPC; - - /* (1) update the directory entry */ - /* fill the dos name directory entry information of the created file. - * the first cluster is not determined yet. (0) - */ - ret = exfat_init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode, - CLUSTER_32(0), 0); - if (ret != 0) - return ret; - - ret = exfat_init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname, - &dos_name); - if (ret != 0) - return ret; - - fid->dir.dir = p_dir->dir; - fid->dir.size = p_dir->size; - fid->dir.flags = p_dir->flags; - fid->entry = dentry; - - fid->attr = ATTR_ARCHIVE | mode; - fid->flags = (p_fs->vol_type == EXFAT) ? 
0x03 : 0x01;
- fid->size = 0;
- fid->start_clu = CLUSTER_32(~0);
-
- fid->type = TYPE_FILE;
- fid->rwoffset = 0;
- fid->hint_last_off = -1;
-
- return 0;
-}
-
-void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
-{
- s32 num_entries;
- sector_t sector;
- struct dentry_t *ep;
- struct super_block *sb = inode->i_sb;
-
- ep = get_entry_in_dir(sb, p_dir, entry, &sector);
- if (!ep)
- return;
-
- exfat_buf_lock(sb, sector);
-
- /* exfat_buf_lock() before call count_ext_entries() */
- num_entries = exfat_count_ext_entries(sb, p_dir, entry, ep);
- if (num_entries < 0) {
- exfat_buf_unlock(sb, sector);
- return;
- }
- num_entries++;
-
- exfat_buf_unlock(sb, sector);
-
- /* (1) update the directory entry */
- exfat_delete_dir_entry(sb, p_dir, entry, 0, num_entries);
-}
-
-s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
- struct uni_name_t *p_uniname, struct file_id_t *fid)
-{
- s32 ret, newentry = -1, num_old_entries, num_new_entries;
- sector_t sector_old, sector_new;
- struct dos_name_t dos_name;
- struct dentry_t *epold, *epnew;
- struct super_block *sb = inode->i_sb;
-
- epold = get_entry_in_dir(sb, p_dir, oldentry, &sector_old);
- if (!epold)
- return -ENOENT;
-
- exfat_buf_lock(sb, sector_old);
-
- /* exfat_buf_lock() before call count_ext_entries() */
- num_old_entries = exfat_count_ext_entries(sb, p_dir, oldentry,
- epold);
- if (num_old_entries < 0) {
- exfat_buf_unlock(sb, sector_old);
- return -ENOENT;
- }
- num_old_entries++;
-
- ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname,
- &num_new_entries, &dos_name);
- if (ret) {
- exfat_buf_unlock(sb, sector_old);
- return ret;
- }
-
- if (num_old_entries < num_new_entries) {
- newentry = find_empty_entry(inode, p_dir, num_new_entries);
- if (newentry < 0) {
- exfat_buf_unlock(sb, sector_old);
- return -ENOSPC;
- }
-
- epnew = get_entry_in_dir(sb, p_dir, newentry, &sector_new);
- if (!epnew) {
- exfat_buf_unlock(sb, sector_old);
- return -ENOENT;
- }
-
- memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
- if (exfat_get_entry_type(epnew) == TYPE_FILE) {
- exfat_set_entry_attr(epnew,
- exfat_get_entry_attr(epnew) |
- ATTR_ARCHIVE);
- fid->attr |= ATTR_ARCHIVE;
- }
- exfat_buf_modify(sb, sector_new);
- exfat_buf_unlock(sb, sector_old);
-
- epold = get_entry_in_dir(sb, p_dir, oldentry + 1,
- &sector_old);
- exfat_buf_lock(sb, sector_old);
- epnew = get_entry_in_dir(sb, p_dir, newentry + 1,
- &sector_new);
-
- if (!epold || !epnew) {
- exfat_buf_unlock(sb, sector_old);
- return -ENOENT;
- }
-
- memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
- exfat_buf_modify(sb, sector_new);
- exfat_buf_unlock(sb, sector_old);
-
- ret = exfat_init_ext_entry(sb, p_dir, newentry,
- num_new_entries, p_uniname,
- &dos_name);
- if (ret != 0)
- return ret;
-
- exfat_delete_dir_entry(sb, p_dir, oldentry, 0,
- num_old_entries);
- fid->entry = newentry;
- } else {
- if (exfat_get_entry_type(epold) == TYPE_FILE) {
- exfat_set_entry_attr(epold,
- exfat_get_entry_attr(epold) |
- ATTR_ARCHIVE);
- fid->attr |= ATTR_ARCHIVE;
- }
- exfat_buf_modify(sb, sector_old);
- exfat_buf_unlock(sb, sector_old);
-
- ret = exfat_init_ext_entry(sb, p_dir, oldentry,
- num_new_entries, p_uniname,
- &dos_name);
- if (ret != 0)
- return ret;
-
- exfat_delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
- num_old_entries);
- }
-
- return 0;
-}
-
-s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
- struct chain_t *p_newdir, struct uni_name_t *p_uniname,
- struct file_id_t *fid)
-{
- s32 ret, newentry, num_new_entries,
num_old_entries;
- sector_t sector_mov, sector_new;
- struct dos_name_t dos_name;
- struct dentry_t *epmov, *epnew;
- struct super_block *sb = inode->i_sb;
-
- epmov = get_entry_in_dir(sb, p_olddir, oldentry, &sector_mov);
- if (!epmov)
- return -ENOENT;
-
- /* check if the source and target directory is the same */
- if (exfat_get_entry_type(epmov) == TYPE_DIR &&
- exfat_get_entry_clu0(epmov) == p_newdir->dir)
- return -EINVAL;
-
- exfat_buf_lock(sb, sector_mov);
-
- /* exfat_buf_lock() before call count_ext_entries() */
- num_old_entries = exfat_count_ext_entries(sb, p_olddir, oldentry,
- epmov);
- if (num_old_entries < 0) {
- exfat_buf_unlock(sb, sector_mov);
- return -ENOENT;
- }
- num_old_entries++;
-
- ret = get_num_entries_and_dos_name(sb, p_newdir, p_uniname,
- &num_new_entries, &dos_name);
- if (ret) {
- exfat_buf_unlock(sb, sector_mov);
- return ret;
- }
-
- newentry = find_empty_entry(inode, p_newdir, num_new_entries);
- if (newentry < 0) {
- exfat_buf_unlock(sb, sector_mov);
- return -ENOSPC;
- }
-
- epnew = get_entry_in_dir(sb, p_newdir, newentry, &sector_new);
- if (!epnew) {
- exfat_buf_unlock(sb, sector_mov);
- return -ENOENT;
- }
-
- memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
- if (exfat_get_entry_type(epnew) == TYPE_FILE) {
- exfat_set_entry_attr(epnew, exfat_get_entry_attr(epnew) |
- ATTR_ARCHIVE);
- fid->attr |= ATTR_ARCHIVE;
- }
- exfat_buf_modify(sb, sector_new);
- exfat_buf_unlock(sb, sector_mov);
-
- epmov = get_entry_in_dir(sb, p_olddir, oldentry + 1,
- &sector_mov);
- exfat_buf_lock(sb, sector_mov);
- epnew = get_entry_in_dir(sb, p_newdir, newentry + 1,
- &sector_new);
- if (!epmov || !epnew) {
- exfat_buf_unlock(sb, sector_mov);
- return -ENOENT;
- }
-
- memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
- exfat_buf_modify(sb, sector_new);
- exfat_buf_unlock(sb, sector_mov);
-
- ret = exfat_init_ext_entry(sb, p_newdir, newentry, num_new_entries,
- p_uniname, &dos_name);
- if (ret != 0)
- return ret;
-
- exfat_delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
-
- fid->dir.dir = p_newdir->dir;
- fid->dir.size = p_newdir->size;
- fid->dir.flags = p_newdir->flags;
-
- fid->entry = newentry;
-
- return 0;
-}
-
-/*
- * Sector Read/Write Functions
- */
-
-int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
- bool read)
-{
- s32 ret = -EIO;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if ((sec >= (p_fs->PBR_sector + p_fs->num_sectors)) &&
- (p_fs->num_sectors > 0)) {
- pr_err("[EXFAT] %s: out of range error! (sec = %llu)\n",
- __func__, (unsigned long long)sec);
- fs_error(sb);
- return ret;
- }
-
- if (!p_fs->dev_ejected) {
- ret = exfat_bdev_read(sb, sec, bh, 1, read);
- if (ret != 0)
- p_fs->dev_ejected = 1;
- }
-
- return ret;
-}
-
-int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
- bool sync)
-{
- s32 ret = -EIO;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if (sec >= (p_fs->PBR_sector + p_fs->num_sectors) &&
- (p_fs->num_sectors > 0)) {
- pr_err("[EXFAT] %s: out of range error! 
(sec = %llu)\n", - __func__, (unsigned long long)sec); - fs_error(sb); - return ret; - } - - if (!bh) { - pr_err("[EXFAT] %s: bh is NULL!\n", __func__); - fs_error(sb); - return ret; - } - - if (!p_fs->dev_ejected) { - ret = exfat_bdev_write(sb, sec, bh, 1, sync); - if (ret != 0) - p_fs->dev_ejected = 1; - } - - return ret; -} - -int multi_sector_read(struct super_block *sb, sector_t sec, - struct buffer_head **bh, s32 num_secs, bool read) -{ - s32 ret = -EIO; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - if (((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors)) && - (p_fs->num_sectors > 0)) { - pr_err("[EXFAT] %s: out of range error! (sec = %llu, num_secs = %d)\n", - __func__, (unsigned long long)sec, num_secs); - fs_error(sb); - return ret; - } - - if (!p_fs->dev_ejected) { - ret = exfat_bdev_read(sb, sec, bh, num_secs, read); - if (ret != 0) - p_fs->dev_ejected = 1; - } - - return ret; -} - -int multi_sector_write(struct super_block *sb, sector_t sec, - struct buffer_head *bh, s32 num_secs, bool sync) -{ - s32 ret = -EIO; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - if ((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors) && - (p_fs->num_sectors > 0)) { - pr_err("[EXFAT] %s: out of range error! (sec = %llu, num_secs = %d)\n", - __func__, (unsigned long long)sec, num_secs); - fs_error(sb); - return ret; - } - if (!bh) { - pr_err("[EXFAT] %s: bh is NULL!\n", __func__); - fs_error(sb); - return ret; - } - - if (!p_fs->dev_ejected) { - ret = exfat_bdev_write(sb, sec, bh, num_secs, sync); - if (ret != 0) - p_fs->dev_ejected = 1; - } - - return ret; -} diff --git a/drivers/staging/exfat/exfat_nls.c b/drivers/staging/exfat/exfat_nls.c deleted file mode 100644 index 91e8b0c4dce7..000000000000 --- a/drivers/staging/exfat/exfat_nls.c +++ /dev/null @@ -1,212 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. - */ - -#include <linux/string.h> -#include <linux/nls.h> -#include "exfat.h" - -static u16 bad_uni_chars[] = { - /* " * / : < > ? 
\ | */ - 0x0022, 0x002A, 0x002F, 0x003A, - 0x003C, 0x003E, 0x003F, 0x005C, 0x007C, - 0 -}; - -static int convert_ch_to_uni(struct nls_table *nls, u16 *uni, u8 *ch, - bool *lossy) -{ - int len; - - *uni = 0x0; - - if (ch[0] < 0x80) { - *uni = (u16)ch[0]; - return 1; - } - - len = nls->char2uni(ch, NLS_MAX_CHARSET_SIZE, uni); - if (len < 0) { - /* conversion failed */ - pr_info("%s: fail to use nls\n", __func__); - if (lossy) - *lossy = true; - *uni = (u16)'_'; - if (!strcmp(nls->charset, "utf8")) - return 1; - else - return 2; - } - - return len; -} - -static int convert_uni_to_ch(struct nls_table *nls, u8 *ch, u16 uni, - bool *lossy) -{ - int len; - - ch[0] = 0x0; - - if (uni < 0x0080) { - ch[0] = (u8)uni; - return 1; - } - - len = nls->uni2char(uni, ch, NLS_MAX_CHARSET_SIZE); - if (len < 0) { - /* conversion failed */ - pr_info("%s: fail to use nls\n", __func__); - if (lossy) - *lossy = true; - ch[0] = '_'; - return 1; - } - - return len; -} - -u16 nls_upper(struct super_block *sb, u16 a) -{ - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - if (EXFAT_SB(sb)->options.casesensitive) - return a; - if (p_fs->vol_utbl && p_fs->vol_utbl[get_col_index(a)]) - return p_fs->vol_utbl[get_col_index(a)][get_row_index(a)]; - else - return a; -} - -static u16 *nls_wstrchr(u16 *str, u16 wchar) -{ - while (*str) { - if (*(str++) == wchar) - return str; - } - - return NULL; -} - -int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b) -{ - int i; - - for (i = 0; i < MAX_NAME_LENGTH; i++, a++, b++) { - if (nls_upper(sb, *a) != nls_upper(sb, *b)) - return 1; - if (*a == 0x0) - return 0; - } - return 0; -} - -void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring, - struct uni_name_t *p_uniname) -{ - int i, j, len; - u8 buf[MAX_CHARSET_SIZE]; - u16 *uniname = p_uniname->name; - struct nls_table *nls = EXFAT_SB(sb)->nls_io; - - if (!nls) { - len = utf16s_to_utf8s(uniname, MAX_NAME_LENGTH, - UTF16_HOST_ENDIAN, p_cstring, - MAX_NAME_LENGTH); - p_cstring[len] = 0; - return; - } - - i = 0; - while (i < (MAX_NAME_LENGTH - 1)) { - if (*uniname == (u16)'\0') - break; - - len = convert_uni_to_ch(nls, buf, *uniname, NULL); - - if (len > 1) { - for (j = 0; j < len; j++) - *p_cstring++ = (char)*(buf + j); - } else { /* len == 1 */ - *p_cstring++ = (char)*buf; - } - - uniname++; - i++; - } - - *p_cstring = '\0'; -} - -void nls_cstring_to_uniname(struct super_block *sb, - struct uni_name_t *p_uniname, u8 *p_cstring, - bool *p_lossy) -{ - int i, j; - bool lossy = false; - u8 *end_of_name; - u8 upname[MAX_NAME_LENGTH * 2]; - u16 *uniname = p_uniname->name; - struct nls_table *nls = EXFAT_SB(sb)->nls_io; - - /* strip all trailing spaces */ - end_of_name = p_cstring + strlen(p_cstring); - - while (*(--end_of_name) == ' ') { - if (end_of_name < p_cstring) - break; - } - *(++end_of_name) = '\0'; - - if (strcmp(p_cstring, ".") && strcmp(p_cstring, "..")) { - /* strip all trailing periods */ - while (*(--end_of_name) == '.') { - if (end_of_name < p_cstring) - break; - } - *(++end_of_name) = '\0'; - } - - if (*p_cstring == '\0') - lossy = true; - - if (!nls) { - i = utf8s_to_utf16s(p_cstring, MAX_NAME_LENGTH, - UTF16_HOST_ENDIAN, uniname, - MAX_NAME_LENGTH); - for (j = 0; j < i; j++) - SET16_A(upname + j * 2, nls_upper(sb, uniname[j])); - uniname[i] = '\0'; - } else { - i = 0; - j = 0; - while (j < (MAX_NAME_LENGTH - 1)) { - if (*(p_cstring + i) == '\0') - break; - - i += convert_ch_to_uni(nls, uniname, - (u8 *)(p_cstring + i), &lossy); - - if ((*uniname < 0x0020) || - nls_wstrchr(bad_uni_chars, *uniname)) - 
lossy = true; - - SET16_A(upname + j * 2, nls_upper(sb, *uniname)); - - uniname++; - j++; - } - - if (*(p_cstring + i) != '\0') - lossy = true; - *uniname = (u16)'\0'; - } - - p_uniname->name_len = j; - p_uniname->name_hash = calc_checksum_2byte(upname, j << 1, 0, - CS_DEFAULT); - - if (p_lossy) - *p_lossy = lossy; -} diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c deleted file mode 100644 index b81d2a87b82e..000000000000 --- a/drivers/staging/exfat/exfat_super.c +++ /dev/null @@ -1,3883 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/time.h> -#include <linux/slab.h> -#include <linux/mm.h> -#include <linux/seq_file.h> -#include <linux/pagemap.h> -#include <linux/mpage.h> -#include <linux/buffer_head.h> -#include <linux/exportfs.h> -#include <linux/mount.h> -#include <linux/vfs.h> -#include <linux/aio.h> -#include <linux/iversion.h> -#include <linux/parser.h> -#include <linux/uio.h> -#include <linux/writeback.h> -#include <linux/log2.h> -#include <linux/hash.h> -#include <linux/backing-dev.h> -#include <linux/sched.h> -#include <linux/fs_struct.h> -#include <linux/namei.h> -#include <linux/random.h> -#include <linux/string.h> -#include <linux/nls.h> -#include <linux/mutex.h> -#include <linux/swap.h> - -#define EXFAT_VERSION "1.3.0" - -#include "exfat.h" - -static struct kmem_cache *exfat_inode_cachep; - -static int exfat_default_codepage = CONFIG_STAGING_EXFAT_DEFAULT_CODEPAGE; -static char exfat_default_iocharset[] = CONFIG_STAGING_EXFAT_DEFAULT_IOCHARSET; - -#define INC_IVERSION(x) (inode_inc_iversion(x)) -#define GET_IVERSION(x) (inode_peek_iversion_raw(x)) -#define SET_IVERSION(x, y) (inode_set_iversion(x, y)) - -static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos); -static int exfat_sync_inode(struct inode *inode); -static struct inode *exfat_build_inode(struct super_block *sb, - struct file_id_t *fid, loff_t i_pos); -static int exfat_write_inode(struct inode *inode, - struct writeback_control *wbc); -static void exfat_write_super(struct super_block *sb); - -#define UNIX_SECS_1980 315532800L -#define UNIX_SECS_2108 4354819200L - -/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */ -static void exfat_time_fat2unix(struct timespec64 *ts, struct date_time_t *tp) -{ - ts->tv_sec = mktime64(tp->Year + 1980, tp->Month + 1, tp->Day, - tp->Hour, tp->Minute, tp->Second); - - ts->tv_nsec = tp->MilliSecond * NSEC_PER_MSEC; -} - -/* Convert linear UNIX date to a FAT time/date pair. 
*/ -static void exfat_time_unix2fat(struct timespec64 *ts, struct date_time_t *tp) -{ - time64_t second = ts->tv_sec; - struct tm tm; - - time64_to_tm(second, 0, &tm); - - if (second < UNIX_SECS_1980) { - tp->MilliSecond = 0; - tp->Second = 0; - tp->Minute = 0; - tp->Hour = 0; - tp->Day = 1; - tp->Month = 1; - tp->Year = 0; - return; - } - - if (second >= UNIX_SECS_2108) { - tp->MilliSecond = 999; - tp->Second = 59; - tp->Minute = 59; - tp->Hour = 23; - tp->Day = 31; - tp->Month = 12; - tp->Year = 127; - return; - } - - tp->MilliSecond = ts->tv_nsec / NSEC_PER_MSEC; - tp->Second = tm.tm_sec; - tp->Minute = tm.tm_min; - tp->Hour = tm.tm_hour; - tp->Day = tm.tm_mday; - tp->Month = tm.tm_mon + 1; - tp->Year = tm.tm_year + 1900 - 1980; -} - -struct timestamp_t *tm_current(struct timestamp_t *tp) -{ - time64_t second = ktime_get_real_seconds(); - struct tm tm; - - time64_to_tm(second, 0, &tm); - - if (second < UNIX_SECS_1980) { - tp->sec = 0; - tp->min = 0; - tp->hour = 0; - tp->day = 1; - tp->mon = 1; - tp->year = 0; - return tp; - } - - if (second >= UNIX_SECS_2108) { - tp->sec = 59; - tp->min = 59; - tp->hour = 23; - tp->day = 31; - tp->mon = 12; - tp->year = 127; - return tp; - } - - tp->sec = tm.tm_sec; - tp->min = tm.tm_min; - tp->hour = tm.tm_hour; - tp->day = tm.tm_mday; - tp->mon = tm.tm_mon + 1; - tp->year = tm.tm_year + 1900 - 1980; - - return tp; -} - -static void __lock_super(struct super_block *sb) -{ - struct exfat_sb_info *sbi = EXFAT_SB(sb); - - mutex_lock(&sbi->s_lock); -} - -static void __unlock_super(struct super_block *sb) -{ - struct exfat_sb_info *sbi = EXFAT_SB(sb); - - mutex_unlock(&sbi->s_lock); -} - -static int __is_sb_dirty(struct super_block *sb) -{ - struct exfat_sb_info *sbi = EXFAT_SB(sb); - - return sbi->s_dirt; -} - -static void __set_sb_clean(struct super_block *sb) -{ - struct exfat_sb_info *sbi = EXFAT_SB(sb); - - sbi->s_dirt = 0; -} - -static int __exfat_revalidate(struct dentry *dentry) -{ - return 0; -} - -static int exfat_revalidate(struct dentry *dentry, unsigned int flags) -{ - if (flags & LOOKUP_RCU) - return -ECHILD; - - if (dentry->d_inode) - return 1; - return __exfat_revalidate(dentry); -} - -static int exfat_revalidate_ci(struct dentry *dentry, unsigned int flags) -{ - if (flags & LOOKUP_RCU) - return -ECHILD; - - if (dentry->d_inode) - return 1; - - if (!flags) - return 0; - - if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) - return 0; - - return __exfat_revalidate(dentry); -} - -static unsigned int __exfat_striptail_len(unsigned int len, const char *name) -{ - while (len && name[len - 1] == '.') - len--; - return len; -} - -static unsigned int exfat_striptail_len(const struct qstr *qstr) -{ - return __exfat_striptail_len(qstr->len, qstr->name); -} - -static int exfat_d_hash(const struct dentry *dentry, struct qstr *qstr) -{ - qstr->hash = full_name_hash(dentry, qstr->name, - exfat_striptail_len(qstr)); - return 0; -} - -static int exfat_d_hashi(const struct dentry *dentry, struct qstr *qstr) -{ - struct super_block *sb = dentry->d_sb; - const unsigned char *name; - unsigned int len; - unsigned long hash; - - name = qstr->name; - len = exfat_striptail_len(qstr); - - hash = init_name_hash(dentry); - while (len--) - hash = partial_name_hash(nls_upper(sb, *name++), hash); - qstr->hash = end_name_hash(hash); - - return 0; -} - -static int exfat_cmpi(const struct dentry *dentry, unsigned int len, - const char *str, const struct qstr *name) -{ - struct nls_table *t = EXFAT_SB(dentry->d_sb)->nls_io; - unsigned int alen, blen; - - alen = 
exfat_striptail_len(name); - blen = __exfat_striptail_len(len, str); - if (alen == blen) { - if (!t) { - if (strncasecmp(name->name, str, alen) == 0) - return 0; - } else { - if (nls_strnicmp(t, name->name, str, alen) == 0) - return 0; - } - } - return 1; -} - -static int exfat_cmp(const struct dentry *dentry, unsigned int len, - const char *str, const struct qstr *name) -{ - unsigned int alen, blen; - - alen = exfat_striptail_len(name); - blen = __exfat_striptail_len(len, str); - if (alen == blen) { - if (strncmp(name->name, str, alen) == 0) - return 0; - } - return 1; -} - -static const struct dentry_operations exfat_ci_dentry_ops = { - .d_revalidate = exfat_revalidate_ci, - .d_hash = exfat_d_hashi, - .d_compare = exfat_cmpi, -}; - -static const struct dentry_operations exfat_dentry_ops = { - .d_revalidate = exfat_revalidate, - .d_hash = exfat_d_hash, - .d_compare = exfat_cmp, -}; - -static DEFINE_MUTEX(z_mutex); - -static inline void fs_sync(struct super_block *sb, bool do_sync) -{ - if (do_sync) - exfat_bdev_sync(sb); -} - -/* - * If ->i_mode can't hold S_IWUGO (i.e. ATTR_RO), we use ->i_attrs to - * save ATTR_RO instead of ->i_mode. - * - * If it's directory and !sbi->options.rodir, ATTR_RO isn't read-only - * bit, it's just used as flag for app. - */ -static inline int exfat_mode_can_hold_ro(struct inode *inode) -{ - struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb); - - if (S_ISDIR(inode->i_mode)) - return 0; - - if ((~sbi->options.fs_fmask) & 0222) - return 1; - return 0; -} - -/* Convert attribute bits and a mask to the UNIX mode. */ -static inline mode_t exfat_make_mode(struct exfat_sb_info *sbi, u32 attr, - mode_t mode) -{ - if ((attr & ATTR_READONLY) && !(attr & ATTR_SUBDIR)) - mode &= ~0222; - - if (attr & ATTR_SUBDIR) - return (mode & ~sbi->options.fs_dmask) | S_IFDIR; - else if (attr & ATTR_SYMLINK) - return (mode & ~sbi->options.fs_dmask) | S_IFLNK; - else - return (mode & ~sbi->options.fs_fmask) | S_IFREG; -} - -/* Return the FAT attribute byte for this inode */ -static inline u32 exfat_make_attr(struct inode *inode) -{ - if (exfat_mode_can_hold_ro(inode) && !(inode->i_mode & 0222)) - return (EXFAT_I(inode)->fid.attr) | ATTR_READONLY; - else - return EXFAT_I(inode)->fid.attr; -} - -static inline void exfat_save_attr(struct inode *inode, u32 attr) -{ - if (exfat_mode_can_hold_ro(inode)) - EXFAT_I(inode)->fid.attr = attr & ATTR_RWMASK; - else - EXFAT_I(inode)->fid.attr = attr & (ATTR_RWMASK | ATTR_READONLY); -} - -static int ffsMountVol(struct super_block *sb) -{ - int i, ret; - struct pbr_sector_t *p_pbr; - struct buffer_head *tmp_bh = NULL; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - pr_info("[EXFAT] trying to mount...\n"); - - mutex_lock(&z_mutex); - - exfat_buf_init(sb); - - mutex_init(&p_fs->v_mutex); - p_fs->dev_ejected = 0; - - /* open the block device */ - exfat_bdev_open(sb); - - if (p_bd->sector_size < sb->s_blocksize) { - printk(KERN_INFO "EXFAT: mount failed - sector size %d less than blocksize %ld\n", - p_bd->sector_size, sb->s_blocksize); - ret = -EINVAL; - goto out; - } - if (p_bd->sector_size > sb->s_blocksize) - sb_set_blocksize(sb, p_bd->sector_size); - - /* read Sector 0 */ - if (sector_read(sb, 0, &tmp_bh, 1) != 0) { - ret = -EIO; - goto out; - } - - p_fs->PBR_sector = 0; - - p_pbr = (struct pbr_sector_t *)tmp_bh->b_data; - - /* check the validity of PBR */ - if (GET16_A(p_pbr->signature) != PBR_SIGNATURE) { - brelse(tmp_bh); - exfat_bdev_close(sb); - ret = -EFSCORRUPTED; - goto out; - } - 
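/*
 * [Editor's note, not part of the removed driver] The removed ffsMountVol()
 * around this point validates sector 0 in two steps: it checks the PBR
 * signature word, and just below it requires the legacy BPB area to be all
 * zero -- a populated BPB is taken to mean a FAT/VFAT volume was handed to
 * the exFAT driver. A minimal stand-alone sketch of that detection logic
 * follows; the struct layout and helper name are assumptions for
 * illustration only, not the driver's actual definitions.
 */
#include <stdint.h>
#include <stddef.h>

#define PBR_SIGNATURE 0xAA55	/* boot-sector signature checked by the removed code */

/* Simplified first-sector layout: exFAT keeps the legacy BPB bytes zeroed. */
struct pbr_sector {
	uint8_t jmp_boot[3];
	uint8_t oem_name[8];
	uint8_t bpb[53];	/* legacy BPB region; non-zero suggests FAT/VFAT */
	uint8_t boot_code[446];
	uint8_t signature[2];	/* 0x55 0xAA on disk, i.e. 0xAA55 little-endian */
};

/* Return 0 for a plausible exFAT PBR, a negative value otherwise. */
static int check_exfat_pbr(const struct pbr_sector *p)
{
	uint16_t sig = p->signature[0] | ((uint16_t)p->signature[1] << 8);
	size_t i;

	if (sig != PBR_SIGNATURE)
		return -1;	/* not a valid boot sector at all */

	for (i = 0; i < sizeof(p->bpb); i++)
		if (p->bpb[i])
			return -2;	/* BPB in use: looks like FAT/VFAT, not exFAT */

	return 0;
}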
- /* fill fs_struct */ - for (i = 0; i < 53; i++) - if (p_pbr->bpb[i]) - break; - - if (i < 53) { - /* Not sure how we'd get here, but complain if it does */ - ret = -EINVAL; - pr_info("EXFAT: Attempted to mount VFAT filesystem\n"); - goto out; - } else { - ret = exfat_mount(sb, p_pbr); - } - - brelse(tmp_bh); - - if (ret) { - exfat_bdev_close(sb); - goto out; - } - - ret = load_alloc_bitmap(sb); - if (ret) { - exfat_bdev_close(sb); - goto out; - } - ret = load_upcase_table(sb); - if (ret) { - free_alloc_bitmap(sb); - exfat_bdev_close(sb); - goto out; - } - - if (p_fs->dev_ejected) { - free_upcase_table(sb); - free_alloc_bitmap(sb); - exfat_bdev_close(sb); - ret = -EIO; - goto out; - } - - pr_info("[EXFAT] mounted successfully\n"); - -out: - mutex_unlock(&z_mutex); - - return ret; -} - -static int ffsUmountVol(struct super_block *sb) -{ - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - int err = 0; - - pr_info("[EXFAT] trying to unmount...\n"); - - mutex_lock(&z_mutex); - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - fs_sync(sb, true); - fs_set_vol_flags(sb, VOL_CLEAN); - - free_upcase_table(sb); - free_alloc_bitmap(sb); - - exfat_fat_release_all(sb); - exfat_buf_release_all(sb); - - /* close the block device */ - exfat_bdev_close(sb); - - if (p_fs->dev_ejected) { - pr_info("[EXFAT] unmounted with media errors. Device is already ejected.\n"); - err = -EIO; - } - - exfat_buf_shutdown(sb); - - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - mutex_unlock(&z_mutex); - - pr_info("[EXFAT] unmounted successfully\n"); - - return err; -} - -static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info) -{ - int err = 0; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - /* check the validity of pointer parameters */ - if (!info) - return -EINVAL; - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - if (p_fs->used_clusters == UINT_MAX) - p_fs->used_clusters = exfat_count_used_clusters(sb); - - info->FatType = p_fs->vol_type; - info->ClusterSize = p_fs->cluster_size; - info->NumClusters = p_fs->num_clusters - 2; /* clu 0 & 1 */ - info->UsedClusters = p_fs->used_clusters; - info->FreeClusters = info->NumClusters - info->UsedClusters; - - if (p_fs->dev_ejected) - err = -EIO; - - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return err; -} - -static int ffsSyncVol(struct super_block *sb, bool do_sync) -{ - int err = 0; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - /* synchronize the file system */ - fs_sync(sb, do_sync); - fs_set_vol_flags(sb, VOL_CLEAN); - - if (p_fs->dev_ejected) - err = -EIO; - - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return err; -} - -/*----------------------------------------------------------------------*/ -/* File Operation Functions */ -/*----------------------------------------------------------------------*/ - -static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid) -{ - int ret, dentry, num_entries; - struct chain_t dir; - struct uni_name_t uni_name; - struct dos_name_t dos_name; - struct dentry_t *ep, *ep2; - struct entry_set_cache_t *es = NULL; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - pr_debug("%s entered\n", __func__); - - /* check the validity of 
pointer parameters */ - if (!fid || !path || (*path == '\0')) - return -EINVAL; - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - /* check the validity of directory name in the given pathname */ - ret = resolve_path(inode, path, &dir, &uni_name); - if (ret) - goto out; - - ret = get_num_entries_and_dos_name(sb, &dir, &uni_name, &num_entries, - &dos_name); - if (ret) - goto out; - - /* search the file name for directories */ - dentry = exfat_find_dir_entry(sb, &dir, &uni_name, num_entries, - &dos_name, TYPE_ALL); - if (dentry < -1) { - ret = -ENOENT; - goto out; - } - - fid->dir.dir = dir.dir; - fid->dir.size = dir.size; - fid->dir.flags = dir.flags; - fid->entry = dentry; - - if (dentry == -1) { - fid->type = TYPE_DIR; - fid->rwoffset = 0; - fid->hint_last_off = -1; - - fid->attr = ATTR_SUBDIR; - fid->flags = 0x01; - fid->size = 0; - fid->start_clu = p_fs->root_dir; - } else { - es = get_entry_set_in_dir(sb, &dir, dentry, - ES_2_ENTRIES, &ep); - if (!es) { - ret = -ENOENT; - goto out; - } - ep2 = ep + 1; - - fid->type = exfat_get_entry_type(ep); - fid->rwoffset = 0; - fid->hint_last_off = -1; - fid->attr = exfat_get_entry_attr(ep); - - fid->size = exfat_get_entry_size(ep2); - if ((fid->type == TYPE_FILE) && (fid->size == 0)) { - fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01; - fid->start_clu = CLUSTER_32(~0); - } else { - fid->flags = exfat_get_entry_flag(ep2); - fid->start_clu = exfat_get_entry_clu0(ep2); - } - - release_entry_set(es); - } - - if (p_fs->dev_ejected) - ret = -EIO; -out: - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return ret; -} - -static int ffsCreateFile(struct inode *inode, char *path, u8 mode, - struct file_id_t *fid) -{ - struct chain_t dir; - struct uni_name_t uni_name; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - int ret = 0; - - /* check the validity of pointer parameters */ - if (!fid || !path || (*path == '\0')) - return -EINVAL; - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - /* check the validity of directory name in the given pathname */ - ret = resolve_path(inode, path, &dir, &uni_name); - if (ret) - goto out; - - fs_set_vol_flags(sb, VOL_DIRTY); - - /* create a new file */ - ret = create_file(inode, &dir, &uni_name, mode, fid); - -#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC - fs_sync(sb, true); - fs_set_vol_flags(sb, VOL_CLEAN); -#endif - - if (p_fs->dev_ejected) - ret = -EIO; - -out: - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return ret; -} - -static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer, - u64 count, u64 *rcount) -{ - s32 offset, sec_offset, clu_offset; - u32 clu; - int ret = 0; - sector_t LogSector; - u64 oneblkread, read_bytes; - struct buffer_head *tmp_bh = NULL; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - /* check the validity of the given file id */ - if (!fid) - return -EINVAL; - - /* check the validity of pointer parameters */ - if (!buffer) - return -EINVAL; - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - /* check if the given file ID is opened */ - if (fid->type != TYPE_FILE) { - ret = -EPERM; - goto out; - } - - if (fid->rwoffset > fid->size) - fid->rwoffset = fid->size; - - if (count > (fid->size - fid->rwoffset)) - count 
= fid->size - fid->rwoffset; - - if (count == 0) { - if (rcount) - *rcount = 0; - ret = 0; - goto out; - } - - read_bytes = 0; - - while (count > 0) { - clu_offset = (s32)(fid->rwoffset >> p_fs->cluster_size_bits); - clu = fid->start_clu; - - if (fid->flags == 0x03) { - clu += clu_offset; - } else { - /* hint information */ - if ((clu_offset > 0) && (fid->hint_last_off > 0) && - (clu_offset >= fid->hint_last_off)) { - clu_offset -= fid->hint_last_off; - clu = fid->hint_last_clu; - } - - while (clu_offset > 0) { - /* clu = exfat_fat_read(sb, clu); */ - if (exfat_fat_read(sb, clu, &clu) == -1) { - ret = -EIO; - goto out; - } - - clu_offset--; - } - } - - /* hint information */ - fid->hint_last_off = (s32)(fid->rwoffset >> - p_fs->cluster_size_bits); - fid->hint_last_clu = clu; - - /* byte offset in cluster */ - offset = (s32)(fid->rwoffset & (p_fs->cluster_size - 1)); - - /* sector offset in cluster */ - sec_offset = offset >> p_bd->sector_size_bits; - - /* byte offset in sector */ - offset &= p_bd->sector_size_mask; - - LogSector = START_SECTOR(clu) + sec_offset; - - oneblkread = (u64)(p_bd->sector_size - offset); - if (oneblkread > count) - oneblkread = count; - - if ((offset == 0) && (oneblkread == p_bd->sector_size)) { - if (sector_read(sb, LogSector, &tmp_bh, 1) != - 0) - goto err_out; - memcpy((char *)buffer + read_bytes, - (char *)tmp_bh->b_data, (s32)oneblkread); - } else { - if (sector_read(sb, LogSector, &tmp_bh, 1) != - 0) - goto err_out; - memcpy((char *)buffer + read_bytes, - (char *)tmp_bh->b_data + offset, - (s32)oneblkread); - } - count -= oneblkread; - read_bytes += oneblkread; - fid->rwoffset += oneblkread; - } - brelse(tmp_bh); - -/* How did this ever work and not leak a brlse()?? */ -err_out: - /* set the size of read bytes */ - if (rcount) - *rcount = read_bytes; - - if (p_fs->dev_ejected) - ret = -EIO; - -out: - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return ret; -} - -static int ffsWriteFile(struct inode *inode, struct file_id_t *fid, - void *buffer, u64 count, u64 *wcount) -{ - bool modified = false; - s32 offset, sec_offset, clu_offset; - s32 num_clusters, num_alloc, num_alloced = (s32)~0; - int ret = 0; - u32 clu, last_clu; - sector_t LogSector; - u64 oneblkwrite, write_bytes; - struct chain_t new_clu; - struct timestamp_t tm; - struct dentry_t *ep, *ep2; - struct entry_set_cache_t *es = NULL; - struct buffer_head *tmp_bh = NULL; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - - /* check the validity of the given file id */ - if (!fid) - return -EINVAL; - - /* check the validity of pointer parameters */ - if (!buffer) - return -EINVAL; - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - /* check if the given file ID is opened */ - if (fid->type != TYPE_FILE) { - ret = -EPERM; - goto out; - } - - if (fid->rwoffset > fid->size) - fid->rwoffset = fid->size; - - if (count == 0) { - if (wcount) - *wcount = 0; - ret = 0; - goto out; - } - - fs_set_vol_flags(sb, VOL_DIRTY); - - if (fid->size == 0) - num_clusters = 0; - else - num_clusters = (s32)((fid->size - 1) >> - p_fs->cluster_size_bits) + 1; - - write_bytes = 0; - - while (count > 0) { - clu_offset = (s32)(fid->rwoffset >> p_fs->cluster_size_bits); - clu = fid->start_clu; - last_clu = fid->start_clu; - - if (fid->flags == 0x03) { - if ((clu_offset > 0) && (clu != CLUSTER_32(~0))) { - last_clu += clu_offset - 1; - - if 
(clu_offset == num_clusters) - clu = CLUSTER_32(~0); - else - clu += clu_offset; - } - } else { - /* hint information */ - if ((clu_offset > 0) && (fid->hint_last_off > 0) && - (clu_offset >= fid->hint_last_off)) { - clu_offset -= fid->hint_last_off; - clu = fid->hint_last_clu; - } - - while ((clu_offset > 0) && (clu != CLUSTER_32(~0))) { - last_clu = clu; - /* clu = exfat_fat_read(sb, clu); */ - if (exfat_fat_read(sb, clu, &clu) == -1) { - ret = -EIO; - goto out; - } - clu_offset--; - } - } - - if (clu == CLUSTER_32(~0)) { - num_alloc = (s32)((count - 1) >> - p_fs->cluster_size_bits) + 1; - new_clu.dir = (last_clu == CLUSTER_32(~0)) ? - CLUSTER_32(~0) : last_clu + 1; - new_clu.size = 0; - new_clu.flags = fid->flags; - - /* (1) allocate a chain of clusters */ - num_alloced = exfat_alloc_cluster(sb, - num_alloc, - &new_clu); - if (num_alloced == 0) - break; - if (num_alloced < 0) { - ret = num_alloced; - goto out; - } - - /* (2) append to the FAT chain */ - if (last_clu == CLUSTER_32(~0)) { - if (new_clu.flags == 0x01) - fid->flags = 0x01; - fid->start_clu = new_clu.dir; - modified = true; - } else { - if (new_clu.flags != fid->flags) { - exfat_chain_cont_cluster(sb, - fid->start_clu, - num_clusters); - fid->flags = 0x01; - modified = true; - } - if (new_clu.flags == 0x01) - exfat_fat_write(sb, last_clu, new_clu.dir); - } - - num_clusters += num_alloced; - clu = new_clu.dir; - } - - /* hint information */ - fid->hint_last_off = (s32)(fid->rwoffset >> - p_fs->cluster_size_bits); - fid->hint_last_clu = clu; - - /* byte offset in cluster */ - offset = (s32)(fid->rwoffset & (p_fs->cluster_size - 1)); - - /* sector offset in cluster */ - sec_offset = offset >> p_bd->sector_size_bits; - - /* byte offset in sector */ - offset &= p_bd->sector_size_mask; - - LogSector = START_SECTOR(clu) + sec_offset; - - oneblkwrite = (u64)(p_bd->sector_size - offset); - if (oneblkwrite > count) - oneblkwrite = count; - - if ((offset == 0) && (oneblkwrite == p_bd->sector_size)) { - if (sector_read(sb, LogSector, &tmp_bh, 0) != - 0) - goto err_out; - memcpy((char *)tmp_bh->b_data, - (char *)buffer + write_bytes, (s32)oneblkwrite); - if (sector_write(sb, LogSector, tmp_bh, 0) != - 0) { - brelse(tmp_bh); - goto err_out; - } - } else { - if ((offset > 0) || - ((fid->rwoffset + oneblkwrite) < fid->size)) { - if (sector_read(sb, LogSector, &tmp_bh, 1) != - 0) - goto err_out; - } else { - if (sector_read(sb, LogSector, &tmp_bh, 0) != - 0) - goto err_out; - } - - memcpy((char *)tmp_bh->b_data + offset, - (char *)buffer + write_bytes, (s32)oneblkwrite); - if (sector_write(sb, LogSector, tmp_bh, 0) != - 0) { - brelse(tmp_bh); - goto err_out; - } - } - - count -= oneblkwrite; - write_bytes += oneblkwrite; - fid->rwoffset += oneblkwrite; - - fid->attr |= ATTR_ARCHIVE; - - if (fid->size < fid->rwoffset) { - fid->size = fid->rwoffset; - modified = true; - } - } - - brelse(tmp_bh); - - /* (3) update the direcoty entry */ - es = get_entry_set_in_dir(sb, &fid->dir, fid->entry, - ES_ALL_ENTRIES, &ep); - if (!es) - goto err_out; - ep2 = ep + 1; - - exfat_set_entry_time(ep, tm_current(&tm), TM_MODIFY); - exfat_set_entry_attr(ep, fid->attr); - - if (modified) { - if (exfat_get_entry_flag(ep2) != fid->flags) - exfat_set_entry_flag(ep2, fid->flags); - - if (exfat_get_entry_size(ep2) != fid->size) - exfat_set_entry_size(ep2, fid->size); - - if (exfat_get_entry_clu0(ep2) != fid->start_clu) - exfat_set_entry_clu0(ep2, fid->start_clu); - } - - update_dir_checksum_with_entry_set(sb, es); - release_entry_set(es); - -#ifndef 
CONFIG_STAGING_EXFAT_DELAYED_SYNC - fs_sync(sb, true); - fs_set_vol_flags(sb, VOL_CLEAN); -#endif - -err_out: - /* set the size of written bytes */ - if (wcount) - *wcount = write_bytes; - - if (num_alloced == 0) - ret = -ENOSPC; - - else if (p_fs->dev_ejected) - ret = -EIO; - -out: - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return ret; -} - -static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size) -{ - s32 num_clusters; - u32 last_clu = CLUSTER_32(0); - int ret = 0; - struct chain_t clu; - struct timestamp_t tm; - struct dentry_t *ep, *ep2; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct file_id_t *fid = &(EXFAT_I(inode)->fid); - struct entry_set_cache_t *es = NULL; - - pr_debug("%s entered (inode %p size %llu)\n", __func__, inode, - new_size); - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - /* check if the given file ID is opened */ - if (fid->type != TYPE_FILE) { - ret = -EPERM; - goto out; - } - - if (fid->size != old_size) { - pr_err("[EXFAT] truncate : can't skip it because of size-mismatch(old:%lld->fid:%lld).\n", - old_size, fid->size); - } - - if (old_size <= new_size) { - ret = 0; - goto out; - } - - fs_set_vol_flags(sb, VOL_DIRTY); - - clu.dir = fid->start_clu; - clu.size = (s32)((old_size - 1) >> p_fs->cluster_size_bits) + 1; - clu.flags = fid->flags; - - if (new_size > 0) { - num_clusters = (s32)((new_size - 1) >> - p_fs->cluster_size_bits) + 1; - - if (clu.flags == 0x03) { - clu.dir += num_clusters; - } else { - while (num_clusters > 0) { - last_clu = clu.dir; - if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) { - ret = -EIO; - goto out; - } - num_clusters--; - } - } - - clu.size -= num_clusters; - } - - fid->size = new_size; - fid->attr |= ATTR_ARCHIVE; - if (new_size == 0) { - fid->flags = (p_fs->vol_type == EXFAT) ? 
0x03 : 0x01; - fid->start_clu = CLUSTER_32(~0); - } - - /* (1) update the directory entry */ - es = get_entry_set_in_dir(sb, &fid->dir, fid->entry, - ES_ALL_ENTRIES, &ep); - if (!es) { - ret = -ENOENT; - goto out; - } - ep2 = ep + 1; - - exfat_set_entry_time(ep, tm_current(&tm), TM_MODIFY); - exfat_set_entry_attr(ep, fid->attr); - - exfat_set_entry_size(ep2, new_size); - if (new_size == 0) { - exfat_set_entry_flag(ep2, 0x01); - exfat_set_entry_clu0(ep2, CLUSTER_32(0)); - } - - update_dir_checksum_with_entry_set(sb, es); - release_entry_set(es); - - /* (2) cut off from the FAT chain */ - if (last_clu != CLUSTER_32(0)) { - if (fid->flags == 0x01) - exfat_fat_write(sb, last_clu, CLUSTER_32(~0)); - } - - /* (3) free the clusters */ - exfat_free_cluster(sb, &clu, 0); - - /* hint information */ - fid->hint_last_off = -1; - if (fid->rwoffset > fid->size) - fid->rwoffset = fid->size; - -#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC - fs_sync(sb, true); - fs_set_vol_flags(sb, VOL_CLEAN); -#endif - - if (p_fs->dev_ejected) - ret = -EIO; - -out: - pr_debug("%s exited (%d)\n", __func__, ret); - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return ret; -} - -static void update_parent_info(struct file_id_t *fid, - struct inode *parent_inode) -{ - struct fs_info_t *p_fs = &(EXFAT_SB(parent_inode->i_sb)->fs_info); - struct file_id_t *parent_fid = &(EXFAT_I(parent_inode)->fid); - - if (unlikely((parent_fid->flags != fid->dir.flags) || - (parent_fid->size != - (fid->dir.size << p_fs->cluster_size_bits)) || - (parent_fid->start_clu != fid->dir.dir))) { - fid->dir.dir = parent_fid->start_clu; - fid->dir.flags = parent_fid->flags; - fid->dir.size = ((parent_fid->size + (p_fs->cluster_size - 1)) - >> p_fs->cluster_size_bits); - } -} - -static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid, - struct inode *new_parent_inode, struct dentry *new_dentry) -{ - s32 ret; - s32 dentry; - struct chain_t olddir, newdir; - struct chain_t *p_dir = NULL; - struct uni_name_t uni_name; - struct dentry_t *ep; - struct super_block *sb = old_parent_inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - u8 *new_path = (u8 *)new_dentry->d_name.name; - struct inode *new_inode = new_dentry->d_inode; - int num_entries; - struct file_id_t *new_fid = NULL; - s32 new_entry = 0; - - /* check the validity of the given file id */ - if (!fid) - return -EINVAL; - - /* check the validity of pointer parameters */ - if (!new_path || (*new_path == '\0')) - return -EINVAL; - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - update_parent_info(fid, old_parent_inode); - - olddir.dir = fid->dir.dir; - olddir.size = fid->dir.size; - olddir.flags = fid->dir.flags; - - dentry = fid->entry; - - /* check if the old file is "." or ".." 
*/ - if (p_fs->vol_type != EXFAT) { - if ((olddir.dir != p_fs->root_dir) && (dentry < 2)) { - ret = -EPERM; - goto out2; - } - } - - ep = get_entry_in_dir(sb, &olddir, dentry, NULL); - if (!ep) { - ret = -ENOENT; - goto out2; - } - - if (exfat_get_entry_attr(ep) & ATTR_READONLY) { - ret = -EPERM; - goto out2; - } - - /* check whether new dir is existing directory and empty */ - if (new_inode) { - u32 entry_type; - - ret = -ENOENT; - new_fid = &EXFAT_I(new_inode)->fid; - - update_parent_info(new_fid, new_parent_inode); - - p_dir = &new_fid->dir; - new_entry = new_fid->entry; - ep = get_entry_in_dir(sb, p_dir, new_entry, NULL); - if (!ep) - goto out; - - entry_type = exfat_get_entry_type(ep); - - if (entry_type == TYPE_DIR) { - struct chain_t new_clu; - - new_clu.dir = new_fid->start_clu; - new_clu.size = (s32)((new_fid->size - 1) >> - p_fs->cluster_size_bits) + 1; - new_clu.flags = new_fid->flags; - - if (!is_dir_empty(sb, &new_clu)) { - ret = -EEXIST; - goto out; - } - } - } - - /* check the validity of directory name in the given new pathname */ - ret = resolve_path(new_parent_inode, new_path, &newdir, &uni_name); - if (ret) - goto out2; - - fs_set_vol_flags(sb, VOL_DIRTY); - - if (olddir.dir == newdir.dir) - ret = exfat_rename_file(new_parent_inode, &olddir, dentry, - &uni_name, fid); - else - ret = move_file(new_parent_inode, &olddir, dentry, &newdir, - &uni_name, fid); - - if ((ret == 0) && new_inode) { - /* delete entries of new_dir */ - ep = get_entry_in_dir(sb, p_dir, new_entry, NULL); - if (!ep) - goto out; - - num_entries = exfat_count_ext_entries(sb, p_dir, - new_entry, ep); - if (num_entries < 0) - goto out; - exfat_delete_dir_entry(sb, p_dir, new_entry, 0, - num_entries + 1); - } -out: -#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC - fs_sync(sb, true); - fs_set_vol_flags(sb, VOL_CLEAN); -#endif - - if (p_fs->dev_ejected) - ret = -EIO; -out2: - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return ret; -} - -static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid) -{ - s32 dentry; - int ret = 0; - struct chain_t dir, clu_to_free; - struct dentry_t *ep; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - /* check the validity of the given file id */ - if (!fid) - return -EINVAL; - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - dir.dir = fid->dir.dir; - dir.size = fid->dir.size; - dir.flags = fid->dir.flags; - - dentry = fid->entry; - - ep = get_entry_in_dir(sb, &dir, dentry, NULL); - if (!ep) { - ret = -ENOENT; - goto out; - } - - if (exfat_get_entry_attr(ep) & ATTR_READONLY) { - ret = -EPERM; - goto out; - } - fs_set_vol_flags(sb, VOL_DIRTY); - - /* (1) update the directory entry */ - remove_file(inode, &dir, dentry); - - clu_to_free.dir = fid->start_clu; - clu_to_free.size = (s32)((fid->size - 1) >> p_fs->cluster_size_bits) + 1; - clu_to_free.flags = fid->flags; - - /* (2) free the clusters */ - exfat_free_cluster(sb, &clu_to_free, 0); - - fid->size = 0; - fid->start_clu = CLUSTER_32(~0); - fid->flags = (p_fs->vol_type == EXFAT) ? 
0x03 : 0x01; - -#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC - fs_sync(sb, true); - fs_set_vol_flags(sb, VOL_CLEAN); -#endif - - if (p_fs->dev_ejected) - ret = -EIO; -out: - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return ret; -} - -#if 0 -/* Not currently wired up */ -static int ffsSetAttr(struct inode *inode, u32 attr) -{ - u32 type; - int ret = 0; - sector_t sector = 0; - struct dentry_t *ep; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct file_id_t *fid = &(EXFAT_I(inode)->fid); - u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0; - struct entry_set_cache_t *es = NULL; - - if (fid->attr == attr) { - if (p_fs->dev_ejected) - return -EIO; - return 0; - } - - if (is_dir) { - if ((fid->dir.dir == p_fs->root_dir) && - (fid->entry == -1)) { - if (p_fs->dev_ejected) - return -EIO; - return 0; - } - } - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - /* get the directory entry of given file */ - es = get_entry_set_in_dir(sb, &fid->dir, fid->entry, - ES_ALL_ENTRIES, &ep); - if (!es) { - ret = -ENOENT; - goto out; - } - - type = exfat_get_entry_type(ep); - - if (((type == TYPE_FILE) && (attr & ATTR_SUBDIR)) || - ((type == TYPE_DIR) && (!(attr & ATTR_SUBDIR)))) { - if (p_fs->dev_ejected) - ret = -EIO; - else - ret = -EINVAL; - - release_entry_set(es); - goto out; - } - - fs_set_vol_flags(sb, VOL_DIRTY); - - /* set the file attribute */ - fid->attr = attr; - exfat_set_entry_attr(ep, attr); - - update_dir_checksum_with_entry_set(sb, es); - release_entry_set(es); - -#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC - fs_sync(sb, true); - fs_set_vol_flags(sb, VOL_CLEAN); -#endif - - if (p_fs->dev_ejected) - ret = -EIO; -out: - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return ret; -} -#endif - -static int ffsReadStat(struct inode *inode, struct dir_entry_t *info) -{ - s32 count; - int ret = 0; - struct chain_t dir; - struct uni_name_t uni_name; - struct timestamp_t tm; - struct dentry_t *ep, *ep2; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct file_id_t *fid = &(EXFAT_I(inode)->fid); - struct entry_set_cache_t *es = NULL; - u8 is_dir = (fid->type == TYPE_DIR) ? 
1 : 0; - - pr_debug("%s entered\n", __func__); - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - if (is_dir) { - if ((fid->dir.dir == p_fs->root_dir) && - (fid->entry == -1)) { - info->Attr = ATTR_SUBDIR; - memset((char *)&info->CreateTimestamp, 0, - sizeof(struct date_time_t)); - memset((char *)&info->ModifyTimestamp, 0, - sizeof(struct date_time_t)); - memset((char *)&info->AccessTimestamp, 0, - sizeof(struct date_time_t)); - strcpy(info->ShortName, "."); - strcpy(info->Name, "."); - - dir.dir = p_fs->root_dir; - dir.flags = 0x01; - - if (p_fs->root_dir == CLUSTER_32(0)) { - /* FAT16 root_dir */ - info->Size = p_fs->dentries_in_root << - DENTRY_SIZE_BITS; - } else { - info->Size = count_num_clusters(sb, &dir) << - p_fs->cluster_size_bits; - } - - count = count_dos_name_entries(sb, &dir, TYPE_DIR); - if (count < 0) { - ret = count; /* propagate error upward */ - goto out; - } - info->NumSubdirs = count; - - if (p_fs->dev_ejected) - ret = -EIO; - goto out; - } - } - - /* get the directory entry of given file or directory */ - es = get_entry_set_in_dir(sb, &fid->dir, fid->entry, - ES_2_ENTRIES, &ep); - if (!es) { - ret = -ENOENT; - goto out; - } - ep2 = ep + 1; - - /* set FILE_INFO structure using the acquired struct dentry_t */ - info->Attr = exfat_get_entry_attr(ep); - - exfat_get_entry_time(ep, &tm, TM_CREATE); - info->CreateTimestamp.Year = tm.year; - info->CreateTimestamp.Month = tm.mon; - info->CreateTimestamp.Day = tm.day; - info->CreateTimestamp.Hour = tm.hour; - info->CreateTimestamp.Minute = tm.min; - info->CreateTimestamp.Second = tm.sec; - info->CreateTimestamp.MilliSecond = 0; - - exfat_get_entry_time(ep, &tm, TM_MODIFY); - info->ModifyTimestamp.Year = tm.year; - info->ModifyTimestamp.Month = tm.mon; - info->ModifyTimestamp.Day = tm.day; - info->ModifyTimestamp.Hour = tm.hour; - info->ModifyTimestamp.Minute = tm.min; - info->ModifyTimestamp.Second = tm.sec; - info->ModifyTimestamp.MilliSecond = 0; - - memset((char *)&info->AccessTimestamp, 0, sizeof(struct date_time_t)); - - *uni_name.name = 0x0; - /* XXX this is very bad for exfat cuz name is already included in es. - * API should be revised - */ - exfat_get_uni_name_from_ext_entry(sb, &fid->dir, fid->entry, - uni_name.name); - nls_uniname_to_cstring(sb, info->Name, &uni_name); - - info->NumSubdirs = 2; - - info->Size = exfat_get_entry_size(ep2); - - release_entry_set(es); - - if (is_dir) { - dir.dir = fid->start_clu; - dir.flags = 0x01; - - if (info->Size == 0) - info->Size = (u64)count_num_clusters(sb, &dir) << - p_fs->cluster_size_bits; - - count = count_dos_name_entries(sb, &dir, TYPE_DIR); - if (count < 0) { - ret = count; /* propagate error upward */ - goto out; - } - info->NumSubdirs += count; - } - - if (p_fs->dev_ejected) - ret = -EIO; - -out: - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - pr_debug("%s exited successfully\n", __func__); - return ret; -} - -static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info) -{ - int ret = 0; - struct timestamp_t tm; - struct dentry_t *ep, *ep2; - struct entry_set_cache_t *es = NULL; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct file_id_t *fid = &(EXFAT_I(inode)->fid); - u8 is_dir = (fid->type == TYPE_DIR) ? 
1 : 0; - - pr_debug("%s entered (inode %p info %p\n", __func__, inode, info); - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - if (is_dir) { - if ((fid->dir.dir == p_fs->root_dir) && - (fid->entry == -1)) { - if (p_fs->dev_ejected) - ret = -EIO; - ret = 0; - goto out; - } - } - - fs_set_vol_flags(sb, VOL_DIRTY); - - /* get the directory entry of given file or directory */ - es = get_entry_set_in_dir(sb, &fid->dir, fid->entry, - ES_ALL_ENTRIES, &ep); - if (!es) { - ret = -ENOENT; - goto out; - } - ep2 = ep + 1; - - exfat_set_entry_attr(ep, info->Attr); - - /* set FILE_INFO structure using the acquired struct dentry_t */ - tm.sec = info->CreateTimestamp.Second; - tm.min = info->CreateTimestamp.Minute; - tm.hour = info->CreateTimestamp.Hour; - tm.day = info->CreateTimestamp.Day; - tm.mon = info->CreateTimestamp.Month; - tm.year = info->CreateTimestamp.Year; - exfat_set_entry_time(ep, &tm, TM_CREATE); - - tm.sec = info->ModifyTimestamp.Second; - tm.min = info->ModifyTimestamp.Minute; - tm.hour = info->ModifyTimestamp.Hour; - tm.day = info->ModifyTimestamp.Day; - tm.mon = info->ModifyTimestamp.Month; - tm.year = info->ModifyTimestamp.Year; - exfat_set_entry_time(ep, &tm, TM_MODIFY); - - exfat_set_entry_size(ep2, info->Size); - - update_dir_checksum_with_entry_set(sb, es); - release_entry_set(es); - - if (p_fs->dev_ejected) - ret = -EIO; - -out: - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - pr_debug("%s exited (%d)\n", __func__, ret); - - return ret; -} - -static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu) -{ - s32 num_clusters, num_alloced; - bool modified = false; - u32 last_clu; - int ret = 0; - struct chain_t new_clu; - struct dentry_t *ep; - struct entry_set_cache_t *es = NULL; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct file_id_t *fid = &(EXFAT_I(inode)->fid); - - /* check the validity of pointer parameters */ - if (!clu) - return -EINVAL; - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - fid->rwoffset = (s64)(clu_offset) << p_fs->cluster_size_bits; - - if (EXFAT_I(inode)->mmu_private == 0) - num_clusters = 0; - else - num_clusters = (s32)((EXFAT_I(inode)->mmu_private - 1) >> - p_fs->cluster_size_bits) + 1; - - *clu = last_clu = fid->start_clu; - - if (fid->flags == 0x03) { - if ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) { - last_clu += clu_offset - 1; - - if (clu_offset == num_clusters) - *clu = CLUSTER_32(~0); - else - *clu += clu_offset; - } - } else { - /* hint information */ - if ((clu_offset > 0) && (fid->hint_last_off > 0) && - (clu_offset >= fid->hint_last_off)) { - clu_offset -= fid->hint_last_off; - *clu = fid->hint_last_clu; - } - - while ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) { - last_clu = *clu; - if (exfat_fat_read(sb, *clu, clu) == -1) { - ret = -EIO; - goto out; - } - clu_offset--; - } - } - - if (*clu == CLUSTER_32(~0)) { - fs_set_vol_flags(sb, VOL_DIRTY); - - new_clu.dir = (last_clu == CLUSTER_32(~0)) ? 
CLUSTER_32(~0) : - last_clu + 1; - new_clu.size = 0; - new_clu.flags = fid->flags; - - /* (1) allocate a cluster */ - num_alloced = exfat_alloc_cluster(sb, 1, &new_clu); - if (num_alloced < 0) { - ret = -EIO; - goto out; - } else if (num_alloced == 0) { - ret = -ENOSPC; - goto out; - } - - /* (2) append to the FAT chain */ - if (last_clu == CLUSTER_32(~0)) { - if (new_clu.flags == 0x01) - fid->flags = 0x01; - fid->start_clu = new_clu.dir; - modified = true; - } else { - if (new_clu.flags != fid->flags) { - exfat_chain_cont_cluster(sb, fid->start_clu, - num_clusters); - fid->flags = 0x01; - modified = true; - } - if (new_clu.flags == 0x01) - exfat_fat_write(sb, last_clu, new_clu.dir); - } - - num_clusters += num_alloced; - *clu = new_clu.dir; - - es = get_entry_set_in_dir(sb, &fid->dir, fid->entry, - ES_ALL_ENTRIES, &ep); - if (!es) { - ret = -ENOENT; - goto out; - } - /* get stream entry */ - ep++; - - /* (3) update directory entry */ - if (modified) { - if (exfat_get_entry_flag(ep) != fid->flags) - exfat_set_entry_flag(ep, fid->flags); - - if (exfat_get_entry_clu0(ep) != fid->start_clu) - exfat_set_entry_clu0(ep, fid->start_clu); - } - - update_dir_checksum_with_entry_set(sb, es); - release_entry_set(es); - - /* add number of new blocks to inode */ - inode->i_blocks += num_alloced << (p_fs->cluster_size_bits - 9); - } - - /* hint information */ - fid->hint_last_off = (s32)(fid->rwoffset >> p_fs->cluster_size_bits); - fid->hint_last_clu = *clu; - - if (p_fs->dev_ejected) - ret = -EIO; - -out: - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return ret; -} - -/*----------------------------------------------------------------------*/ -/* Directory Operation Functions */ -/*----------------------------------------------------------------------*/ - -static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid) -{ - int ret = 0; - struct chain_t dir; - struct uni_name_t uni_name; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - pr_debug("%s entered\n", __func__); - - /* check the validity of pointer parameters */ - if (!fid || !path || (*path == '\0')) - return -EINVAL; - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - /* check the validity of directory name in the given old pathname */ - ret = resolve_path(inode, path, &dir, &uni_name); - if (ret) - goto out; - - fs_set_vol_flags(sb, VOL_DIRTY); - - ret = create_dir(inode, &dir, &uni_name, fid); - -#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC - fs_sync(sb, true); - fs_set_vol_flags(sb, VOL_CLEAN); -#endif - - if (p_fs->dev_ejected) - ret = -EIO; -out: - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return ret; -} - -static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry) -{ - int i, dentry, clu_offset; - int ret = 0; - s32 dentries_per_clu, dentries_per_clu_bits = 0; - u32 type; - sector_t sector; - struct chain_t dir, clu; - struct uni_name_t uni_name; - struct timestamp_t tm; - struct dentry_t *ep; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct file_id_t *fid = &(EXFAT_I(inode)->fid); - - /* check the validity of pointer parameters */ - if (!dir_entry) - return -EINVAL; - - /* check if the given file ID is opened */ - if (fid->type != TYPE_DIR) - return -ENOTDIR; - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - if (fid->entry == 
-1) { - dir.dir = p_fs->root_dir; - dir.flags = 0x01; - } else { - dir.dir = fid->start_clu; - dir.size = (s32)(fid->size >> p_fs->cluster_size_bits); - dir.flags = fid->flags; - } - - dentry = (s32)fid->rwoffset; - - if (dir.dir == CLUSTER_32(0)) { - /* FAT16 root_dir */ - dentries_per_clu = p_fs->dentries_in_root; - - if (dentry == dentries_per_clu) { - clu.dir = CLUSTER_32(~0); - } else { - clu.dir = dir.dir; - clu.size = dir.size; - clu.flags = dir.flags; - } - } else { - dentries_per_clu = p_fs->dentries_per_clu; - dentries_per_clu_bits = ilog2(dentries_per_clu); - - clu_offset = dentry >> dentries_per_clu_bits; - clu.dir = dir.dir; - clu.size = dir.size; - clu.flags = dir.flags; - - if (clu.flags == 0x03) { - clu.dir += clu_offset; - clu.size -= clu_offset; - } else { - /* hint_information */ - if ((clu_offset > 0) && (fid->hint_last_off > 0) && - (clu_offset >= fid->hint_last_off)) { - clu_offset -= fid->hint_last_off; - clu.dir = fid->hint_last_clu; - } - - while (clu_offset > 0) { - /* clu.dir = exfat_fat_read(sb, clu.dir); */ - if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) { - ret = -EIO; - goto out; - } - clu_offset--; - } - } - } - - while (clu.dir != CLUSTER_32(~0)) { - if (p_fs->dev_ejected) - break; - - if (dir.dir == CLUSTER_32(0)) /* FAT16 root_dir */ - i = dentry % dentries_per_clu; - else - i = dentry & (dentries_per_clu - 1); - - for ( ; i < dentries_per_clu; i++, dentry++) { - ep = get_entry_in_dir(sb, &clu, i, §or); - if (!ep) { - ret = -ENOENT; - goto out; - } - type = exfat_get_entry_type(ep); - - if (type == TYPE_UNUSED) - break; - - if ((type != TYPE_FILE) && (type != TYPE_DIR)) - continue; - - exfat_buf_lock(sb, sector); - dir_entry->Attr = exfat_get_entry_attr(ep); - - exfat_get_entry_time(ep, &tm, TM_CREATE); - dir_entry->CreateTimestamp.Year = tm.year; - dir_entry->CreateTimestamp.Month = tm.mon; - dir_entry->CreateTimestamp.Day = tm.day; - dir_entry->CreateTimestamp.Hour = tm.hour; - dir_entry->CreateTimestamp.Minute = tm.min; - dir_entry->CreateTimestamp.Second = tm.sec; - dir_entry->CreateTimestamp.MilliSecond = 0; - - exfat_get_entry_time(ep, &tm, TM_MODIFY); - dir_entry->ModifyTimestamp.Year = tm.year; - dir_entry->ModifyTimestamp.Month = tm.mon; - dir_entry->ModifyTimestamp.Day = tm.day; - dir_entry->ModifyTimestamp.Hour = tm.hour; - dir_entry->ModifyTimestamp.Minute = tm.min; - dir_entry->ModifyTimestamp.Second = tm.sec; - dir_entry->ModifyTimestamp.MilliSecond = 0; - - memset((char *)&dir_entry->AccessTimestamp, 0, - sizeof(struct date_time_t)); - - *uni_name.name = 0x0; - exfat_get_uni_name_from_ext_entry(sb, &dir, dentry, - uni_name.name); - nls_uniname_to_cstring(sb, dir_entry->Name, &uni_name); - exfat_buf_unlock(sb, sector); - - ep = get_entry_in_dir(sb, &clu, i + 1, NULL); - if (!ep) { - ret = -ENOENT; - goto out; - } - - dir_entry->Size = exfat_get_entry_size(ep); - - /* hint information */ - if (dir.dir == CLUSTER_32(0)) { /* FAT16 root_dir */ - } else { - fid->hint_last_off = dentry >> - dentries_per_clu_bits; - fid->hint_last_clu = clu.dir; - } - - fid->rwoffset = (s64)(++dentry); - - if (p_fs->dev_ejected) - ret = -EIO; - goto out; - } - - if (dir.dir == CLUSTER_32(0)) - break; /* FAT16 root_dir */ - - if (clu.flags == 0x03) { - if ((--clu.size) > 0) - clu.dir++; - else - clu.dir = CLUSTER_32(~0); - } else { - /* clu.dir = exfat_fat_read(sb, clu.dir); */ - if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) { - ret = -EIO; - goto out; - } - } - } - - *dir_entry->Name = '\0'; - - fid->rwoffset = (s64)(++dentry); - - if (p_fs->dev_ejected) - 
ret = -EIO; - -out: - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return ret; -} - -static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid) -{ - s32 dentry; - int ret = 0; - struct chain_t dir, clu_to_free; - struct super_block *sb = inode->i_sb; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - /* check the validity of the given file id */ - if (!fid) - return -EINVAL; - - dir.dir = fid->dir.dir; - dir.size = fid->dir.size; - dir.flags = fid->dir.flags; - - dentry = fid->entry; - - /* check if the file is "." or ".." */ - if (p_fs->vol_type != EXFAT) { - if ((dir.dir != p_fs->root_dir) && (dentry < 2)) - return -EPERM; - } - - /* acquire the lock for file system critical section */ - mutex_lock(&p_fs->v_mutex); - - clu_to_free.dir = fid->start_clu; - clu_to_free.size = (s32)((fid->size - 1) >> p_fs->cluster_size_bits) + 1; - clu_to_free.flags = fid->flags; - - if (!is_dir_empty(sb, &clu_to_free)) { - ret = -ENOTEMPTY; - goto out; - } - - fs_set_vol_flags(sb, VOL_DIRTY); - - /* (1) update the directory entry */ - remove_file(inode, &dir, dentry); - - /* (2) free the clusters */ - exfat_free_cluster(sb, &clu_to_free, 1); - - fid->size = 0; - fid->start_clu = CLUSTER_32(~0); - fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01; - -#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC - fs_sync(sb, true); - fs_set_vol_flags(sb, VOL_CLEAN); -#endif - - if (p_fs->dev_ejected) - ret = -EIO; - -out: - /* release the lock for file system critical section */ - mutex_unlock(&p_fs->v_mutex); - - return ret; -} - -/*======================================================================*/ -/* Directory Entry Operations */ -/*======================================================================*/ - -static int exfat_readdir(struct file *filp, struct dir_context *ctx) -{ - struct inode *inode = file_inode(filp); - struct super_block *sb = inode->i_sb; - struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct fs_info_t *p_fs = &sbi->fs_info; - struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); - struct dir_entry_t de; - unsigned long inum; - loff_t cpos; - int err = 0; - - __lock_super(sb); - - cpos = ctx->pos; - /* Fake . and .. for the root directory. 
*/ - if ((p_fs->vol_type == EXFAT) || (inode->i_ino == EXFAT_ROOT_INO)) { - while (cpos < 2) { - if (inode->i_ino == EXFAT_ROOT_INO) - inum = EXFAT_ROOT_INO; - else if (cpos == 0) - inum = inode->i_ino; - else /* (cpos == 1) */ - inum = parent_ino(filp->f_path.dentry); - - if (!dir_emit_dots(filp, ctx)) - goto out; - cpos++; - ctx->pos++; - } - if (cpos == 2) - cpos = 0; - } - if (cpos & (DENTRY_SIZE - 1)) { - err = -ENOENT; - goto out; - } - -get_new: - EXFAT_I(inode)->fid.size = i_size_read(inode); - EXFAT_I(inode)->fid.rwoffset = cpos >> DENTRY_SIZE_BITS; - - err = ffsReadDir(inode, &de); - if (err) { - /* at least we tried to read a sector - * move cpos to next sector position (should be aligned) - */ - if (err == -EIO) { - cpos += 1 << p_bd->sector_size_bits; - cpos &= ~((1 << p_bd->sector_size_bits) - 1); - } - - goto end_of_dir; - } - - cpos = EXFAT_I(inode)->fid.rwoffset << DENTRY_SIZE_BITS; - - if (!de.Name[0]) - goto end_of_dir; - - if (!memcmp(de.ShortName, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) { - inum = inode->i_ino; - } else if (!memcmp(de.ShortName, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH)) { - inum = parent_ino(filp->f_path.dentry); - } else { - loff_t i_pos = ((loff_t)EXFAT_I(inode)->fid.start_clu << 32) | - ((EXFAT_I(inode)->fid.rwoffset - 1) & 0xffffffff); - struct inode *tmp = exfat_iget(sb, i_pos); - - if (tmp) { - inum = tmp->i_ino; - iput(tmp); - } else { - inum = iunique(sb, EXFAT_ROOT_INO); - } - } - - if (!dir_emit(ctx, de.Name, strlen(de.Name), inum, - (de.Attr & ATTR_SUBDIR) ? DT_DIR : DT_REG)) - goto out; - - ctx->pos = cpos; - goto get_new; - -end_of_dir: - ctx->pos = cpos; -out: - __unlock_super(sb); - return err; -} - -static int exfat_ioctl_volume_id(struct inode *dir) -{ - struct super_block *sb = dir->i_sb; - struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct fs_info_t *p_fs = &sbi->fs_info; - - return p_fs->vol_id; -} - -static long exfat_generic_ioctl(struct file *filp, unsigned int cmd, - unsigned long arg) -{ - struct inode *inode = filp->f_path.dentry->d_inode; -#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG - unsigned int flags; -#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */ - - switch (cmd) { - case EXFAT_IOCTL_GET_VOLUME_ID: - return exfat_ioctl_volume_id(inode); -#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG - case EXFAT_IOC_GET_DEBUGFLAGS: { - struct super_block *sb = inode->i_sb; - struct exfat_sb_info *sbi = EXFAT_SB(sb); - - flags = sbi->debug_flags; - return put_user(flags, (int __user *)arg); - } - case EXFAT_IOC_SET_DEBUGFLAGS: { - struct super_block *sb = inode->i_sb; - struct exfat_sb_info *sbi = EXFAT_SB(sb); - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (get_user(flags, (int __user *)arg)) - return -EFAULT; - - __lock_super(sb); - sbi->debug_flags = flags; - __unlock_super(sb); - - return 0; - } -#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */ - default: - return -ENOTTY; /* Inappropriate ioctl for device */ - } -} - -static const struct file_operations exfat_dir_operations = { - .llseek = generic_file_llseek, - .read = generic_read_dir, - .iterate = exfat_readdir, - .unlocked_ioctl = exfat_generic_ioctl, - .fsync = generic_file_fsync, -}; - -static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) -{ - struct super_block *sb = dir->i_sb; - struct timespec64 curtime; - struct inode *inode; - struct file_id_t fid; - loff_t i_pos; - int err; - - __lock_super(sb); - - pr_debug("%s entered\n", __func__); - - err = ffsCreateFile(dir, (u8 *)dentry->d_name.name, FM_REGULAR, &fid); - if (err) - goto out; - - 
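/*
 * [Editor's note, not part of the removed driver] exfat_create() just below
 * -- like exfat_lookup(), exfat_symlink() and exfat_mkdir() further down --
 * builds i_pos as ((loff_t)fid.dir.dir << 32) | (fid.entry & 0xffffffff):
 * the parent directory's start cluster in the high 32 bits and the dentry
 * index in the low 32 bits. exfat_attach()/exfat_hash() then use that value
 * as the key into the in-memory inode hash table. A small sketch of the
 * encoding, with hypothetical helper names chosen for illustration.
 */
#include <stdint.h>

static inline uint64_t exfat_make_i_pos(uint32_t dir_start_clu, int32_t entry)
{
	/* high word: directory start cluster; low word: entry index */
	return ((uint64_t)dir_start_clu << 32) | ((uint32_t)entry & 0xffffffff);
}

static inline uint32_t exfat_i_pos_dir_cluster(uint64_t i_pos)
{
	return (uint32_t)(i_pos >> 32);
}

static inline int32_t exfat_i_pos_entry(uint64_t i_pos)
{
	return (int32_t)(i_pos & 0xffffffff);
}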
INC_IVERSION(dir); - curtime = current_time(dir); - dir->i_ctime = curtime; - dir->i_mtime = curtime; - dir->i_atime = curtime; - if (IS_DIRSYNC(dir)) - (void)exfat_sync_inode(dir); - else - mark_inode_dirty(dir); - - i_pos = ((loff_t)fid.dir.dir << 32) | (fid.entry & 0xffffffff); - - inode = exfat_build_inode(sb, &fid, i_pos); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - goto out; - } - INC_IVERSION(inode); - curtime = current_time(inode); - inode->i_mtime = curtime; - inode->i_atime = curtime; - inode->i_ctime = curtime; - /* - * timestamp is already written, so mark_inode_dirty() is unnecessary. - */ - - dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode); - d_instantiate(dentry, inode); - -out: - __unlock_super(sb); - pr_debug("%s exited\n", __func__); - return err; -} - -static int exfat_find(struct inode *dir, struct qstr *qname, - struct file_id_t *fid) -{ - int err; - - if (qname->len == 0) - return -ENOENT; - - err = ffsLookupFile(dir, (u8 *)qname->name, fid); - if (err) - return -ENOENT; - - return 0; -} - -static int exfat_d_anon_disconn(struct dentry *dentry) -{ - return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED); -} - -static struct dentry *exfat_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) -{ - struct super_block *sb = dir->i_sb; - struct inode *inode; - struct dentry *alias; - int err; - struct file_id_t fid; - loff_t i_pos; - u64 ret; - mode_t i_mode; - - __lock_super(sb); - pr_debug("%s entered\n", __func__); - err = exfat_find(dir, &dentry->d_name, &fid); - if (err) { - if (err == -ENOENT) { - inode = NULL; - goto out; - } - goto error; - } - - i_pos = ((loff_t)fid.dir.dir << 32) | (fid.entry & 0xffffffff); - inode = exfat_build_inode(sb, &fid, i_pos); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - goto error; - } - - i_mode = inode->i_mode; - if (S_ISLNK(i_mode) && !EXFAT_I(inode)->target) { - EXFAT_I(inode)->target = kmalloc(i_size_read(inode) + 1, - GFP_KERNEL); - if (!EXFAT_I(inode)->target) { - err = -ENOMEM; - goto error; - } - ffsReadFile(dir, &fid, EXFAT_I(inode)->target, - i_size_read(inode), &ret); - *(EXFAT_I(inode)->target + i_size_read(inode)) = '\0'; - } - - alias = d_find_alias(inode); - if (alias && !exfat_d_anon_disconn(alias)) { - BUG_ON(d_unhashed(alias)); - if (!S_ISDIR(i_mode)) - d_move(alias, dentry); - iput(inode); - __unlock_super(sb); - pr_debug("%s exited 1\n", __func__); - return alias; - } - dput(alias); -out: - __unlock_super(sb); - dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode); - dentry = d_splice_alias(inode, dentry); - if (dentry) - dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode); - pr_debug("%s exited 2\n", __func__); - return dentry; - -error: - __unlock_super(sb); - pr_debug("%s exited 3\n", __func__); - return ERR_PTR(err); -} - -static inline unsigned long exfat_hash(loff_t i_pos) -{ - return hash_32(i_pos, EXFAT_HASH_BITS); -} - -static void exfat_attach(struct inode *inode, loff_t i_pos) -{ - struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb); - struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos); - - spin_lock(&sbi->inode_hash_lock); - EXFAT_I(inode)->i_pos = i_pos; - hlist_add_head(&EXFAT_I(inode)->i_hash_fat, head); - spin_unlock(&sbi->inode_hash_lock); -} - -static void exfat_detach(struct inode *inode) -{ - struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb); - - spin_lock(&sbi->inode_hash_lock); - hlist_del_init(&EXFAT_I(inode)->i_hash_fat); - EXFAT_I(inode)->i_pos = 0; - spin_unlock(&sbi->inode_hash_lock); -} - -static int exfat_unlink(struct 
inode *dir, struct dentry *dentry) -{ - struct inode *inode = dentry->d_inode; - struct super_block *sb = dir->i_sb; - struct timespec64 curtime; - int err; - - __lock_super(sb); - - pr_debug("%s entered\n", __func__); - - EXFAT_I(inode)->fid.size = i_size_read(inode); - - err = ffsRemoveFile(dir, &(EXFAT_I(inode)->fid)); - if (err) - goto out; - - INC_IVERSION(dir); - curtime = current_time(dir); - dir->i_mtime = curtime; - dir->i_atime = curtime; - if (IS_DIRSYNC(dir)) - (void)exfat_sync_inode(dir); - else - mark_inode_dirty(dir); - - clear_nlink(inode); - curtime = current_time(inode); - inode->i_mtime = curtime; - inode->i_atime = curtime; - exfat_detach(inode); - remove_inode_hash(inode); - -out: - __unlock_super(sb); - pr_debug("%s exited\n", __func__); - return err; -} - -static int exfat_symlink(struct inode *dir, struct dentry *dentry, - const char *target) -{ - struct super_block *sb = dir->i_sb; - struct timespec64 curtime; - struct inode *inode; - struct file_id_t fid; - loff_t i_pos; - int err; - u64 len = (u64)strlen(target); - u64 ret; - - __lock_super(sb); - - pr_debug("%s entered\n", __func__); - - err = ffsCreateFile(dir, (u8 *)dentry->d_name.name, FM_SYMLINK, &fid); - if (err) - goto out; - - - err = ffsWriteFile(dir, &fid, (char *)target, len, &ret); - - if (err) { - ffsRemoveFile(dir, &fid); - goto out; - } - - INC_IVERSION(dir); - curtime = current_time(dir); - dir->i_ctime = curtime; - dir->i_mtime = curtime; - dir->i_atime = curtime; - if (IS_DIRSYNC(dir)) - (void)exfat_sync_inode(dir); - else - mark_inode_dirty(dir); - - i_pos = ((loff_t)fid.dir.dir << 32) | (fid.entry & 0xffffffff); - - inode = exfat_build_inode(sb, &fid, i_pos); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - goto out; - } - INC_IVERSION(inode); - curtime = current_time(inode); - inode->i_mtime = curtime; - inode->i_atime = curtime; - inode->i_ctime = curtime; - /* timestamp is already written, so mark_inode_dirty() is unneeded. */ - - EXFAT_I(inode)->target = kmemdup(target, len + 1, GFP_KERNEL); - if (!EXFAT_I(inode)->target) { - err = -ENOMEM; - goto out; - } - - dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode); - d_instantiate(dentry, inode); - -out: - __unlock_super(sb); - pr_debug("%s exited\n", __func__); - return err; -} - -static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) -{ - struct super_block *sb = dir->i_sb; - struct timespec64 curtime; - struct inode *inode; - struct file_id_t fid; - loff_t i_pos; - int err; - - __lock_super(sb); - - pr_debug("%s entered\n", __func__); - - err = ffsCreateDir(dir, (u8 *)dentry->d_name.name, &fid); - if (err) - goto out; - - INC_IVERSION(dir); - curtime = current_time(dir); - dir->i_ctime = curtime; - dir->i_mtime = curtime; - dir->i_atime = curtime; - if (IS_DIRSYNC(dir)) - (void)exfat_sync_inode(dir); - else - mark_inode_dirty(dir); - inc_nlink(dir); - - i_pos = ((loff_t)fid.dir.dir << 32) | (fid.entry & 0xffffffff); - - inode = exfat_build_inode(sb, &fid, i_pos); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - goto out; - } - INC_IVERSION(inode); - curtime = current_time(inode); - inode->i_mtime = curtime; - inode->i_atime = curtime; - inode->i_ctime = curtime; - /* timestamp is already written, so mark_inode_dirty() is unneeded. 
*/ - - dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode); - d_instantiate(dentry, inode); - -out: - __unlock_super(sb); - pr_debug("%s exited\n", __func__); - return err; -} - -static int exfat_rmdir(struct inode *dir, struct dentry *dentry) -{ - struct inode *inode = dentry->d_inode; - struct super_block *sb = dir->i_sb; - struct timespec64 curtime; - int err; - - __lock_super(sb); - - pr_debug("%s entered\n", __func__); - - EXFAT_I(inode)->fid.size = i_size_read(inode); - - err = ffsRemoveDir(dir, &(EXFAT_I(inode)->fid)); - if (err) - goto out; - - INC_IVERSION(dir); - curtime = current_time(dir); - dir->i_mtime = curtime; - dir->i_atime = curtime; - if (IS_DIRSYNC(dir)) - (void)exfat_sync_inode(dir); - else - mark_inode_dirty(dir); - drop_nlink(dir); - - clear_nlink(inode); - curtime = current_time(inode); - inode->i_mtime = curtime; - inode->i_atime = curtime; - exfat_detach(inode); - remove_inode_hash(inode); - -out: - __unlock_super(sb); - pr_debug("%s exited\n", __func__); - return err; -} - -static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry, - unsigned int flags) -{ - struct inode *old_inode, *new_inode; - struct super_block *sb = old_dir->i_sb; - struct timespec64 curtime; - loff_t i_pos; - int err; - - if (flags) - return -EINVAL; - - __lock_super(sb); - - pr_debug("%s entered\n", __func__); - - old_inode = old_dentry->d_inode; - new_inode = new_dentry->d_inode; - - EXFAT_I(old_inode)->fid.size = i_size_read(old_inode); - - err = ffsMoveFile(old_dir, &(EXFAT_I(old_inode)->fid), new_dir, - new_dentry); - if (err) - goto out; - - INC_IVERSION(new_dir); - curtime = current_time(new_dir); - new_dir->i_ctime = curtime; - new_dir->i_mtime = curtime; - new_dir->i_atime = curtime; - - if (IS_DIRSYNC(new_dir)) - (void)exfat_sync_inode(new_dir); - else - mark_inode_dirty(new_dir); - - i_pos = ((loff_t)EXFAT_I(old_inode)->fid.dir.dir << 32) | - (EXFAT_I(old_inode)->fid.entry & 0xffffffff); - - exfat_detach(old_inode); - exfat_attach(old_inode, i_pos); - if (IS_DIRSYNC(new_dir)) - (void)exfat_sync_inode(old_inode); - else - mark_inode_dirty(old_inode); - - if ((S_ISDIR(old_inode->i_mode)) && (old_dir != new_dir)) { - drop_nlink(old_dir); - if (!new_inode) - inc_nlink(new_dir); - } - INC_IVERSION(old_dir); - curtime = current_time(old_dir); - old_dir->i_ctime = curtime; - old_dir->i_mtime = curtime; - if (IS_DIRSYNC(old_dir)) - (void)exfat_sync_inode(old_dir); - else - mark_inode_dirty(old_dir); - - if (new_inode) { - exfat_detach(new_inode); - drop_nlink(new_inode); - if (S_ISDIR(new_inode->i_mode)) - drop_nlink(new_inode); - new_inode->i_ctime = current_time(new_inode); - } - -out: - __unlock_super(sb); - pr_debug("%s exited\n", __func__); - return err; -} - -static int exfat_cont_expand(struct inode *inode, loff_t size) -{ - struct address_space *mapping = inode->i_mapping; - loff_t start = i_size_read(inode), count = size - i_size_read(inode); - struct timespec64 curtime; - int err, err2; - - err = generic_cont_expand_simple(inode, size); - if (err != 0) - return err; - - curtime = current_time(inode); - inode->i_ctime = curtime; - inode->i_mtime = curtime; - mark_inode_dirty(inode); - - if (IS_SYNC(inode)) { - err = filemap_fdatawrite_range(mapping, start, - start + count - 1); - err2 = sync_mapping_buffers(mapping); - err = (err) ? (err) : (err2); - err2 = write_inode_now(inode, 1); - err = (err) ? 
(err) : (err2); - if (!err) - err = filemap_fdatawait_range(mapping, start, - start + count - 1); - } - return err; -} - -static int exfat_allow_set_time(struct exfat_sb_info *sbi, struct inode *inode) -{ - mode_t allow_utime = sbi->options.allow_utime; - - if (!uid_eq(current_fsuid(), inode->i_uid)) { - if (in_group_p(inode->i_gid)) - allow_utime >>= 3; - if (allow_utime & MAY_WRITE) - return 1; - } - - /* use a default check */ - return 0; -} - -static int exfat_sanitize_mode(const struct exfat_sb_info *sbi, - struct inode *inode, umode_t *mode_ptr) -{ - mode_t i_mode, mask, perm; - - i_mode = inode->i_mode; - - if (S_ISREG(i_mode) || S_ISLNK(i_mode)) - mask = sbi->options.fs_fmask; - else - mask = sbi->options.fs_dmask; - - perm = *mode_ptr & ~(S_IFMT | mask); - - /* Of the r and x bits, all (subject to umask) must be present.*/ - if ((perm & 0555) != (i_mode & 0555)) - return -EPERM; - - if (exfat_mode_can_hold_ro(inode)) { - /* - * Of the w bits, either all (subject to umask) or none must be - * present. - */ - if ((perm & 0222) && ((perm & 0222) != (0222 & ~mask))) - return -EPERM; - } else { - /* - * If exfat_mode_can_hold_ro(inode) is false, can't change w - * bits. - */ - if ((perm & 0222) != (0222 & ~mask)) - return -EPERM; - } - - *mode_ptr &= S_IFMT | perm; - - return 0; -} - -static void exfat_truncate(struct inode *inode, loff_t old_size) -{ - struct super_block *sb = inode->i_sb; - struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct fs_info_t *p_fs = &sbi->fs_info; - struct timespec64 curtime; - int err; - - __lock_super(sb); - - /* - * This protects against truncating a file bigger than it was then - * trying to write into the hole. - */ - if (EXFAT_I(inode)->mmu_private > i_size_read(inode)) - EXFAT_I(inode)->mmu_private = i_size_read(inode); - - if (EXFAT_I(inode)->fid.start_clu == 0) - goto out; - - err = ffsTruncateFile(inode, old_size, i_size_read(inode)); - if (err) - goto out; - - curtime = current_time(inode); - inode->i_ctime = curtime; - inode->i_mtime = curtime; - if (IS_DIRSYNC(inode)) - (void)exfat_sync_inode(inode); - else - mark_inode_dirty(inode); - - inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1)) & - ~((loff_t)p_fs->cluster_size - 1)) >> 9; -out: - __unlock_super(sb); -} - -static int exfat_setattr(struct dentry *dentry, struct iattr *attr) -{ - struct exfat_sb_info *sbi = EXFAT_SB(dentry->d_sb); - struct inode *inode = dentry->d_inode; - unsigned int ia_valid; - int error; - loff_t old_size; - - pr_debug("%s entered\n", __func__); - - if ((attr->ia_valid & ATTR_SIZE) && - attr->ia_size > i_size_read(inode)) { - error = exfat_cont_expand(inode, attr->ia_size); - if (error || attr->ia_valid == ATTR_SIZE) - return error; - attr->ia_valid &= ~ATTR_SIZE; - } - - ia_valid = attr->ia_valid; - - if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) && - exfat_allow_set_time(sbi, inode)) { - attr->ia_valid &= ~(ATTR_MTIME_SET | - ATTR_ATIME_SET | - ATTR_TIMES_SET); - } - - error = setattr_prepare(dentry, attr); - attr->ia_valid = ia_valid; - if (error) - return error; - - if (((attr->ia_valid & ATTR_UID) && - (!uid_eq(attr->ia_uid, sbi->options.fs_uid))) || - ((attr->ia_valid & ATTR_GID) && - (!gid_eq(attr->ia_gid, sbi->options.fs_gid))) || - ((attr->ia_valid & ATTR_MODE) && - (attr->ia_mode & ~(S_IFREG | S_IFLNK | S_IFDIR | 0777)))) { - return -EPERM; - } - - /* - * We don't return -EPERM here. Yes, strange, but this is too - * old behavior. 
- */ - if (attr->ia_valid & ATTR_MODE) { - if (exfat_sanitize_mode(sbi, inode, &attr->ia_mode) < 0) - attr->ia_valid &= ~ATTR_MODE; - } - - EXFAT_I(inode)->fid.size = i_size_read(inode); - - if (attr->ia_valid & ATTR_SIZE) { - old_size = i_size_read(inode); - down_write(&EXFAT_I(inode)->truncate_lock); - truncate_setsize(inode, attr->ia_size); - exfat_truncate(inode, old_size); - up_write(&EXFAT_I(inode)->truncate_lock); - } - setattr_copy(inode, attr); - mark_inode_dirty(inode); - - pr_debug("%s exited\n", __func__); - return error; -} - -static int exfat_getattr(const struct path *path, struct kstat *stat, - u32 request_mask, unsigned int flags) -{ - struct inode *inode = path->dentry->d_inode; - - pr_debug("%s entered\n", __func__); - - generic_fillattr(inode, stat); - stat->blksize = EXFAT_SB(inode->i_sb)->fs_info.cluster_size; - - pr_debug("%s exited\n", __func__); - return 0; -} - -static const struct inode_operations exfat_dir_inode_operations = { - .create = exfat_create, - .lookup = exfat_lookup, - .unlink = exfat_unlink, - .symlink = exfat_symlink, - .mkdir = exfat_mkdir, - .rmdir = exfat_rmdir, - .rename = exfat_rename, - .setattr = exfat_setattr, - .getattr = exfat_getattr, -}; - -/*======================================================================*/ -/* File Operations */ -/*======================================================================*/ -static const char *exfat_get_link(struct dentry *dentry, struct inode *inode, - struct delayed_call *done) -{ - struct exfat_inode_info *ei = EXFAT_I(inode); - - if (ei->target) { - char *cookie = ei->target; - - if (cookie) - return (char *)(ei->target); - } - return NULL; -} - -static const struct inode_operations exfat_symlink_inode_operations = { - .get_link = exfat_get_link, -}; - -static int exfat_file_release(struct inode *inode, struct file *filp) -{ - struct super_block *sb = inode->i_sb; - - EXFAT_I(inode)->fid.size = i_size_read(inode); - ffsSyncVol(sb, false); - return 0; -} - -static const struct file_operations exfat_file_operations = { - .llseek = generic_file_llseek, - .read_iter = generic_file_read_iter, - .write_iter = generic_file_write_iter, - .mmap = generic_file_mmap, - .release = exfat_file_release, - .unlocked_ioctl = exfat_generic_ioctl, - .fsync = generic_file_fsync, - .splice_read = generic_file_splice_read, -}; - -static const struct inode_operations exfat_file_inode_operations = { - .setattr = exfat_setattr, - .getattr = exfat_getattr, -}; - -/*======================================================================*/ -/* Address Space Operations */ -/*======================================================================*/ - -static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys, - unsigned long *mapped_blocks, int *create) -{ - struct super_block *sb = inode->i_sb; - struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct fs_info_t *p_fs = &sbi->fs_info; - const unsigned long blocksize = sb->s_blocksize; - const unsigned char blocksize_bits = sb->s_blocksize_bits; - sector_t last_block; - int err, clu_offset, sec_offset; - unsigned int cluster; - - *phys = 0; - *mapped_blocks = 0; - - last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits; - if (sector >= last_block) { - if (*create == 0) - return 0; - } else { - *create = 0; - } - - /* cluster offset */ - clu_offset = sector >> p_fs->sectors_per_clu_bits; - - /* sector offset in cluster */ - sec_offset = sector & (p_fs->sectors_per_clu - 1); - - EXFAT_I(inode)->fid.size = i_size_read(inode); - - err = 
ffsMapCluster(inode, clu_offset, &cluster); - - if (!err && (cluster != CLUSTER_32(~0))) { - *phys = START_SECTOR(cluster) + sec_offset; - *mapped_blocks = p_fs->sectors_per_clu - sec_offset; - } - - return 0; -} - -static int exfat_get_block(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create) -{ - struct super_block *sb = inode->i_sb; - unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; - int err; - unsigned long mapped_blocks; - sector_t phys; - - __lock_super(sb); - - err = exfat_bmap(inode, iblock, &phys, &mapped_blocks, &create); - if (err) { - __unlock_super(sb); - return err; - } - - if (phys) { - max_blocks = min(mapped_blocks, max_blocks); - if (create) { - EXFAT_I(inode)->mmu_private += max_blocks << - sb->s_blocksize_bits; - set_buffer_new(bh_result); - } - map_bh(bh_result, sb, phys); - } - - bh_result->b_size = max_blocks << sb->s_blocksize_bits; - __unlock_super(sb); - - return 0; -} - -static int exfat_readpage(struct file *file, struct page *page) -{ - return mpage_readpage(page, exfat_get_block); -} - -static int exfat_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned int nr_pages) -{ - return mpage_readpages(mapping, pages, nr_pages, exfat_get_block); -} - -static int exfat_writepage(struct page *page, struct writeback_control *wbc) -{ - return block_write_full_page(page, exfat_get_block, wbc); -} - -static int exfat_writepages(struct address_space *mapping, - struct writeback_control *wbc) -{ - return mpage_writepages(mapping, wbc, exfat_get_block); -} - -static void exfat_write_failed(struct address_space *mapping, loff_t to) -{ - struct inode *inode = mapping->host; - - if (to > i_size_read(inode)) { - truncate_pagecache(inode, i_size_read(inode)); - EXFAT_I(inode)->fid.size = i_size_read(inode); - exfat_truncate(inode, i_size_read(inode)); - } -} - -static int exfat_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned int len, unsigned int flags, - struct page **pagep, void **fsdata) -{ - int ret; - - *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, - exfat_get_block, - &EXFAT_I(mapping->host)->mmu_private); - - if (ret < 0) - exfat_write_failed(mapping, pos + len); - return ret; -} - -static int exfat_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned int len, unsigned int copied, - struct page *pagep, void *fsdata) -{ - struct inode *inode = mapping->host; - struct file_id_t *fid = &(EXFAT_I(inode)->fid); - struct timespec64 curtime; - int err; - - err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); - - if (err < len) - exfat_write_failed(mapping, pos + len); - - if (!(err < 0) && !(fid->attr & ATTR_ARCHIVE)) { - curtime = current_time(inode); - inode->i_mtime = curtime; - inode->i_ctime = curtime; - fid->attr |= ATTR_ARCHIVE; - mark_inode_dirty(inode); - } - return err; -} - -static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter) -{ - struct inode *inode = iocb->ki_filp->f_mapping->host; - struct address_space *mapping = iocb->ki_filp->f_mapping; - ssize_t ret; - int rw; - - rw = iov_iter_rw(iter); - - if (rw == WRITE) { - if (EXFAT_I(inode)->mmu_private < iov_iter_count(iter)) - return 0; - } - ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block); - - if ((ret < 0) && (rw & WRITE)) - exfat_write_failed(mapping, iov_iter_count(iter)); - return ret; -} - -static sector_t _exfat_bmap(struct address_space *mapping, sector_t block) 
-{ - sector_t blocknr; - - /* exfat_get_cluster() assumes the requested blocknr isn't truncated. */ - down_read(&EXFAT_I(mapping->host)->truncate_lock); - blocknr = generic_block_bmap(mapping, block, exfat_get_block); - up_read(&EXFAT_I(mapping->host)->truncate_lock); - - return blocknr; -} - -static const struct address_space_operations exfat_aops = { - .readpage = exfat_readpage, - .readpages = exfat_readpages, - .writepage = exfat_writepage, - .writepages = exfat_writepages, - .write_begin = exfat_write_begin, - .write_end = exfat_write_end, - .direct_IO = exfat_direct_IO, - .bmap = _exfat_bmap -}; - -/*======================================================================*/ -/* Super Operations */ -/*======================================================================*/ - -static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos) -{ - struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct exfat_inode_info *info; - struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos); - struct inode *inode = NULL; - - spin_lock(&sbi->inode_hash_lock); - hlist_for_each_entry(info, head, i_hash_fat) { - BUG_ON(info->vfs_inode.i_sb != sb); - - if (i_pos != info->i_pos) - continue; - inode = igrab(&info->vfs_inode); - if (inode) - break; - } - spin_unlock(&sbi->inode_hash_lock); - return inode; -} - -/* doesn't deal with root inode */ -static int exfat_fill_inode(struct inode *inode, struct file_id_t *fid) -{ - struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb); - struct fs_info_t *p_fs = &sbi->fs_info; - struct dir_entry_t info; - - memcpy(&(EXFAT_I(inode)->fid), fid, sizeof(struct file_id_t)); - - ffsReadStat(inode, &info); - - EXFAT_I(inode)->i_pos = 0; - EXFAT_I(inode)->target = NULL; - inode->i_uid = sbi->options.fs_uid; - inode->i_gid = sbi->options.fs_gid; - INC_IVERSION(inode); - inode->i_generation = prandom_u32(); - - if (info.Attr & ATTR_SUBDIR) { /* directory */ - inode->i_generation &= ~1; - inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777); - inode->i_op = &exfat_dir_inode_operations; - inode->i_fop = &exfat_dir_operations; - - i_size_write(inode, info.Size); - EXFAT_I(inode)->mmu_private = i_size_read(inode); - set_nlink(inode, info.NumSubdirs); - } else if (info.Attr & ATTR_SYMLINK) { /* symbolic link */ - inode->i_generation |= 1; - inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777); - inode->i_op = &exfat_symlink_inode_operations; - - i_size_write(inode, info.Size); - EXFAT_I(inode)->mmu_private = i_size_read(inode); - } else { /* regular file */ - inode->i_generation |= 1; - inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777); - inode->i_op = &exfat_file_inode_operations; - inode->i_fop = &exfat_file_operations; - inode->i_mapping->a_ops = &exfat_aops; - inode->i_mapping->nrpages = 0; - - i_size_write(inode, info.Size); - EXFAT_I(inode)->mmu_private = i_size_read(inode); - } - exfat_save_attr(inode, info.Attr); - - inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1)) - & ~((loff_t)p_fs->cluster_size - 1)) >> 9; - - exfat_time_fat2unix(&inode->i_mtime, &info.ModifyTimestamp); - exfat_time_fat2unix(&inode->i_ctime, &info.CreateTimestamp); - exfat_time_fat2unix(&inode->i_atime, &info.AccessTimestamp); - - return 0; -} - -static struct inode *exfat_build_inode(struct super_block *sb, - struct file_id_t *fid, loff_t i_pos) -{ - struct inode *inode; - int err; - - inode = exfat_iget(sb, i_pos); - if (inode) - goto out; - inode = new_inode(sb); - if (!inode) { - inode = ERR_PTR(-ENOMEM); - goto out; - } - inode->i_ino = iunique(sb, EXFAT_ROOT_INO); - 
SET_IVERSION(inode, 1); - err = exfat_fill_inode(inode, fid); - if (err) { - iput(inode); - inode = ERR_PTR(err); - goto out; - } - exfat_attach(inode, i_pos); - insert_inode_hash(inode); -out: - return inode; -} - -static int exfat_sync_inode(struct inode *inode) -{ - return exfat_write_inode(inode, NULL); -} - -static struct inode *exfat_alloc_inode(struct super_block *sb) -{ - struct exfat_inode_info *ei; - - ei = kmem_cache_alloc(exfat_inode_cachep, GFP_NOFS); - if (!ei) - return NULL; - - init_rwsem(&ei->truncate_lock); - - return &ei->vfs_inode; -} - -static void exfat_destroy_inode(struct inode *inode) -{ - kfree(EXFAT_I(inode)->target); - EXFAT_I(inode)->target = NULL; - - kmem_cache_free(exfat_inode_cachep, EXFAT_I(inode)); -} - -static int exfat_write_inode(struct inode *inode, struct writeback_control *wbc) -{ - struct dir_entry_t info; - - if (inode->i_ino == EXFAT_ROOT_INO) - return 0; - - info.Attr = exfat_make_attr(inode); - info.Size = i_size_read(inode); - - exfat_time_unix2fat(&inode->i_mtime, &info.ModifyTimestamp); - exfat_time_unix2fat(&inode->i_ctime, &info.CreateTimestamp); - exfat_time_unix2fat(&inode->i_atime, &info.AccessTimestamp); - - ffsWriteStat(inode, &info); - - return 0; -} - -static void exfat_evict_inode(struct inode *inode) -{ - truncate_inode_pages(&inode->i_data, 0); - - if (!inode->i_nlink) - i_size_write(inode, 0); - invalidate_inode_buffers(inode); - clear_inode(inode); - exfat_detach(inode); - - remove_inode_hash(inode); -} - -static void exfat_free_super(struct exfat_sb_info *sbi) -{ - if (sbi->nls_disk) - unload_nls(sbi->nls_disk); - if (sbi->nls_io) - unload_nls(sbi->nls_io); - if (sbi->options.iocharset != exfat_default_iocharset) - kfree(sbi->options.iocharset); - /* mutex_init is in exfat_fill_super function. 
only for 3.7+ */ - mutex_destroy(&sbi->s_lock); - kvfree(sbi); -} - -static void exfat_put_super(struct super_block *sb) -{ - struct exfat_sb_info *sbi = EXFAT_SB(sb); - - if (__is_sb_dirty(sb)) - exfat_write_super(sb); - - ffsUmountVol(sb); - - sb->s_fs_info = NULL; - exfat_free_super(sbi); -} - -static void exfat_write_super(struct super_block *sb) -{ - __lock_super(sb); - - __set_sb_clean(sb); - - if (!sb_rdonly(sb)) - ffsSyncVol(sb, true); - - __unlock_super(sb); -} - -static int exfat_sync_fs(struct super_block *sb, int wait) -{ - int err = 0; - - if (__is_sb_dirty(sb)) { - __lock_super(sb); - __set_sb_clean(sb); - err = ffsSyncVol(sb, true); - __unlock_super(sb); - } - - return err; -} - -static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf) -{ - struct super_block *sb = dentry->d_sb; - u64 id = huge_encode_dev(sb->s_bdev->bd_dev); - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - struct vol_info_t info; - - if (p_fs->used_clusters == UINT_MAX) { - if (ffsGetVolInfo(sb, &info) == -EIO) - return -EIO; - - } else { - info.FatType = p_fs->vol_type; - info.ClusterSize = p_fs->cluster_size; - info.NumClusters = p_fs->num_clusters - 2; - info.UsedClusters = p_fs->used_clusters; - info.FreeClusters = info.NumClusters - info.UsedClusters; - - if (p_fs->dev_ejected) - pr_info("[EXFAT] statfs on device that is ejected\n"); - } - - buf->f_type = sb->s_magic; - buf->f_bsize = info.ClusterSize; - buf->f_blocks = info.NumClusters; - buf->f_bfree = info.FreeClusters; - buf->f_bavail = info.FreeClusters; - buf->f_fsid.val[0] = (u32)id; - buf->f_fsid.val[1] = (u32)(id >> 32); - buf->f_namelen = 260; - - return 0; -} - -static int exfat_remount(struct super_block *sb, int *flags, char *data) -{ - *flags |= SB_NODIRATIME; - return 0; -} - -static int exfat_show_options(struct seq_file *m, struct dentry *root) -{ - struct exfat_sb_info *sbi = EXFAT_SB(root->d_sb); - struct exfat_mount_options *opts = &sbi->options; - - if (__kuid_val(opts->fs_uid)) - seq_printf(m, ",uid=%u", __kuid_val(opts->fs_uid)); - if (__kgid_val(opts->fs_gid)) - seq_printf(m, ",gid=%u", __kgid_val(opts->fs_gid)); - seq_printf(m, ",fmask=%04o", opts->fs_fmask); - seq_printf(m, ",dmask=%04o", opts->fs_dmask); - if (opts->allow_utime) - seq_printf(m, ",allow_utime=%04o", opts->allow_utime); - if (sbi->nls_disk) - seq_printf(m, ",codepage=%s", sbi->nls_disk->charset); - if (sbi->nls_io) - seq_printf(m, ",iocharset=%s", sbi->nls_io->charset); - seq_printf(m, ",namecase=%u", opts->casesensitive); - if (opts->errors == EXFAT_ERRORS_CONT) - seq_puts(m, ",errors=continue"); - else if (opts->errors == EXFAT_ERRORS_PANIC) - seq_puts(m, ",errors=panic"); - else - seq_puts(m, ",errors=remount-ro"); -#ifdef CONFIG_STAGING_EXFAT_DISCARD - if (opts->discard) - seq_puts(m, ",discard"); -#endif - return 0; -} - -static const struct super_operations exfat_sops = { - .alloc_inode = exfat_alloc_inode, - .destroy_inode = exfat_destroy_inode, - .write_inode = exfat_write_inode, - .evict_inode = exfat_evict_inode, - .put_super = exfat_put_super, - .sync_fs = exfat_sync_fs, - .statfs = exfat_statfs, - .remount_fs = exfat_remount, - .show_options = exfat_show_options, -}; - -/*======================================================================*/ -/* Export Operations */ -/*======================================================================*/ - -static struct inode *exfat_nfs_get_inode(struct super_block *sb, u64 ino, - u32 generation) -{ - struct inode *inode = NULL; - - if (ino < EXFAT_ROOT_INO) - return inode; - inode = 
ilookup(sb, ino); - - if (inode && generation && (inode->i_generation != generation)) { - iput(inode); - inode = NULL; - } - - return inode; -} - -static struct dentry *exfat_fh_to_dentry(struct super_block *sb, - struct fid *fid, int fh_len, - int fh_type) -{ - return generic_fh_to_dentry(sb, fid, fh_len, fh_type, - exfat_nfs_get_inode); -} - -static struct dentry *exfat_fh_to_parent(struct super_block *sb, - struct fid *fid, int fh_len, - int fh_type) -{ - return generic_fh_to_parent(sb, fid, fh_len, fh_type, - exfat_nfs_get_inode); -} - -static const struct export_operations exfat_export_ops = { - .fh_to_dentry = exfat_fh_to_dentry, - .fh_to_parent = exfat_fh_to_parent, -}; - -/*======================================================================*/ -/* Super Block Read Operations */ -/*======================================================================*/ - -enum { - Opt_uid, - Opt_gid, - Opt_umask, - Opt_dmask, - Opt_fmask, - Opt_allow_utime, - Opt_codepage, - Opt_charset, - Opt_namecase, - Opt_debug, - Opt_err_cont, - Opt_err_panic, - Opt_err_ro, - Opt_utf8_hack, - Opt_err, -#ifdef CONFIG_STAGING_EXFAT_DISCARD - Opt_discard, -#endif /* EXFAT_CONFIG_DISCARD */ -}; - -static const match_table_t exfat_tokens = { - {Opt_uid, "uid=%u"}, - {Opt_gid, "gid=%u"}, - {Opt_umask, "umask=%o"}, - {Opt_dmask, "dmask=%o"}, - {Opt_fmask, "fmask=%o"}, - {Opt_allow_utime, "allow_utime=%o"}, - {Opt_codepage, "codepage=%u"}, - {Opt_charset, "iocharset=%s"}, - {Opt_namecase, "namecase=%u"}, - {Opt_debug, "debug"}, - {Opt_err_cont, "errors=continue"}, - {Opt_err_panic, "errors=panic"}, - {Opt_err_ro, "errors=remount-ro"}, - {Opt_utf8_hack, "utf8"}, -#ifdef CONFIG_STAGING_EXFAT_DISCARD - {Opt_discard, "discard"}, -#endif /* CONFIG_STAGING_EXFAT_DISCARD */ - {Opt_err, NULL} -}; - -static int parse_options(char *options, int silent, int *debug, - struct exfat_mount_options *opts) -{ - char *p; - substring_t args[MAX_OPT_ARGS]; - int option; - char *iocharset; - - opts->fs_uid = current_uid(); - opts->fs_gid = current_gid(); - opts->fs_fmask = current->fs->umask; - opts->fs_dmask = current->fs->umask; - opts->allow_utime = U16_MAX; - opts->codepage = exfat_default_codepage; - opts->iocharset = exfat_default_iocharset; - opts->casesensitive = 0; - opts->errors = EXFAT_ERRORS_RO; -#ifdef CONFIG_STAGING_EXFAT_DISCARD - opts->discard = 0; -#endif - *debug = 0; - - if (!options) - goto out; - - while ((p = strsep(&options, ","))) { - int token; - - if (!*p) - continue; - - token = match_token(p, exfat_tokens, args); - switch (token) { - case Opt_uid: - if (match_int(&args[0], &option)) - return 0; - opts->fs_uid = KUIDT_INIT(option); - break; - case Opt_gid: - if (match_int(&args[0], &option)) - return 0; - opts->fs_gid = KGIDT_INIT(option); - break; - case Opt_umask: - case Opt_dmask: - case Opt_fmask: - if (match_octal(&args[0], &option)) - return 0; - if (token != Opt_dmask) - opts->fs_fmask = option; - if (token != Opt_fmask) - opts->fs_dmask = option; - break; - case Opt_allow_utime: - if (match_octal(&args[0], &option)) - return 0; - opts->allow_utime = option & 0022; - break; - case Opt_codepage: - if (match_int(&args[0], &option)) - return 0; - opts->codepage = option; - break; - case Opt_charset: - if (opts->iocharset != exfat_default_iocharset) - kfree(opts->iocharset); - iocharset = match_strdup(&args[0]); - if (!iocharset) - return -ENOMEM; - opts->iocharset = iocharset; - break; - case Opt_namecase: - if (match_int(&args[0], &option)) - return 0; - opts->casesensitive = option; - break; - case 
Opt_err_cont: - opts->errors = EXFAT_ERRORS_CONT; - break; - case Opt_err_panic: - opts->errors = EXFAT_ERRORS_PANIC; - break; - case Opt_err_ro: - opts->errors = EXFAT_ERRORS_RO; - break; - case Opt_debug: - *debug = 1; - break; -#ifdef CONFIG_STAGING_EXFAT_DISCARD - case Opt_discard: - opts->discard = 1; - break; -#endif /* CONFIG_STAGING_EXFAT_DISCARD */ - case Opt_utf8_hack: - break; - default: - if (!silent) - pr_err("[EXFAT] Unrecognized mount option %s or missing value\n", - p); - return -EINVAL; - } - } - -out: - if (opts->allow_utime == U16_MAX) - opts->allow_utime = ~opts->fs_dmask & 0022; - - return 0; -} - -static void exfat_hash_init(struct super_block *sb) -{ - struct exfat_sb_info *sbi = EXFAT_SB(sb); - int i; - - spin_lock_init(&sbi->inode_hash_lock); - for (i = 0; i < EXFAT_HASH_SIZE; i++) - INIT_HLIST_HEAD(&sbi->inode_hashtable[i]); -} - -static int exfat_read_root(struct inode *inode) -{ - struct super_block *sb = inode->i_sb; - struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct fs_info_t *p_fs = &sbi->fs_info; - struct timespec64 curtime; - struct dir_entry_t info; - - EXFAT_I(inode)->fid.dir.dir = p_fs->root_dir; - EXFAT_I(inode)->fid.dir.flags = 0x01; - EXFAT_I(inode)->fid.entry = -1; - EXFAT_I(inode)->fid.start_clu = p_fs->root_dir; - EXFAT_I(inode)->fid.flags = 0x01; - EXFAT_I(inode)->fid.type = TYPE_DIR; - EXFAT_I(inode)->fid.rwoffset = 0; - EXFAT_I(inode)->fid.hint_last_off = -1; - - EXFAT_I(inode)->target = NULL; - - ffsReadStat(inode, &info); - - inode->i_uid = sbi->options.fs_uid; - inode->i_gid = sbi->options.fs_gid; - INC_IVERSION(inode); - inode->i_generation = 0; - inode->i_mode = exfat_make_mode(sbi, ATTR_SUBDIR, 0777); - inode->i_op = &exfat_dir_inode_operations; - inode->i_fop = &exfat_dir_operations; - - i_size_write(inode, info.Size); - inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1)) - & ~((loff_t)p_fs->cluster_size - 1)) >> 9; - EXFAT_I(inode)->i_pos = ((loff_t)p_fs->root_dir << 32) | 0xffffffff; - EXFAT_I(inode)->mmu_private = i_size_read(inode); - - exfat_save_attr(inode, ATTR_SUBDIR); - curtime = current_time(inode); - inode->i_mtime = curtime; - inode->i_atime = curtime; - inode->i_ctime = curtime; - set_nlink(inode, info.NumSubdirs + 2); - - return 0; -} - -static void setup_dops(struct super_block *sb) -{ - if (EXFAT_SB(sb)->options.casesensitive == 0) - sb->s_d_op = &exfat_ci_dentry_ops; - else - sb->s_d_op = &exfat_dentry_ops; -} - -static int exfat_fill_super(struct super_block *sb, void *data, int silent) -{ - struct inode *root_inode = NULL; - struct exfat_sb_info *sbi; - int debug, ret; - long error; - - /* - * GFP_KERNEL is ok here, because while we do hold the - * supeblock lock, memory pressure can't call back into - * the filesystem, since we're only just about to mount - * it and have no inodes etc active! 
- */ - sbi = kvzalloc(sizeof(*sbi), GFP_KERNEL); - if (!sbi) - return -ENOMEM; - mutex_init(&sbi->s_lock); - sb->s_fs_info = sbi; - sb->s_flags |= SB_NODIRATIME; - sb->s_magic = EXFAT_SUPER_MAGIC; - sb->s_op = &exfat_sops; - sb->s_export_op = &exfat_export_ops; - - error = parse_options(data, silent, &debug, &sbi->options); - if (error) - goto out_fail; - - setup_dops(sb); - - error = -EIO; - sb_min_blocksize(sb, 512); - sb->s_maxbytes = 0x7fffffffffffffffLL; /* maximum file size */ - - ret = ffsMountVol(sb); - if (ret) { - if (!silent) - pr_err("[EXFAT] ffsMountVol failed\n"); - - goto out_fail; - } - - /* set up enough so that it can read an inode */ - exfat_hash_init(sb); - - /* - * The low byte of FAT's first entry must have same value with - * media-field. But in real world, too many devices is - * writing wrong value. So, removed that validity check. - * - * if (FAT_FIRST_ENT(sb, media) != first) - */ - - sbi->nls_io = load_nls(sbi->options.iocharset); - - error = -ENOMEM; - root_inode = new_inode(sb); - if (!root_inode) - goto out_fail2; - root_inode->i_ino = EXFAT_ROOT_INO; - SET_IVERSION(root_inode, 1); - - error = exfat_read_root(root_inode); - if (error < 0) - goto out_fail2; - error = -ENOMEM; - exfat_attach(root_inode, EXFAT_I(root_inode)->i_pos); - insert_inode_hash(root_inode); - sb->s_root = d_make_root(root_inode); - if (!sb->s_root) { - pr_err("[EXFAT] Getting the root inode failed\n"); - goto out_fail2; - } - - return 0; - -out_fail2: - ffsUmountVol(sb); -out_fail: - if (root_inode) - iput(root_inode); - sb->s_fs_info = NULL; - exfat_free_super(sbi); - return error; -} - -static struct dentry *exfat_fs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, - void *data) -{ - return mount_bdev(fs_type, flags, dev_name, data, exfat_fill_super); -} - -static void init_once(void *foo) -{ - struct exfat_inode_info *ei = (struct exfat_inode_info *)foo; - - INIT_HLIST_NODE(&ei->i_hash_fat); - inode_init_once(&ei->vfs_inode); -} - -static int __init exfat_init_inodecache(void) -{ - exfat_inode_cachep = kmem_cache_create("exfat_inode_cache", - sizeof(struct exfat_inode_info), - 0, - (SLAB_RECLAIM_ACCOUNT | - SLAB_MEM_SPREAD), - init_once); - if (!exfat_inode_cachep) - return -ENOMEM; - return 0; -} - -static void __exit exfat_destroy_inodecache(void) -{ - /* - * Make sure all delayed rcu free inodes are flushed before we - * destroy cache. - */ - rcu_barrier(); - kmem_cache_destroy(exfat_inode_cachep); -} - -#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG -static void exfat_debug_kill_sb(struct super_block *sb) -{ - struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct block_device *bdev = sb->s_bdev; - struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); - - long flags; - - if (sbi) { - flags = sbi->debug_flags; - - if (flags & EXFAT_DEBUGFLAGS_INVALID_UMOUNT) { - /* - * invalidate_bdev drops all device cache include - * dirty. We use this to simulate device removal. 
- */ - mutex_lock(&p_fs->v_mutex); - exfat_fat_release_all(sb); - exfat_buf_release_all(sb); - mutex_unlock(&p_fs->v_mutex); - - invalidate_bdev(bdev); - } - } - - kill_block_super(sb); -} -#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */ - -static struct file_system_type exfat_fs_type = { - .owner = THIS_MODULE, - .name = "exfat", - .mount = exfat_fs_mount, -#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG - .kill_sb = exfat_debug_kill_sb, -#else - .kill_sb = kill_block_super, -#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */ - .fs_flags = FS_REQUIRES_DEV, -}; - -static int __init init_exfat(void) -{ - int err; - - BUILD_BUG_ON(sizeof(struct dentry_t) != DENTRY_SIZE); - BUILD_BUG_ON(sizeof(struct dos_dentry_t) != DENTRY_SIZE); - BUILD_BUG_ON(sizeof(struct ext_dentry_t) != DENTRY_SIZE); - BUILD_BUG_ON(sizeof(struct file_dentry_t) != DENTRY_SIZE); - BUILD_BUG_ON(sizeof(struct strm_dentry_t) != DENTRY_SIZE); - BUILD_BUG_ON(sizeof(struct name_dentry_t) != DENTRY_SIZE); - BUILD_BUG_ON(sizeof(struct bmap_dentry_t) != DENTRY_SIZE); - BUILD_BUG_ON(sizeof(struct case_dentry_t) != DENTRY_SIZE); - BUILD_BUG_ON(sizeof(struct volm_dentry_t) != DENTRY_SIZE); - - pr_info("exFAT: Version %s\n", EXFAT_VERSION); - - err = exfat_init_inodecache(); - if (err) - return err; - - err = register_filesystem(&exfat_fs_type); - if (err) - return err; - - return 0; -} - -static void __exit exit_exfat(void) -{ - exfat_destroy_inodecache(); - unregister_filesystem(&exfat_fs_type); -} - -module_init(init_exfat); -module_exit(exit_exfat); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("exFAT Filesystem Driver"); -MODULE_ALIAS_FS("exfat"); diff --git a/drivers/staging/exfat/exfat_upcase.c b/drivers/staging/exfat/exfat_upcase.c deleted file mode 100644 index b91a1faa0e50..000000000000 --- a/drivers/staging/exfat/exfat_upcase.c +++ /dev/null @@ -1,740 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. 
- */ - -#include <linux/types.h> -#include "exfat.h" - -const u8 uni_upcase[NUM_UPCASE << 1] = { - 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, - 0x04, 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00, - 0x08, 0x00, 0x09, 0x00, 0x0A, 0x00, 0x0B, 0x00, - 0x0C, 0x00, 0x0D, 0x00, 0x0E, 0x00, 0x0F, 0x00, - 0x10, 0x00, 0x11, 0x00, 0x12, 0x00, 0x13, 0x00, - 0x14, 0x00, 0x15, 0x00, 0x16, 0x00, 0x17, 0x00, - 0x18, 0x00, 0x19, 0x00, 0x1A, 0x00, 0x1B, 0x00, - 0x1C, 0x00, 0x1D, 0x00, 0x1E, 0x00, 0x1F, 0x00, - 0x20, 0x00, 0x21, 0x00, 0x22, 0x00, 0x23, 0x00, - 0x24, 0x00, 0x25, 0x00, 0x26, 0x00, 0x27, 0x00, - 0x28, 0x00, 0x29, 0x00, 0x2A, 0x00, 0x2B, 0x00, - 0x2C, 0x00, 0x2D, 0x00, 0x2E, 0x00, 0x2F, 0x00, - 0x30, 0x00, 0x31, 0x00, 0x32, 0x00, 0x33, 0x00, - 0x34, 0x00, 0x35, 0x00, 0x36, 0x00, 0x37, 0x00, - 0x38, 0x00, 0x39, 0x00, 0x3A, 0x00, 0x3B, 0x00, - 0x3C, 0x00, 0x3D, 0x00, 0x3E, 0x00, 0x3F, 0x00, - 0x40, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00, - 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00, - 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00, - 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00, - 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00, - 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00, - 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x5B, 0x00, - 0x5C, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x5F, 0x00, - 0x60, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00, - 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00, - 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00, - 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00, - 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00, - 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00, - 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x7B, 0x00, - 0x7C, 0x00, 0x7D, 0x00, 0x7E, 0x00, 0x7F, 0x00, - 0x80, 0x00, 0x81, 0x00, 0x82, 0x00, 0x83, 0x00, - 0x84, 0x00, 0x85, 0x00, 0x86, 0x00, 0x87, 0x00, - 0x88, 0x00, 0x89, 0x00, 0x8A, 0x00, 0x8B, 0x00, - 0x8C, 0x00, 0x8D, 0x00, 0x8E, 0x00, 0x8F, 0x00, - 0x90, 0x00, 0x91, 0x00, 0x92, 0x00, 0x93, 0x00, - 0x94, 0x00, 0x95, 0x00, 0x96, 0x00, 0x97, 0x00, - 0x98, 0x00, 0x99, 0x00, 0x9A, 0x00, 0x9B, 0x00, - 0x9C, 0x00, 0x9D, 0x00, 0x9E, 0x00, 0x9F, 0x00, - 0xA0, 0x00, 0xA1, 0x00, 0xA2, 0x00, 0xA3, 0x00, - 0xA4, 0x00, 0xA5, 0x00, 0xA6, 0x00, 0xA7, 0x00, - 0xA8, 0x00, 0xA9, 0x00, 0xAA, 0x00, 0xAB, 0x00, - 0xAC, 0x00, 0xAD, 0x00, 0xAE, 0x00, 0xAF, 0x00, - 0xB0, 0x00, 0xB1, 0x00, 0xB2, 0x00, 0xB3, 0x00, - 0xB4, 0x00, 0xB5, 0x00, 0xB6, 0x00, 0xB7, 0x00, - 0xB8, 0x00, 0xB9, 0x00, 0xBA, 0x00, 0xBB, 0x00, - 0xBC, 0x00, 0xBD, 0x00, 0xBE, 0x00, 0xBF, 0x00, - 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00, - 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00, - 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00, - 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00, - 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00, - 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xD7, 0x00, - 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00, - 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0xDF, 0x00, - 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00, - 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00, - 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00, - 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00, - 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00, - 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xF7, 0x00, - 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00, - 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0x78, 0x01, - 0x00, 0x01, 0x00, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x04, 0x01, 0x04, 0x01, 0x06, 0x01, 0x06, 0x01, - 0x08, 0x01, 0x08, 0x01, 0x0A, 0x01, 0x0A, 0x01, - 0x0C, 0x01, 0x0C, 0x01, 0x0E, 0x01, 0x0E, 0x01, - 0x10, 0x01, 0x10, 0x01, 0x12, 0x01, 0x12, 0x01, - 0x14, 
0x01, 0x14, 0x01, 0x16, 0x01, 0x16, 0x01, - 0x18, 0x01, 0x18, 0x01, 0x1A, 0x01, 0x1A, 0x01, - 0x1C, 0x01, 0x1C, 0x01, 0x1E, 0x01, 0x1E, 0x01, - 0x20, 0x01, 0x20, 0x01, 0x22, 0x01, 0x22, 0x01, - 0x24, 0x01, 0x24, 0x01, 0x26, 0x01, 0x26, 0x01, - 0x28, 0x01, 0x28, 0x01, 0x2A, 0x01, 0x2A, 0x01, - 0x2C, 0x01, 0x2C, 0x01, 0x2E, 0x01, 0x2E, 0x01, - 0x30, 0x01, 0x31, 0x01, 0x32, 0x01, 0x32, 0x01, - 0x34, 0x01, 0x34, 0x01, 0x36, 0x01, 0x36, 0x01, - 0x38, 0x01, 0x39, 0x01, 0x39, 0x01, 0x3B, 0x01, - 0x3B, 0x01, 0x3D, 0x01, 0x3D, 0x01, 0x3F, 0x01, - 0x3F, 0x01, 0x41, 0x01, 0x41, 0x01, 0x43, 0x01, - 0x43, 0x01, 0x45, 0x01, 0x45, 0x01, 0x47, 0x01, - 0x47, 0x01, 0x49, 0x01, 0x4A, 0x01, 0x4A, 0x01, - 0x4C, 0x01, 0x4C, 0x01, 0x4E, 0x01, 0x4E, 0x01, - 0x50, 0x01, 0x50, 0x01, 0x52, 0x01, 0x52, 0x01, - 0x54, 0x01, 0x54, 0x01, 0x56, 0x01, 0x56, 0x01, - 0x58, 0x01, 0x58, 0x01, 0x5A, 0x01, 0x5A, 0x01, - 0x5C, 0x01, 0x5C, 0x01, 0x5E, 0x01, 0x5E, 0x01, - 0x60, 0x01, 0x60, 0x01, 0x62, 0x01, 0x62, 0x01, - 0x64, 0x01, 0x64, 0x01, 0x66, 0x01, 0x66, 0x01, - 0x68, 0x01, 0x68, 0x01, 0x6A, 0x01, 0x6A, 0x01, - 0x6C, 0x01, 0x6C, 0x01, 0x6E, 0x01, 0x6E, 0x01, - 0x70, 0x01, 0x70, 0x01, 0x72, 0x01, 0x72, 0x01, - 0x74, 0x01, 0x74, 0x01, 0x76, 0x01, 0x76, 0x01, - 0x78, 0x01, 0x79, 0x01, 0x79, 0x01, 0x7B, 0x01, - 0x7B, 0x01, 0x7D, 0x01, 0x7D, 0x01, 0x7F, 0x01, - 0x43, 0x02, 0x81, 0x01, 0x82, 0x01, 0x82, 0x01, - 0x84, 0x01, 0x84, 0x01, 0x86, 0x01, 0x87, 0x01, - 0x87, 0x01, 0x89, 0x01, 0x8A, 0x01, 0x8B, 0x01, - 0x8B, 0x01, 0x8D, 0x01, 0x8E, 0x01, 0x8F, 0x01, - 0x90, 0x01, 0x91, 0x01, 0x91, 0x01, 0x93, 0x01, - 0x94, 0x01, 0xF6, 0x01, 0x96, 0x01, 0x97, 0x01, - 0x98, 0x01, 0x98, 0x01, 0x3D, 0x02, 0x9B, 0x01, - 0x9C, 0x01, 0x9D, 0x01, 0x20, 0x02, 0x9F, 0x01, - 0xA0, 0x01, 0xA0, 0x01, 0xA2, 0x01, 0xA2, 0x01, - 0xA4, 0x01, 0xA4, 0x01, 0xA6, 0x01, 0xA7, 0x01, - 0xA7, 0x01, 0xA9, 0x01, 0xAA, 0x01, 0xAB, 0x01, - 0xAC, 0x01, 0xAC, 0x01, 0xAE, 0x01, 0xAF, 0x01, - 0xAF, 0x01, 0xB1, 0x01, 0xB2, 0x01, 0xB3, 0x01, - 0xB3, 0x01, 0xB5, 0x01, 0xB5, 0x01, 0xB7, 0x01, - 0xB8, 0x01, 0xB8, 0x01, 0xBA, 0x01, 0xBB, 0x01, - 0xBC, 0x01, 0xBC, 0x01, 0xBE, 0x01, 0xF7, 0x01, - 0xC0, 0x01, 0xC1, 0x01, 0xC2, 0x01, 0xC3, 0x01, - 0xC4, 0x01, 0xC5, 0x01, 0xC4, 0x01, 0xC7, 0x01, - 0xC8, 0x01, 0xC7, 0x01, 0xCA, 0x01, 0xCB, 0x01, - 0xCA, 0x01, 0xCD, 0x01, 0xCD, 0x01, 0xCF, 0x01, - 0xCF, 0x01, 0xD1, 0x01, 0xD1, 0x01, 0xD3, 0x01, - 0xD3, 0x01, 0xD5, 0x01, 0xD5, 0x01, 0xD7, 0x01, - 0xD7, 0x01, 0xD9, 0x01, 0xD9, 0x01, 0xDB, 0x01, - 0xDB, 0x01, 0x8E, 0x01, 0xDE, 0x01, 0xDE, 0x01, - 0xE0, 0x01, 0xE0, 0x01, 0xE2, 0x01, 0xE2, 0x01, - 0xE4, 0x01, 0xE4, 0x01, 0xE6, 0x01, 0xE6, 0x01, - 0xE8, 0x01, 0xE8, 0x01, 0xEA, 0x01, 0xEA, 0x01, - 0xEC, 0x01, 0xEC, 0x01, 0xEE, 0x01, 0xEE, 0x01, - 0xF0, 0x01, 0xF1, 0x01, 0xF2, 0x01, 0xF1, 0x01, - 0xF4, 0x01, 0xF4, 0x01, 0xF6, 0x01, 0xF7, 0x01, - 0xF8, 0x01, 0xF8, 0x01, 0xFA, 0x01, 0xFA, 0x01, - 0xFC, 0x01, 0xFC, 0x01, 0xFE, 0x01, 0xFE, 0x01, - 0x00, 0x02, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x04, 0x02, 0x04, 0x02, 0x06, 0x02, 0x06, 0x02, - 0x08, 0x02, 0x08, 0x02, 0x0A, 0x02, 0x0A, 0x02, - 0x0C, 0x02, 0x0C, 0x02, 0x0E, 0x02, 0x0E, 0x02, - 0x10, 0x02, 0x10, 0x02, 0x12, 0x02, 0x12, 0x02, - 0x14, 0x02, 0x14, 0x02, 0x16, 0x02, 0x16, 0x02, - 0x18, 0x02, 0x18, 0x02, 0x1A, 0x02, 0x1A, 0x02, - 0x1C, 0x02, 0x1C, 0x02, 0x1E, 0x02, 0x1E, 0x02, - 0x20, 0x02, 0x21, 0x02, 0x22, 0x02, 0x22, 0x02, - 0x24, 0x02, 0x24, 0x02, 0x26, 0x02, 0x26, 0x02, - 0x28, 0x02, 0x28, 0x02, 0x2A, 0x02, 0x2A, 0x02, - 0x2C, 0x02, 0x2C, 0x02, 0x2E, 0x02, 0x2E, 0x02, - 0x30, 
0x02, 0x30, 0x02, 0x32, 0x02, 0x32, 0x02, - 0x34, 0x02, 0x35, 0x02, 0x36, 0x02, 0x37, 0x02, - 0x38, 0x02, 0x39, 0x02, 0x65, 0x2C, 0x3B, 0x02, - 0x3B, 0x02, 0x3D, 0x02, 0x66, 0x2C, 0x3F, 0x02, - 0x40, 0x02, 0x41, 0x02, 0x41, 0x02, 0x43, 0x02, - 0x44, 0x02, 0x45, 0x02, 0x46, 0x02, 0x46, 0x02, - 0x48, 0x02, 0x48, 0x02, 0x4A, 0x02, 0x4A, 0x02, - 0x4C, 0x02, 0x4C, 0x02, 0x4E, 0x02, 0x4E, 0x02, - 0x50, 0x02, 0x51, 0x02, 0x52, 0x02, 0x81, 0x01, - 0x86, 0x01, 0x55, 0x02, 0x89, 0x01, 0x8A, 0x01, - 0x58, 0x02, 0x8F, 0x01, 0x5A, 0x02, 0x90, 0x01, - 0x5C, 0x02, 0x5D, 0x02, 0x5E, 0x02, 0x5F, 0x02, - 0x93, 0x01, 0x61, 0x02, 0x62, 0x02, 0x94, 0x01, - 0x64, 0x02, 0x65, 0x02, 0x66, 0x02, 0x67, 0x02, - 0x97, 0x01, 0x96, 0x01, 0x6A, 0x02, 0x62, 0x2C, - 0x6C, 0x02, 0x6D, 0x02, 0x6E, 0x02, 0x9C, 0x01, - 0x70, 0x02, 0x71, 0x02, 0x9D, 0x01, 0x73, 0x02, - 0x74, 0x02, 0x9F, 0x01, 0x76, 0x02, 0x77, 0x02, - 0x78, 0x02, 0x79, 0x02, 0x7A, 0x02, 0x7B, 0x02, - 0x7C, 0x02, 0x64, 0x2C, 0x7E, 0x02, 0x7F, 0x02, - 0xA6, 0x01, 0x81, 0x02, 0x82, 0x02, 0xA9, 0x01, - 0x84, 0x02, 0x85, 0x02, 0x86, 0x02, 0x87, 0x02, - 0xAE, 0x01, 0x44, 0x02, 0xB1, 0x01, 0xB2, 0x01, - 0x45, 0x02, 0x8D, 0x02, 0x8E, 0x02, 0x8F, 0x02, - 0x90, 0x02, 0x91, 0x02, 0xB7, 0x01, 0x93, 0x02, - 0x94, 0x02, 0x95, 0x02, 0x96, 0x02, 0x97, 0x02, - 0x98, 0x02, 0x99, 0x02, 0x9A, 0x02, 0x9B, 0x02, - 0x9C, 0x02, 0x9D, 0x02, 0x9E, 0x02, 0x9F, 0x02, - 0xA0, 0x02, 0xA1, 0x02, 0xA2, 0x02, 0xA3, 0x02, - 0xA4, 0x02, 0xA5, 0x02, 0xA6, 0x02, 0xA7, 0x02, - 0xA8, 0x02, 0xA9, 0x02, 0xAA, 0x02, 0xAB, 0x02, - 0xAC, 0x02, 0xAD, 0x02, 0xAE, 0x02, 0xAF, 0x02, - 0xB0, 0x02, 0xB1, 0x02, 0xB2, 0x02, 0xB3, 0x02, - 0xB4, 0x02, 0xB5, 0x02, 0xB6, 0x02, 0xB7, 0x02, - 0xB8, 0x02, 0xB9, 0x02, 0xBA, 0x02, 0xBB, 0x02, - 0xBC, 0x02, 0xBD, 0x02, 0xBE, 0x02, 0xBF, 0x02, - 0xC0, 0x02, 0xC1, 0x02, 0xC2, 0x02, 0xC3, 0x02, - 0xC4, 0x02, 0xC5, 0x02, 0xC6, 0x02, 0xC7, 0x02, - 0xC8, 0x02, 0xC9, 0x02, 0xCA, 0x02, 0xCB, 0x02, - 0xCC, 0x02, 0xCD, 0x02, 0xCE, 0x02, 0xCF, 0x02, - 0xD0, 0x02, 0xD1, 0x02, 0xD2, 0x02, 0xD3, 0x02, - 0xD4, 0x02, 0xD5, 0x02, 0xD6, 0x02, 0xD7, 0x02, - 0xD8, 0x02, 0xD9, 0x02, 0xDA, 0x02, 0xDB, 0x02, - 0xDC, 0x02, 0xDD, 0x02, 0xDE, 0x02, 0xDF, 0x02, - 0xE0, 0x02, 0xE1, 0x02, 0xE2, 0x02, 0xE3, 0x02, - 0xE4, 0x02, 0xE5, 0x02, 0xE6, 0x02, 0xE7, 0x02, - 0xE8, 0x02, 0xE9, 0x02, 0xEA, 0x02, 0xEB, 0x02, - 0xEC, 0x02, 0xED, 0x02, 0xEE, 0x02, 0xEF, 0x02, - 0xF0, 0x02, 0xF1, 0x02, 0xF2, 0x02, 0xF3, 0x02, - 0xF4, 0x02, 0xF5, 0x02, 0xF6, 0x02, 0xF7, 0x02, - 0xF8, 0x02, 0xF9, 0x02, 0xFA, 0x02, 0xFB, 0x02, - 0xFC, 0x02, 0xFD, 0x02, 0xFE, 0x02, 0xFF, 0x02, - 0x00, 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, 0x03, - 0x04, 0x03, 0x05, 0x03, 0x06, 0x03, 0x07, 0x03, - 0x08, 0x03, 0x09, 0x03, 0x0A, 0x03, 0x0B, 0x03, - 0x0C, 0x03, 0x0D, 0x03, 0x0E, 0x03, 0x0F, 0x03, - 0x10, 0x03, 0x11, 0x03, 0x12, 0x03, 0x13, 0x03, - 0x14, 0x03, 0x15, 0x03, 0x16, 0x03, 0x17, 0x03, - 0x18, 0x03, 0x19, 0x03, 0x1A, 0x03, 0x1B, 0x03, - 0x1C, 0x03, 0x1D, 0x03, 0x1E, 0x03, 0x1F, 0x03, - 0x20, 0x03, 0x21, 0x03, 0x22, 0x03, 0x23, 0x03, - 0x24, 0x03, 0x25, 0x03, 0x26, 0x03, 0x27, 0x03, - 0x28, 0x03, 0x29, 0x03, 0x2A, 0x03, 0x2B, 0x03, - 0x2C, 0x03, 0x2D, 0x03, 0x2E, 0x03, 0x2F, 0x03, - 0x30, 0x03, 0x31, 0x03, 0x32, 0x03, 0x33, 0x03, - 0x34, 0x03, 0x35, 0x03, 0x36, 0x03, 0x37, 0x03, - 0x38, 0x03, 0x39, 0x03, 0x3A, 0x03, 0x3B, 0x03, - 0x3C, 0x03, 0x3D, 0x03, 0x3E, 0x03, 0x3F, 0x03, - 0x40, 0x03, 0x41, 0x03, 0x42, 0x03, 0x43, 0x03, - 0x44, 0x03, 0x45, 0x03, 0x46, 0x03, 0x47, 0x03, - 0x48, 0x03, 0x49, 0x03, 0x4A, 0x03, 0x4B, 0x03, - 0x4C, 
0x03, 0x4D, 0x03, 0x4E, 0x03, 0x4F, 0x03, - 0x50, 0x03, 0x51, 0x03, 0x52, 0x03, 0x53, 0x03, - 0x54, 0x03, 0x55, 0x03, 0x56, 0x03, 0x57, 0x03, - 0x58, 0x03, 0x59, 0x03, 0x5A, 0x03, 0x5B, 0x03, - 0x5C, 0x03, 0x5D, 0x03, 0x5E, 0x03, 0x5F, 0x03, - 0x60, 0x03, 0x61, 0x03, 0x62, 0x03, 0x63, 0x03, - 0x64, 0x03, 0x65, 0x03, 0x66, 0x03, 0x67, 0x03, - 0x68, 0x03, 0x69, 0x03, 0x6A, 0x03, 0x6B, 0x03, - 0x6C, 0x03, 0x6D, 0x03, 0x6E, 0x03, 0x6F, 0x03, - 0x70, 0x03, 0x71, 0x03, 0x72, 0x03, 0x73, 0x03, - 0x74, 0x03, 0x75, 0x03, 0x76, 0x03, 0x77, 0x03, - 0x78, 0x03, 0x79, 0x03, 0x7A, 0x03, 0xFD, 0x03, - 0xFE, 0x03, 0xFF, 0x03, 0x7E, 0x03, 0x7F, 0x03, - 0x80, 0x03, 0x81, 0x03, 0x82, 0x03, 0x83, 0x03, - 0x84, 0x03, 0x85, 0x03, 0x86, 0x03, 0x87, 0x03, - 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03, 0x8B, 0x03, - 0x8C, 0x03, 0x8D, 0x03, 0x8E, 0x03, 0x8F, 0x03, - 0x90, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03, - 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03, - 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03, - 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03, - 0xA0, 0x03, 0xA1, 0x03, 0xA2, 0x03, 0xA3, 0x03, - 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03, - 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03, - 0x86, 0x03, 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03, - 0xB0, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03, - 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03, - 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03, - 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03, - 0xA0, 0x03, 0xA1, 0x03, 0xA3, 0x03, 0xA3, 0x03, - 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03, - 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03, - 0x8C, 0x03, 0x8E, 0x03, 0x8F, 0x03, 0xCF, 0x03, - 0xD0, 0x03, 0xD1, 0x03, 0xD2, 0x03, 0xD3, 0x03, - 0xD4, 0x03, 0xD5, 0x03, 0xD6, 0x03, 0xD7, 0x03, - 0xD8, 0x03, 0xD8, 0x03, 0xDA, 0x03, 0xDA, 0x03, - 0xDC, 0x03, 0xDC, 0x03, 0xDE, 0x03, 0xDE, 0x03, - 0xE0, 0x03, 0xE0, 0x03, 0xE2, 0x03, 0xE2, 0x03, - 0xE4, 0x03, 0xE4, 0x03, 0xE6, 0x03, 0xE6, 0x03, - 0xE8, 0x03, 0xE8, 0x03, 0xEA, 0x03, 0xEA, 0x03, - 0xEC, 0x03, 0xEC, 0x03, 0xEE, 0x03, 0xEE, 0x03, - 0xF0, 0x03, 0xF1, 0x03, 0xF9, 0x03, 0xF3, 0x03, - 0xF4, 0x03, 0xF5, 0x03, 0xF6, 0x03, 0xF7, 0x03, - 0xF7, 0x03, 0xF9, 0x03, 0xFA, 0x03, 0xFA, 0x03, - 0xFC, 0x03, 0xFD, 0x03, 0xFE, 0x03, 0xFF, 0x03, - 0x00, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04, - 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04, - 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04, - 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04, - 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04, - 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04, - 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04, - 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04, - 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04, - 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04, - 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04, - 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04, - 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04, - 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04, - 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04, - 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04, - 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04, - 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04, - 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04, - 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04, - 0x00, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04, - 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04, - 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04, - 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04, - 0x60, 0x04, 0x60, 0x04, 0x62, 0x04, 0x62, 0x04, - 0x64, 0x04, 0x64, 0x04, 0x66, 0x04, 0x66, 0x04, - 0x68, 
0x04, 0x68, 0x04, 0x6A, 0x04, 0x6A, 0x04, - 0x6C, 0x04, 0x6C, 0x04, 0x6E, 0x04, 0x6E, 0x04, - 0x70, 0x04, 0x70, 0x04, 0x72, 0x04, 0x72, 0x04, - 0x74, 0x04, 0x74, 0x04, 0x76, 0x04, 0x76, 0x04, - 0x78, 0x04, 0x78, 0x04, 0x7A, 0x04, 0x7A, 0x04, - 0x7C, 0x04, 0x7C, 0x04, 0x7E, 0x04, 0x7E, 0x04, - 0x80, 0x04, 0x80, 0x04, 0x82, 0x04, 0x83, 0x04, - 0x84, 0x04, 0x85, 0x04, 0x86, 0x04, 0x87, 0x04, - 0x88, 0x04, 0x89, 0x04, 0x8A, 0x04, 0x8A, 0x04, - 0x8C, 0x04, 0x8C, 0x04, 0x8E, 0x04, 0x8E, 0x04, - 0x90, 0x04, 0x90, 0x04, 0x92, 0x04, 0x92, 0x04, - 0x94, 0x04, 0x94, 0x04, 0x96, 0x04, 0x96, 0x04, - 0x98, 0x04, 0x98, 0x04, 0x9A, 0x04, 0x9A, 0x04, - 0x9C, 0x04, 0x9C, 0x04, 0x9E, 0x04, 0x9E, 0x04, - 0xA0, 0x04, 0xA0, 0x04, 0xA2, 0x04, 0xA2, 0x04, - 0xA4, 0x04, 0xA4, 0x04, 0xA6, 0x04, 0xA6, 0x04, - 0xA8, 0x04, 0xA8, 0x04, 0xAA, 0x04, 0xAA, 0x04, - 0xAC, 0x04, 0xAC, 0x04, 0xAE, 0x04, 0xAE, 0x04, - 0xB0, 0x04, 0xB0, 0x04, 0xB2, 0x04, 0xB2, 0x04, - 0xB4, 0x04, 0xB4, 0x04, 0xB6, 0x04, 0xB6, 0x04, - 0xB8, 0x04, 0xB8, 0x04, 0xBA, 0x04, 0xBA, 0x04, - 0xBC, 0x04, 0xBC, 0x04, 0xBE, 0x04, 0xBE, 0x04, - 0xC0, 0x04, 0xC1, 0x04, 0xC1, 0x04, 0xC3, 0x04, - 0xC3, 0x04, 0xC5, 0x04, 0xC5, 0x04, 0xC7, 0x04, - 0xC7, 0x04, 0xC9, 0x04, 0xC9, 0x04, 0xCB, 0x04, - 0xCB, 0x04, 0xCD, 0x04, 0xCD, 0x04, 0xC0, 0x04, - 0xD0, 0x04, 0xD0, 0x04, 0xD2, 0x04, 0xD2, 0x04, - 0xD4, 0x04, 0xD4, 0x04, 0xD6, 0x04, 0xD6, 0x04, - 0xD8, 0x04, 0xD8, 0x04, 0xDA, 0x04, 0xDA, 0x04, - 0xDC, 0x04, 0xDC, 0x04, 0xDE, 0x04, 0xDE, 0x04, - 0xE0, 0x04, 0xE0, 0x04, 0xE2, 0x04, 0xE2, 0x04, - 0xE4, 0x04, 0xE4, 0x04, 0xE6, 0x04, 0xE6, 0x04, - 0xE8, 0x04, 0xE8, 0x04, 0xEA, 0x04, 0xEA, 0x04, - 0xEC, 0x04, 0xEC, 0x04, 0xEE, 0x04, 0xEE, 0x04, - 0xF0, 0x04, 0xF0, 0x04, 0xF2, 0x04, 0xF2, 0x04, - 0xF4, 0x04, 0xF4, 0x04, 0xF6, 0x04, 0xF6, 0x04, - 0xF8, 0x04, 0xF8, 0x04, 0xFA, 0x04, 0xFA, 0x04, - 0xFC, 0x04, 0xFC, 0x04, 0xFE, 0x04, 0xFE, 0x04, - 0x00, 0x05, 0x00, 0x05, 0x02, 0x05, 0x02, 0x05, - 0x04, 0x05, 0x04, 0x05, 0x06, 0x05, 0x06, 0x05, - 0x08, 0x05, 0x08, 0x05, 0x0A, 0x05, 0x0A, 0x05, - 0x0C, 0x05, 0x0C, 0x05, 0x0E, 0x05, 0x0E, 0x05, - 0x10, 0x05, 0x10, 0x05, 0x12, 0x05, 0x12, 0x05, - 0x14, 0x05, 0x15, 0x05, 0x16, 0x05, 0x17, 0x05, - 0x18, 0x05, 0x19, 0x05, 0x1A, 0x05, 0x1B, 0x05, - 0x1C, 0x05, 0x1D, 0x05, 0x1E, 0x05, 0x1F, 0x05, - 0x20, 0x05, 0x21, 0x05, 0x22, 0x05, 0x23, 0x05, - 0x24, 0x05, 0x25, 0x05, 0x26, 0x05, 0x27, 0x05, - 0x28, 0x05, 0x29, 0x05, 0x2A, 0x05, 0x2B, 0x05, - 0x2C, 0x05, 0x2D, 0x05, 0x2E, 0x05, 0x2F, 0x05, - 0x30, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05, - 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05, - 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05, - 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05, - 0x40, 0x05, 0x41, 0x05, 0x42, 0x05, 0x43, 0x05, - 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05, - 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05, - 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05, - 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05, - 0x54, 0x05, 0x55, 0x05, 0x56, 0x05, 0x57, 0x05, - 0x58, 0x05, 0x59, 0x05, 0x5A, 0x05, 0x5B, 0x05, - 0x5C, 0x05, 0x5D, 0x05, 0x5E, 0x05, 0x5F, 0x05, - 0x60, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05, - 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05, - 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05, - 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05, - 0x40, 0x05, 0x41, 0x05, 0x42, 0x05, 0x43, 0x05, - 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05, - 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05, - 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05, - 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05, - 0x54, 
0x05, 0x55, 0x05, 0x56, 0x05, 0xFF, 0xFF, - 0xF6, 0x17, 0x63, 0x2C, 0x7E, 0x1D, 0x7F, 0x1D, - 0x80, 0x1D, 0x81, 0x1D, 0x82, 0x1D, 0x83, 0x1D, - 0x84, 0x1D, 0x85, 0x1D, 0x86, 0x1D, 0x87, 0x1D, - 0x88, 0x1D, 0x89, 0x1D, 0x8A, 0x1D, 0x8B, 0x1D, - 0x8C, 0x1D, 0x8D, 0x1D, 0x8E, 0x1D, 0x8F, 0x1D, - 0x90, 0x1D, 0x91, 0x1D, 0x92, 0x1D, 0x93, 0x1D, - 0x94, 0x1D, 0x95, 0x1D, 0x96, 0x1D, 0x97, 0x1D, - 0x98, 0x1D, 0x99, 0x1D, 0x9A, 0x1D, 0x9B, 0x1D, - 0x9C, 0x1D, 0x9D, 0x1D, 0x9E, 0x1D, 0x9F, 0x1D, - 0xA0, 0x1D, 0xA1, 0x1D, 0xA2, 0x1D, 0xA3, 0x1D, - 0xA4, 0x1D, 0xA5, 0x1D, 0xA6, 0x1D, 0xA7, 0x1D, - 0xA8, 0x1D, 0xA9, 0x1D, 0xAA, 0x1D, 0xAB, 0x1D, - 0xAC, 0x1D, 0xAD, 0x1D, 0xAE, 0x1D, 0xAF, 0x1D, - 0xB0, 0x1D, 0xB1, 0x1D, 0xB2, 0x1D, 0xB3, 0x1D, - 0xB4, 0x1D, 0xB5, 0x1D, 0xB6, 0x1D, 0xB7, 0x1D, - 0xB8, 0x1D, 0xB9, 0x1D, 0xBA, 0x1D, 0xBB, 0x1D, - 0xBC, 0x1D, 0xBD, 0x1D, 0xBE, 0x1D, 0xBF, 0x1D, - 0xC0, 0x1D, 0xC1, 0x1D, 0xC2, 0x1D, 0xC3, 0x1D, - 0xC4, 0x1D, 0xC5, 0x1D, 0xC6, 0x1D, 0xC7, 0x1D, - 0xC8, 0x1D, 0xC9, 0x1D, 0xCA, 0x1D, 0xCB, 0x1D, - 0xCC, 0x1D, 0xCD, 0x1D, 0xCE, 0x1D, 0xCF, 0x1D, - 0xD0, 0x1D, 0xD1, 0x1D, 0xD2, 0x1D, 0xD3, 0x1D, - 0xD4, 0x1D, 0xD5, 0x1D, 0xD6, 0x1D, 0xD7, 0x1D, - 0xD8, 0x1D, 0xD9, 0x1D, 0xDA, 0x1D, 0xDB, 0x1D, - 0xDC, 0x1D, 0xDD, 0x1D, 0xDE, 0x1D, 0xDF, 0x1D, - 0xE0, 0x1D, 0xE1, 0x1D, 0xE2, 0x1D, 0xE3, 0x1D, - 0xE4, 0x1D, 0xE5, 0x1D, 0xE6, 0x1D, 0xE7, 0x1D, - 0xE8, 0x1D, 0xE9, 0x1D, 0xEA, 0x1D, 0xEB, 0x1D, - 0xEC, 0x1D, 0xED, 0x1D, 0xEE, 0x1D, 0xEF, 0x1D, - 0xF0, 0x1D, 0xF1, 0x1D, 0xF2, 0x1D, 0xF3, 0x1D, - 0xF4, 0x1D, 0xF5, 0x1D, 0xF6, 0x1D, 0xF7, 0x1D, - 0xF8, 0x1D, 0xF9, 0x1D, 0xFA, 0x1D, 0xFB, 0x1D, - 0xFC, 0x1D, 0xFD, 0x1D, 0xFE, 0x1D, 0xFF, 0x1D, - 0x00, 0x1E, 0x00, 0x1E, 0x02, 0x1E, 0x02, 0x1E, - 0x04, 0x1E, 0x04, 0x1E, 0x06, 0x1E, 0x06, 0x1E, - 0x08, 0x1E, 0x08, 0x1E, 0x0A, 0x1E, 0x0A, 0x1E, - 0x0C, 0x1E, 0x0C, 0x1E, 0x0E, 0x1E, 0x0E, 0x1E, - 0x10, 0x1E, 0x10, 0x1E, 0x12, 0x1E, 0x12, 0x1E, - 0x14, 0x1E, 0x14, 0x1E, 0x16, 0x1E, 0x16, 0x1E, - 0x18, 0x1E, 0x18, 0x1E, 0x1A, 0x1E, 0x1A, 0x1E, - 0x1C, 0x1E, 0x1C, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E, - 0x20, 0x1E, 0x20, 0x1E, 0x22, 0x1E, 0x22, 0x1E, - 0x24, 0x1E, 0x24, 0x1E, 0x26, 0x1E, 0x26, 0x1E, - 0x28, 0x1E, 0x28, 0x1E, 0x2A, 0x1E, 0x2A, 0x1E, - 0x2C, 0x1E, 0x2C, 0x1E, 0x2E, 0x1E, 0x2E, 0x1E, - 0x30, 0x1E, 0x30, 0x1E, 0x32, 0x1E, 0x32, 0x1E, - 0x34, 0x1E, 0x34, 0x1E, 0x36, 0x1E, 0x36, 0x1E, - 0x38, 0x1E, 0x38, 0x1E, 0x3A, 0x1E, 0x3A, 0x1E, - 0x3C, 0x1E, 0x3C, 0x1E, 0x3E, 0x1E, 0x3E, 0x1E, - 0x40, 0x1E, 0x40, 0x1E, 0x42, 0x1E, 0x42, 0x1E, - 0x44, 0x1E, 0x44, 0x1E, 0x46, 0x1E, 0x46, 0x1E, - 0x48, 0x1E, 0x48, 0x1E, 0x4A, 0x1E, 0x4A, 0x1E, - 0x4C, 0x1E, 0x4C, 0x1E, 0x4E, 0x1E, 0x4E, 0x1E, - 0x50, 0x1E, 0x50, 0x1E, 0x52, 0x1E, 0x52, 0x1E, - 0x54, 0x1E, 0x54, 0x1E, 0x56, 0x1E, 0x56, 0x1E, - 0x58, 0x1E, 0x58, 0x1E, 0x5A, 0x1E, 0x5A, 0x1E, - 0x5C, 0x1E, 0x5C, 0x1E, 0x5E, 0x1E, 0x5E, 0x1E, - 0x60, 0x1E, 0x60, 0x1E, 0x62, 0x1E, 0x62, 0x1E, - 0x64, 0x1E, 0x64, 0x1E, 0x66, 0x1E, 0x66, 0x1E, - 0x68, 0x1E, 0x68, 0x1E, 0x6A, 0x1E, 0x6A, 0x1E, - 0x6C, 0x1E, 0x6C, 0x1E, 0x6E, 0x1E, 0x6E, 0x1E, - 0x70, 0x1E, 0x70, 0x1E, 0x72, 0x1E, 0x72, 0x1E, - 0x74, 0x1E, 0x74, 0x1E, 0x76, 0x1E, 0x76, 0x1E, - 0x78, 0x1E, 0x78, 0x1E, 0x7A, 0x1E, 0x7A, 0x1E, - 0x7C, 0x1E, 0x7C, 0x1E, 0x7E, 0x1E, 0x7E, 0x1E, - 0x80, 0x1E, 0x80, 0x1E, 0x82, 0x1E, 0x82, 0x1E, - 0x84, 0x1E, 0x84, 0x1E, 0x86, 0x1E, 0x86, 0x1E, - 0x88, 0x1E, 0x88, 0x1E, 0x8A, 0x1E, 0x8A, 0x1E, - 0x8C, 0x1E, 0x8C, 0x1E, 0x8E, 0x1E, 0x8E, 0x1E, - 0x90, 0x1E, 0x90, 0x1E, 0x92, 0x1E, 0x92, 0x1E, - 0x94, 
0x1E, 0x94, 0x1E, 0x96, 0x1E, 0x97, 0x1E, - 0x98, 0x1E, 0x99, 0x1E, 0x9A, 0x1E, 0x9B, 0x1E, - 0x9C, 0x1E, 0x9D, 0x1E, 0x9E, 0x1E, 0x9F, 0x1E, - 0xA0, 0x1E, 0xA0, 0x1E, 0xA2, 0x1E, 0xA2, 0x1E, - 0xA4, 0x1E, 0xA4, 0x1E, 0xA6, 0x1E, 0xA6, 0x1E, - 0xA8, 0x1E, 0xA8, 0x1E, 0xAA, 0x1E, 0xAA, 0x1E, - 0xAC, 0x1E, 0xAC, 0x1E, 0xAE, 0x1E, 0xAE, 0x1E, - 0xB0, 0x1E, 0xB0, 0x1E, 0xB2, 0x1E, 0xB2, 0x1E, - 0xB4, 0x1E, 0xB4, 0x1E, 0xB6, 0x1E, 0xB6, 0x1E, - 0xB8, 0x1E, 0xB8, 0x1E, 0xBA, 0x1E, 0xBA, 0x1E, - 0xBC, 0x1E, 0xBC, 0x1E, 0xBE, 0x1E, 0xBE, 0x1E, - 0xC0, 0x1E, 0xC0, 0x1E, 0xC2, 0x1E, 0xC2, 0x1E, - 0xC4, 0x1E, 0xC4, 0x1E, 0xC6, 0x1E, 0xC6, 0x1E, - 0xC8, 0x1E, 0xC8, 0x1E, 0xCA, 0x1E, 0xCA, 0x1E, - 0xCC, 0x1E, 0xCC, 0x1E, 0xCE, 0x1E, 0xCE, 0x1E, - 0xD0, 0x1E, 0xD0, 0x1E, 0xD2, 0x1E, 0xD2, 0x1E, - 0xD4, 0x1E, 0xD4, 0x1E, 0xD6, 0x1E, 0xD6, 0x1E, - 0xD8, 0x1E, 0xD8, 0x1E, 0xDA, 0x1E, 0xDA, 0x1E, - 0xDC, 0x1E, 0xDC, 0x1E, 0xDE, 0x1E, 0xDE, 0x1E, - 0xE0, 0x1E, 0xE0, 0x1E, 0xE2, 0x1E, 0xE2, 0x1E, - 0xE4, 0x1E, 0xE4, 0x1E, 0xE6, 0x1E, 0xE6, 0x1E, - 0xE8, 0x1E, 0xE8, 0x1E, 0xEA, 0x1E, 0xEA, 0x1E, - 0xEC, 0x1E, 0xEC, 0x1E, 0xEE, 0x1E, 0xEE, 0x1E, - 0xF0, 0x1E, 0xF0, 0x1E, 0xF2, 0x1E, 0xF2, 0x1E, - 0xF4, 0x1E, 0xF4, 0x1E, 0xF6, 0x1E, 0xF6, 0x1E, - 0xF8, 0x1E, 0xF8, 0x1E, 0xFA, 0x1E, 0xFB, 0x1E, - 0xFC, 0x1E, 0xFD, 0x1E, 0xFE, 0x1E, 0xFF, 0x1E, - 0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F, - 0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F, - 0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F, - 0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F, - 0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F, - 0x1C, 0x1F, 0x1D, 0x1F, 0x16, 0x1F, 0x17, 0x1F, - 0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F, - 0x1C, 0x1F, 0x1D, 0x1F, 0x1E, 0x1F, 0x1F, 0x1F, - 0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F, - 0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F, - 0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F, - 0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F, - 0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F, - 0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F, - 0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F, - 0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F, - 0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F, - 0x4C, 0x1F, 0x4D, 0x1F, 0x46, 0x1F, 0x47, 0x1F, - 0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F, - 0x4C, 0x1F, 0x4D, 0x1F, 0x4E, 0x1F, 0x4F, 0x1F, - 0x50, 0x1F, 0x59, 0x1F, 0x52, 0x1F, 0x5B, 0x1F, - 0x54, 0x1F, 0x5D, 0x1F, 0x56, 0x1F, 0x5F, 0x1F, - 0x58, 0x1F, 0x59, 0x1F, 0x5A, 0x1F, 0x5B, 0x1F, - 0x5C, 0x1F, 0x5D, 0x1F, 0x5E, 0x1F, 0x5F, 0x1F, - 0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F, - 0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F, - 0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F, - 0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F, - 0xBA, 0x1F, 0xBB, 0x1F, 0xC8, 0x1F, 0xC9, 0x1F, - 0xCA, 0x1F, 0xCB, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F, - 0xF8, 0x1F, 0xF9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F, - 0xFA, 0x1F, 0xFB, 0x1F, 0x7E, 0x1F, 0x7F, 0x1F, - 0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F, - 0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F, - 0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F, - 0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F, - 0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F, - 0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F, - 0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F, - 0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F, - 0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 0x1F, 0xAB, 0x1F, - 0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F, - 0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 0x1F, 0xAB, 0x1F, - 0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F, - 0xB8, 
0x1F, 0xB9, 0x1F, 0xB2, 0x1F, 0xBC, 0x1F, - 0xB4, 0x1F, 0xB5, 0x1F, 0xB6, 0x1F, 0xB7, 0x1F, - 0xB8, 0x1F, 0xB9, 0x1F, 0xBA, 0x1F, 0xBB, 0x1F, - 0xBC, 0x1F, 0xBD, 0x1F, 0xBE, 0x1F, 0xBF, 0x1F, - 0xC0, 0x1F, 0xC1, 0x1F, 0xC2, 0x1F, 0xC3, 0x1F, - 0xC4, 0x1F, 0xC5, 0x1F, 0xC6, 0x1F, 0xC7, 0x1F, - 0xC8, 0x1F, 0xC9, 0x1F, 0xCA, 0x1F, 0xCB, 0x1F, - 0xC3, 0x1F, 0xCD, 0x1F, 0xCE, 0x1F, 0xCF, 0x1F, - 0xD8, 0x1F, 0xD9, 0x1F, 0xD2, 0x1F, 0xD3, 0x1F, - 0xD4, 0x1F, 0xD5, 0x1F, 0xD6, 0x1F, 0xD7, 0x1F, - 0xD8, 0x1F, 0xD9, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F, - 0xDC, 0x1F, 0xDD, 0x1F, 0xDE, 0x1F, 0xDF, 0x1F, - 0xE8, 0x1F, 0xE9, 0x1F, 0xE2, 0x1F, 0xE3, 0x1F, - 0xE4, 0x1F, 0xEC, 0x1F, 0xE6, 0x1F, 0xE7, 0x1F, - 0xE8, 0x1F, 0xE9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F, - 0xEC, 0x1F, 0xED, 0x1F, 0xEE, 0x1F, 0xEF, 0x1F, - 0xF0, 0x1F, 0xF1, 0x1F, 0xF2, 0x1F, 0xF3, 0x1F, - 0xF4, 0x1F, 0xF5, 0x1F, 0xF6, 0x1F, 0xF7, 0x1F, - 0xF8, 0x1F, 0xF9, 0x1F, 0xFA, 0x1F, 0xFB, 0x1F, - 0xF3, 0x1F, 0xFD, 0x1F, 0xFE, 0x1F, 0xFF, 0x1F, - 0x00, 0x20, 0x01, 0x20, 0x02, 0x20, 0x03, 0x20, - 0x04, 0x20, 0x05, 0x20, 0x06, 0x20, 0x07, 0x20, - 0x08, 0x20, 0x09, 0x20, 0x0A, 0x20, 0x0B, 0x20, - 0x0C, 0x20, 0x0D, 0x20, 0x0E, 0x20, 0x0F, 0x20, - 0x10, 0x20, 0x11, 0x20, 0x12, 0x20, 0x13, 0x20, - 0x14, 0x20, 0x15, 0x20, 0x16, 0x20, 0x17, 0x20, - 0x18, 0x20, 0x19, 0x20, 0x1A, 0x20, 0x1B, 0x20, - 0x1C, 0x20, 0x1D, 0x20, 0x1E, 0x20, 0x1F, 0x20, - 0x20, 0x20, 0x21, 0x20, 0x22, 0x20, 0x23, 0x20, - 0x24, 0x20, 0x25, 0x20, 0x26, 0x20, 0x27, 0x20, - 0x28, 0x20, 0x29, 0x20, 0x2A, 0x20, 0x2B, 0x20, - 0x2C, 0x20, 0x2D, 0x20, 0x2E, 0x20, 0x2F, 0x20, - 0x30, 0x20, 0x31, 0x20, 0x32, 0x20, 0x33, 0x20, - 0x34, 0x20, 0x35, 0x20, 0x36, 0x20, 0x37, 0x20, - 0x38, 0x20, 0x39, 0x20, 0x3A, 0x20, 0x3B, 0x20, - 0x3C, 0x20, 0x3D, 0x20, 0x3E, 0x20, 0x3F, 0x20, - 0x40, 0x20, 0x41, 0x20, 0x42, 0x20, 0x43, 0x20, - 0x44, 0x20, 0x45, 0x20, 0x46, 0x20, 0x47, 0x20, - 0x48, 0x20, 0x49, 0x20, 0x4A, 0x20, 0x4B, 0x20, - 0x4C, 0x20, 0x4D, 0x20, 0x4E, 0x20, 0x4F, 0x20, - 0x50, 0x20, 0x51, 0x20, 0x52, 0x20, 0x53, 0x20, - 0x54, 0x20, 0x55, 0x20, 0x56, 0x20, 0x57, 0x20, - 0x58, 0x20, 0x59, 0x20, 0x5A, 0x20, 0x5B, 0x20, - 0x5C, 0x20, 0x5D, 0x20, 0x5E, 0x20, 0x5F, 0x20, - 0x60, 0x20, 0x61, 0x20, 0x62, 0x20, 0x63, 0x20, - 0x64, 0x20, 0x65, 0x20, 0x66, 0x20, 0x67, 0x20, - 0x68, 0x20, 0x69, 0x20, 0x6A, 0x20, 0x6B, 0x20, - 0x6C, 0x20, 0x6D, 0x20, 0x6E, 0x20, 0x6F, 0x20, - 0x70, 0x20, 0x71, 0x20, 0x72, 0x20, 0x73, 0x20, - 0x74, 0x20, 0x75, 0x20, 0x76, 0x20, 0x77, 0x20, - 0x78, 0x20, 0x79, 0x20, 0x7A, 0x20, 0x7B, 0x20, - 0x7C, 0x20, 0x7D, 0x20, 0x7E, 0x20, 0x7F, 0x20, - 0x80, 0x20, 0x81, 0x20, 0x82, 0x20, 0x83, 0x20, - 0x84, 0x20, 0x85, 0x20, 0x86, 0x20, 0x87, 0x20, - 0x88, 0x20, 0x89, 0x20, 0x8A, 0x20, 0x8B, 0x20, - 0x8C, 0x20, 0x8D, 0x20, 0x8E, 0x20, 0x8F, 0x20, - 0x90, 0x20, 0x91, 0x20, 0x92, 0x20, 0x93, 0x20, - 0x94, 0x20, 0x95, 0x20, 0x96, 0x20, 0x97, 0x20, - 0x98, 0x20, 0x99, 0x20, 0x9A, 0x20, 0x9B, 0x20, - 0x9C, 0x20, 0x9D, 0x20, 0x9E, 0x20, 0x9F, 0x20, - 0xA0, 0x20, 0xA1, 0x20, 0xA2, 0x20, 0xA3, 0x20, - 0xA4, 0x20, 0xA5, 0x20, 0xA6, 0x20, 0xA7, 0x20, - 0xA8, 0x20, 0xA9, 0x20, 0xAA, 0x20, 0xAB, 0x20, - 0xAC, 0x20, 0xAD, 0x20, 0xAE, 0x20, 0xAF, 0x20, - 0xB0, 0x20, 0xB1, 0x20, 0xB2, 0x20, 0xB3, 0x20, - 0xB4, 0x20, 0xB5, 0x20, 0xB6, 0x20, 0xB7, 0x20, - 0xB8, 0x20, 0xB9, 0x20, 0xBA, 0x20, 0xBB, 0x20, - 0xBC, 0x20, 0xBD, 0x20, 0xBE, 0x20, 0xBF, 0x20, - 0xC0, 0x20, 0xC1, 0x20, 0xC2, 0x20, 0xC3, 0x20, - 0xC4, 0x20, 0xC5, 0x20, 0xC6, 0x20, 0xC7, 0x20, - 0xC8, 0x20, 0xC9, 0x20, 0xCA, 0x20, 0xCB, 0x20, - 0xCC, 
0x20, 0xCD, 0x20, 0xCE, 0x20, 0xCF, 0x20, - 0xD0, 0x20, 0xD1, 0x20, 0xD2, 0x20, 0xD3, 0x20, - 0xD4, 0x20, 0xD5, 0x20, 0xD6, 0x20, 0xD7, 0x20, - 0xD8, 0x20, 0xD9, 0x20, 0xDA, 0x20, 0xDB, 0x20, - 0xDC, 0x20, 0xDD, 0x20, 0xDE, 0x20, 0xDF, 0x20, - 0xE0, 0x20, 0xE1, 0x20, 0xE2, 0x20, 0xE3, 0x20, - 0xE4, 0x20, 0xE5, 0x20, 0xE6, 0x20, 0xE7, 0x20, - 0xE8, 0x20, 0xE9, 0x20, 0xEA, 0x20, 0xEB, 0x20, - 0xEC, 0x20, 0xED, 0x20, 0xEE, 0x20, 0xEF, 0x20, - 0xF0, 0x20, 0xF1, 0x20, 0xF2, 0x20, 0xF3, 0x20, - 0xF4, 0x20, 0xF5, 0x20, 0xF6, 0x20, 0xF7, 0x20, - 0xF8, 0x20, 0xF9, 0x20, 0xFA, 0x20, 0xFB, 0x20, - 0xFC, 0x20, 0xFD, 0x20, 0xFE, 0x20, 0xFF, 0x20, - 0x00, 0x21, 0x01, 0x21, 0x02, 0x21, 0x03, 0x21, - 0x04, 0x21, 0x05, 0x21, 0x06, 0x21, 0x07, 0x21, - 0x08, 0x21, 0x09, 0x21, 0x0A, 0x21, 0x0B, 0x21, - 0x0C, 0x21, 0x0D, 0x21, 0x0E, 0x21, 0x0F, 0x21, - 0x10, 0x21, 0x11, 0x21, 0x12, 0x21, 0x13, 0x21, - 0x14, 0x21, 0x15, 0x21, 0x16, 0x21, 0x17, 0x21, - 0x18, 0x21, 0x19, 0x21, 0x1A, 0x21, 0x1B, 0x21, - 0x1C, 0x21, 0x1D, 0x21, 0x1E, 0x21, 0x1F, 0x21, - 0x20, 0x21, 0x21, 0x21, 0x22, 0x21, 0x23, 0x21, - 0x24, 0x21, 0x25, 0x21, 0x26, 0x21, 0x27, 0x21, - 0x28, 0x21, 0x29, 0x21, 0x2A, 0x21, 0x2B, 0x21, - 0x2C, 0x21, 0x2D, 0x21, 0x2E, 0x21, 0x2F, 0x21, - 0x30, 0x21, 0x31, 0x21, 0x32, 0x21, 0x33, 0x21, - 0x34, 0x21, 0x35, 0x21, 0x36, 0x21, 0x37, 0x21, - 0x38, 0x21, 0x39, 0x21, 0x3A, 0x21, 0x3B, 0x21, - 0x3C, 0x21, 0x3D, 0x21, 0x3E, 0x21, 0x3F, 0x21, - 0x40, 0x21, 0x41, 0x21, 0x42, 0x21, 0x43, 0x21, - 0x44, 0x21, 0x45, 0x21, 0x46, 0x21, 0x47, 0x21, - 0x48, 0x21, 0x49, 0x21, 0x4A, 0x21, 0x4B, 0x21, - 0x4C, 0x21, 0x4D, 0x21, 0x32, 0x21, 0x4F, 0x21, - 0x50, 0x21, 0x51, 0x21, 0x52, 0x21, 0x53, 0x21, - 0x54, 0x21, 0x55, 0x21, 0x56, 0x21, 0x57, 0x21, - 0x58, 0x21, 0x59, 0x21, 0x5A, 0x21, 0x5B, 0x21, - 0x5C, 0x21, 0x5D, 0x21, 0x5E, 0x21, 0x5F, 0x21, - 0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21, - 0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21, - 0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21, - 0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21, - 0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21, - 0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21, - 0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21, - 0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21, - 0x80, 0x21, 0x81, 0x21, 0x82, 0x21, 0x83, 0x21, - 0x83, 0x21, 0xFF, 0xFF, 0x4B, 0x03, 0xB6, 0x24, - 0xB7, 0x24, 0xB8, 0x24, 0xB9, 0x24, 0xBA, 0x24, - 0xBB, 0x24, 0xBC, 0x24, 0xBD, 0x24, 0xBE, 0x24, - 0xBF, 0x24, 0xC0, 0x24, 0xC1, 0x24, 0xC2, 0x24, - 0xC3, 0x24, 0xC4, 0x24, 0xC5, 0x24, 0xC6, 0x24, - 0xC7, 0x24, 0xC8, 0x24, 0xC9, 0x24, 0xCA, 0x24, - 0xCB, 0x24, 0xCC, 0x24, 0xCD, 0x24, 0xCE, 0x24, - 0xCF, 0x24, 0xFF, 0xFF, 0x46, 0x07, 0x00, 0x2C, - 0x01, 0x2C, 0x02, 0x2C, 0x03, 0x2C, 0x04, 0x2C, - 0x05, 0x2C, 0x06, 0x2C, 0x07, 0x2C, 0x08, 0x2C, - 0x09, 0x2C, 0x0A, 0x2C, 0x0B, 0x2C, 0x0C, 0x2C, - 0x0D, 0x2C, 0x0E, 0x2C, 0x0F, 0x2C, 0x10, 0x2C, - 0x11, 0x2C, 0x12, 0x2C, 0x13, 0x2C, 0x14, 0x2C, - 0x15, 0x2C, 0x16, 0x2C, 0x17, 0x2C, 0x18, 0x2C, - 0x19, 0x2C, 0x1A, 0x2C, 0x1B, 0x2C, 0x1C, 0x2C, - 0x1D, 0x2C, 0x1E, 0x2C, 0x1F, 0x2C, 0x20, 0x2C, - 0x21, 0x2C, 0x22, 0x2C, 0x23, 0x2C, 0x24, 0x2C, - 0x25, 0x2C, 0x26, 0x2C, 0x27, 0x2C, 0x28, 0x2C, - 0x29, 0x2C, 0x2A, 0x2C, 0x2B, 0x2C, 0x2C, 0x2C, - 0x2D, 0x2C, 0x2E, 0x2C, 0x5F, 0x2C, 0x60, 0x2C, - 0x60, 0x2C, 0x62, 0x2C, 0x63, 0x2C, 0x64, 0x2C, - 0x65, 0x2C, 0x66, 0x2C, 0x67, 0x2C, 0x67, 0x2C, - 0x69, 0x2C, 0x69, 0x2C, 0x6B, 0x2C, 0x6B, 0x2C, - 0x6D, 0x2C, 0x6E, 0x2C, 0x6F, 0x2C, 0x70, 0x2C, - 0x71, 0x2C, 0x72, 0x2C, 0x73, 0x2C, 0x74, 0x2C, - 0x75, 
0x2C, 0x75, 0x2C, 0x77, 0x2C, 0x78, 0x2C, - 0x79, 0x2C, 0x7A, 0x2C, 0x7B, 0x2C, 0x7C, 0x2C, - 0x7D, 0x2C, 0x7E, 0x2C, 0x7F, 0x2C, 0x80, 0x2C, - 0x80, 0x2C, 0x82, 0x2C, 0x82, 0x2C, 0x84, 0x2C, - 0x84, 0x2C, 0x86, 0x2C, 0x86, 0x2C, 0x88, 0x2C, - 0x88, 0x2C, 0x8A, 0x2C, 0x8A, 0x2C, 0x8C, 0x2C, - 0x8C, 0x2C, 0x8E, 0x2C, 0x8E, 0x2C, 0x90, 0x2C, - 0x90, 0x2C, 0x92, 0x2C, 0x92, 0x2C, 0x94, 0x2C, - 0x94, 0x2C, 0x96, 0x2C, 0x96, 0x2C, 0x98, 0x2C, - 0x98, 0x2C, 0x9A, 0x2C, 0x9A, 0x2C, 0x9C, 0x2C, - 0x9C, 0x2C, 0x9E, 0x2C, 0x9E, 0x2C, 0xA0, 0x2C, - 0xA0, 0x2C, 0xA2, 0x2C, 0xA2, 0x2C, 0xA4, 0x2C, - 0xA4, 0x2C, 0xA6, 0x2C, 0xA6, 0x2C, 0xA8, 0x2C, - 0xA8, 0x2C, 0xAA, 0x2C, 0xAA, 0x2C, 0xAC, 0x2C, - 0xAC, 0x2C, 0xAE, 0x2C, 0xAE, 0x2C, 0xB0, 0x2C, - 0xB0, 0x2C, 0xB2, 0x2C, 0xB2, 0x2C, 0xB4, 0x2C, - 0xB4, 0x2C, 0xB6, 0x2C, 0xB6, 0x2C, 0xB8, 0x2C, - 0xB8, 0x2C, 0xBA, 0x2C, 0xBA, 0x2C, 0xBC, 0x2C, - 0xBC, 0x2C, 0xBE, 0x2C, 0xBE, 0x2C, 0xC0, 0x2C, - 0xC0, 0x2C, 0xC2, 0x2C, 0xC2, 0x2C, 0xC4, 0x2C, - 0xC4, 0x2C, 0xC6, 0x2C, 0xC6, 0x2C, 0xC8, 0x2C, - 0xC8, 0x2C, 0xCA, 0x2C, 0xCA, 0x2C, 0xCC, 0x2C, - 0xCC, 0x2C, 0xCE, 0x2C, 0xCE, 0x2C, 0xD0, 0x2C, - 0xD0, 0x2C, 0xD2, 0x2C, 0xD2, 0x2C, 0xD4, 0x2C, - 0xD4, 0x2C, 0xD6, 0x2C, 0xD6, 0x2C, 0xD8, 0x2C, - 0xD8, 0x2C, 0xDA, 0x2C, 0xDA, 0x2C, 0xDC, 0x2C, - 0xDC, 0x2C, 0xDE, 0x2C, 0xDE, 0x2C, 0xE0, 0x2C, - 0xE0, 0x2C, 0xE2, 0x2C, 0xE2, 0x2C, 0xE4, 0x2C, - 0xE5, 0x2C, 0xE6, 0x2C, 0xE7, 0x2C, 0xE8, 0x2C, - 0xE9, 0x2C, 0xEA, 0x2C, 0xEB, 0x2C, 0xEC, 0x2C, - 0xED, 0x2C, 0xEE, 0x2C, 0xEF, 0x2C, 0xF0, 0x2C, - 0xF1, 0x2C, 0xF2, 0x2C, 0xF3, 0x2C, 0xF4, 0x2C, - 0xF5, 0x2C, 0xF6, 0x2C, 0xF7, 0x2C, 0xF8, 0x2C, - 0xF9, 0x2C, 0xFA, 0x2C, 0xFB, 0x2C, 0xFC, 0x2C, - 0xFD, 0x2C, 0xFE, 0x2C, 0xFF, 0x2C, 0xA0, 0x10, - 0xA1, 0x10, 0xA2, 0x10, 0xA3, 0x10, 0xA4, 0x10, - 0xA5, 0x10, 0xA6, 0x10, 0xA7, 0x10, 0xA8, 0x10, - 0xA9, 0x10, 0xAA, 0x10, 0xAB, 0x10, 0xAC, 0x10, - 0xAD, 0x10, 0xAE, 0x10, 0xAF, 0x10, 0xB0, 0x10, - 0xB1, 0x10, 0xB2, 0x10, 0xB3, 0x10, 0xB4, 0x10, - 0xB5, 0x10, 0xB6, 0x10, 0xB7, 0x10, 0xB8, 0x10, - 0xB9, 0x10, 0xBA, 0x10, 0xBB, 0x10, 0xBC, 0x10, - 0xBD, 0x10, 0xBE, 0x10, 0xBF, 0x10, 0xC0, 0x10, - 0xC1, 0x10, 0xC2, 0x10, 0xC3, 0x10, 0xC4, 0x10, - 0xC5, 0x10, 0xFF, 0xFF, 0x1B, 0xD2, 0x21, 0xFF, - 0x22, 0xFF, 0x23, 0xFF, 0x24, 0xFF, 0x25, 0xFF, - 0x26, 0xFF, 0x27, 0xFF, 0x28, 0xFF, 0x29, 0xFF, - 0x2A, 0xFF, 0x2B, 0xFF, 0x2C, 0xFF, 0x2D, 0xFF, - 0x2E, 0xFF, 0x2F, 0xFF, 0x30, 0xFF, 0x31, 0xFF, - 0x32, 0xFF, 0x33, 0xFF, 0x34, 0xFF, 0x35, 0xFF, - 0x36, 0xFF, 0x37, 0xFF, 0x38, 0xFF, 0x39, 0xFF, - 0x3A, 0xFF, 0x5B, 0xFF, 0x5C, 0xFF, 0x5D, 0xFF, - 0x5E, 0xFF, 0x5F, 0xFF, 0x60, 0xFF, 0x61, 0xFF, - 0x62, 0xFF, 0x63, 0xFF, 0x64, 0xFF, 0x65, 0xFF, - 0x66, 0xFF, 0x67, 0xFF, 0x68, 0xFF, 0x69, 0xFF, - 0x6A, 0xFF, 0x6B, 0xFF, 0x6C, 0xFF, 0x6D, 0xFF, - 0x6E, 0xFF, 0x6F, 0xFF, 0x70, 0xFF, 0x71, 0xFF, - 0x72, 0xFF, 0x73, 0xFF, 0x74, 0xFF, 0x75, 0xFF, - 0x76, 0xFF, 0x77, 0xFF, 0x78, 0xFF, 0x79, 0xFF, - 0x7A, 0xFF, 0x7B, 0xFF, 0x7C, 0xFF, 0x7D, 0xFF, - 0x7E, 0xFF, 0x7F, 0xFF, 0x80, 0xFF, 0x81, 0xFF, - 0x82, 0xFF, 0x83, 0xFF, 0x84, 0xFF, 0x85, 0xFF, - 0x86, 0xFF, 0x87, 0xFF, 0x88, 0xFF, 0x89, 0xFF, - 0x8A, 0xFF, 0x8B, 0xFF, 0x8C, 0xFF, 0x8D, 0xFF, - 0x8E, 0xFF, 0x8F, 0xFF, 0x90, 0xFF, 0x91, 0xFF, - 0x92, 0xFF, 0x93, 0xFF, 0x94, 0xFF, 0x95, 0xFF, - 0x96, 0xFF, 0x97, 0xFF, 0x98, 0xFF, 0x99, 0xFF, - 0x9A, 0xFF, 0x9B, 0xFF, 0x9C, 0xFF, 0x9D, 0xFF, - 0x9E, 0xFF, 0x9F, 0xFF, 0xA0, 0xFF, 0xA1, 0xFF, - 0xA2, 0xFF, 0xA3, 0xFF, 0xA4, 0xFF, 0xA5, 0xFF, - 0xA6, 0xFF, 0xA7, 0xFF, 0xA8, 0xFF, 0xA9, 0xFF, - 0xAA, 
0xFF, 0xAB, 0xFF, 0xAC, 0xFF, 0xAD, 0xFF, - 0xAE, 0xFF, 0xAF, 0xFF, 0xB0, 0xFF, 0xB1, 0xFF, - 0xB2, 0xFF, 0xB3, 0xFF, 0xB4, 0xFF, 0xB5, 0xFF, - 0xB6, 0xFF, 0xB7, 0xFF, 0xB8, 0xFF, 0xB9, 0xFF, - 0xBA, 0xFF, 0xBB, 0xFF, 0xBC, 0xFF, 0xBD, 0xFF, - 0xBE, 0xFF, 0xBF, 0xFF, 0xC0, 0xFF, 0xC1, 0xFF, - 0xC2, 0xFF, 0xC3, 0xFF, 0xC4, 0xFF, 0xC5, 0xFF, - 0xC6, 0xFF, 0xC7, 0xFF, 0xC8, 0xFF, 0xC9, 0xFF, - 0xCA, 0xFF, 0xCB, 0xFF, 0xCC, 0xFF, 0xCD, 0xFF, - 0xCE, 0xFF, 0xCF, 0xFF, 0xD0, 0xFF, 0xD1, 0xFF, - 0xD2, 0xFF, 0xD3, 0xFF, 0xD4, 0xFF, 0xD5, 0xFF, - 0xD6, 0xFF, 0xD7, 0xFF, 0xD8, 0xFF, 0xD9, 0xFF, - 0xDA, 0xFF, 0xDB, 0xFF, 0xDC, 0xFF, 0xDD, 0xFF, - 0xDE, 0xFF, 0xDF, 0xFF, 0xE0, 0xFF, 0xE1, 0xFF, - 0xE2, 0xFF, 0xE3, 0xFF, 0xE4, 0xFF, 0xE5, 0xFF, - 0xE6, 0xFF, 0xE7, 0xFF, 0xE8, 0xFF, 0xE9, 0xFF, - 0xEA, 0xFF, 0xEB, 0xFF, 0xEC, 0xFF, 0xED, 0xFF, - 0xEE, 0xFF, 0xEF, 0xFF, 0xF0, 0xFF, 0xF1, 0xFF, - 0xF2, 0xFF, 0xF3, 0xFF, 0xF4, 0xFF, 0xF5, 0xFF, - 0xF6, 0xFF, 0xF7, 0xFF, 0xF8, 0xFF, 0xF9, 0xFF, - 0xFA, 0xFF, 0xFB, 0xFF, 0xFC, 0xFF, 0xFD, 0xFF, - 0xFE, 0xFF, 0xFF, 0xFF -}; diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index d3e098b41b1a..4f362dad4436 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -1186,7 +1186,9 @@ static struct fbtft_platform_data *fbtft_properties_read(struct device *dev) if (device_property_present(dev, "led-gpios")) pdata->display.backlight = 1; if (device_property_present(dev, "init")) - pdata->display.fbtftops.init_display = fbtft_init_display_from_property; + pdata->display.fbtftops.init_display = + fbtft_init_display_from_property; + pdata->display.fbtftops.request_gpios = fbtft_request_gpios; return pdata; diff --git a/drivers/staging/fbtft/fbtft-sysfs.c b/drivers/staging/fbtft/fbtft-sysfs.c index 2a5c630dab87..26e52cc2de64 100644 --- a/drivers/staging/fbtft/fbtft-sysfs.c +++ b/drivers/staging/fbtft/fbtft-sysfs.c @@ -25,6 +25,7 @@ int fbtft_gamma_parse_str(struct fbtft_par *par, u32 *curves, unsigned long val = 0; int ret = 0; int curve_counter, value_counter; + int _count; fbtft_par_dbg(DEBUG_SYSFS, par, "%s() str=\n", __func__); @@ -68,7 +69,10 @@ int fbtft_gamma_parse_str(struct fbtft_par *par, u32 *curves, ret = get_next_ulong(&curve_p, &val, " ", 16); if (ret) goto out; - curves[curve_counter * par->gamma.num_values + value_counter] = val; + + _count = curve_counter * par->gamma.num_values + + value_counter; + curves[_count] = val; value_counter++; } if (value_counter != par->gamma.num_values) { diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h index 5f782da51959..76f8c090a837 100644 --- a/drivers/staging/fbtft/fbtft.h +++ b/drivers/staging/fbtft/fbtft.h @@ -348,9 +348,17 @@ module_exit(fbtft_driver_module_exit); /* shorthand debug levels */ #define DEBUG_LEVEL_1 DEBUG_REQUEST_GPIOS -#define DEBUG_LEVEL_2 (DEBUG_LEVEL_1 | DEBUG_DRIVER_INIT_FUNCTIONS | DEBUG_TIME_FIRST_UPDATE) -#define DEBUG_LEVEL_3 (DEBUG_LEVEL_2 | DEBUG_RESET | DEBUG_INIT_DISPLAY | DEBUG_BLANK | DEBUG_REQUEST_GPIOS | DEBUG_FREE_GPIOS | DEBUG_VERIFY_GPIOS | DEBUG_BACKLIGHT | DEBUG_SYSFS) -#define DEBUG_LEVEL_4 (DEBUG_LEVEL_2 | DEBUG_FB_READ | DEBUG_FB_WRITE | DEBUG_FB_FILLRECT | DEBUG_FB_COPYAREA | DEBUG_FB_IMAGEBLIT | DEBUG_FB_BLANK) +#define DEBUG_LEVEL_2 (DEBUG_LEVEL_1 | DEBUG_DRIVER_INIT_FUNCTIONS \ + | DEBUG_TIME_FIRST_UPDATE) +#define DEBUG_LEVEL_3 (DEBUG_LEVEL_2 | DEBUG_RESET | DEBUG_INIT_DISPLAY \ + | DEBUG_BLANK | DEBUG_REQUEST_GPIOS \ + | DEBUG_FREE_GPIOS \ + | DEBUG_VERIFY_GPIOS \ + | 
DEBUG_BACKLIGHT | DEBUG_SYSFS) +#define DEBUG_LEVEL_4 (DEBUG_LEVEL_2 | DEBUG_FB_READ | DEBUG_FB_WRITE \ + | DEBUG_FB_FILLRECT \ + | DEBUG_FB_COPYAREA \ + | DEBUG_FB_IMAGEBLIT | DEBUG_FB_BLANK) #define DEBUG_LEVEL_5 (DEBUG_LEVEL_3 | DEBUG_UPDATE_DISPLAY) #define DEBUG_LEVEL_6 (DEBUG_LEVEL_4 | DEBUG_LEVEL_5) #define DEBUG_LEVEL_7 0xFFFFFFFF @@ -398,8 +406,8 @@ do { \ #define fbtft_par_dbg(level, par, format, arg...) \ do { \ - if (unlikely(par->debug & level)) \ - dev_info(par->info->device, format, ##arg); \ + if (unlikely((par)->debug & (level))) \ + dev_info((par)->info->device, format, ##arg); \ } while (0) #define fbtft_par_dbg_hex(level, par, dev, type, buf, num, format, arg...) \ diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c index 39c0fe347188..676d1ad1b50d 100644 --- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c +++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c @@ -1492,7 +1492,8 @@ static void ethsw_unregister_notifier(struct device *dev) err = unregister_switchdev_blocking_notifier(nb); if (err) dev_err(dev, - "Failed to unregister switchdev blocking notifier (%d)\n", err); + "Failed to unregister switchdev blocking notifier (%d)\n", + err); err = unregister_switchdev_notifier(&ethsw->port_switchdev_nb); if (err) diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c index be6b50f454b4..cd181a64f737 100644 --- a/drivers/staging/gasket/gasket_core.c +++ b/drivers/staging/gasket/gasket_core.c @@ -692,8 +692,7 @@ static bool gasket_mmap_has_permissions(struct gasket_dev *gasket_dev, (vma->vm_flags & (VM_WRITE | VM_READ | VM_EXEC)); if (requested_permissions & ~(bar_permissions)) { dev_dbg(gasket_dev->dev, - "Attempting to map a region with requested permissions " - "0x%x, but region has permissions 0x%x.\n", + "Attempting to map a region with requested permissions 0x%x, but region has permissions 0x%x.\n", requested_permissions, bar_permissions); return false; } @@ -1180,8 +1179,7 @@ static int gasket_open(struct inode *inode, struct file *filp) inode->i_size = 0; dev_dbg(gasket_dev->dev, - "Attempting to open with tgid %u (%s) (f_mode: 0%03o, " - "fmode_write: %d is_root: %u)\n", + "Attempting to open with tgid %u (%s) (f_mode: 0%03o, fmode_write: %d is_root: %u)\n", current->tgid, task_name, filp->f_mode, (filp->f_mode & FMODE_WRITE), is_root); @@ -1258,8 +1256,7 @@ static int gasket_release(struct inode *inode, struct file *file) mutex_lock(&gasket_dev->mutex); dev_dbg(gasket_dev->dev, - "Releasing device node. 
Call origin: tgid %u (%s) (f_mode: 0%03o, fmode_write: %d, is_root: %u)\n", current->tgid, task_name, file->f_mode, (file->f_mode & FMODE_WRITE), is_root); dev_dbg(gasket_dev->dev, "Current open count (owning tgid %u): %d\n", diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c index db11498f6fc7..354727f0a1fc 100644 --- a/drivers/staging/gdm724x/gdm_lte.c +++ b/drivers/staging/gdm724x/gdm_lte.c @@ -513,7 +513,7 @@ static int gdm_lte_event_send(struct net_device *dev, char *buf, int len) length = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), hci->len) + HCI_HEADER_SIZE; - return netlink_send(lte_event.sock, idx, 0, buf, length); + return netlink_send(lte_event.sock, idx, 0, buf, length, dev); } static void gdm_lte_event_rcv(struct net_device *dev, u16 type, diff --git a/drivers/staging/gdm724x/gdm_mux.h b/drivers/staging/gdm724x/gdm_mux.h index 51c22e3d8aeb..87b8d921fdc8 100644 --- a/drivers/staging/gdm724x/gdm_mux.h +++ b/drivers/staging/gdm724x/gdm_mux.h @@ -29,7 +29,7 @@ struct mux_pkt_header { __le32 seq_num; __le32 payload_size; __le16 packet_type; - unsigned char data[0]; + unsigned char data[]; }; struct mux_tx { diff --git a/drivers/staging/gdm724x/hci_packet.h b/drivers/staging/gdm724x/hci_packet.h index 6dea3694afdd..faecdfbc664f 100644 --- a/drivers/staging/gdm724x/hci_packet.h +++ b/drivers/staging/gdm724x/hci_packet.h @@ -28,7 +28,7 @@ struct hci_packet { __dev16 cmd_evt; __dev16 len; - u8 data[0]; + u8 data[]; } __packed; struct tlv { @@ -51,7 +51,7 @@ struct sdu { __dev32 dft_eps_ID; __dev32 bearer_ID; __dev32 nic_type; - u8 data[0]; + u8 data[]; } __packed; struct multi_sdu { @@ -59,7 +59,7 @@ struct multi_sdu { __dev16 len; __dev16 num_packet; __dev16 reserved; - u8 data[0]; + u8 data[]; } __packed; struct hci_pdn_table_ind { diff --git a/drivers/staging/gdm724x/netlink_k.c b/drivers/staging/gdm724x/netlink_k.c index 92440c3f055b..7902e52a699b 100644 --- a/drivers/staging/gdm724x/netlink_k.c +++ b/drivers/staging/gdm724x/netlink_k.c @@ -89,7 +89,8 @@ struct sock *netlink_init(int unit, return sock; } -int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len) +int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len, + struct net_device *dev) { static u32 seq; struct sk_buff *skb = NULL; @@ -118,8 +119,8 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len) return len; if (ret != -ESRCH) - pr_err("nl broadcast g=%d, t=%d, l=%d, r=%d\n", - group, type, len, ret); + netdev_err(dev, "nl broadcast g=%d, t=%d, l=%d, r=%d\n", + group, type, len, ret); else if (netlink_has_listeners(sock, group + 1)) return -EAGAIN; diff --git a/drivers/staging/gdm724x/netlink_k.h b/drivers/staging/gdm724x/netlink_k.h index c9e1d3b2d54f..d42eea9bea3e 100644 --- a/drivers/staging/gdm724x/netlink_k.h +++ b/drivers/staging/gdm724x/netlink_k.h @@ -10,6 +10,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type, void *msg, int len)); -int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len); +int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len, + struct net_device *dev); #endif /* _NETLINK_K_H_ */ diff --git a/drivers/staging/greybus/audio_apbridgea.h b/drivers/staging/greybus/audio_apbridgea.h index 3f1f4dd2c61a..efec0f815efd 100644 --- a/drivers/staging/greybus/audio_apbridgea.h +++ b/drivers/staging/greybus/audio_apbridgea.h @@ -65,7 +65,7 @@ struct audio_apbridgea_hdr { __u8 type; __le16 i2s_port; - __u8 data[0]; + __u8 
data[]; } __packed; struct audio_apbridgea_set_config_request { diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c index 9b19ea9d3fa1..9a3f7c034ab4 100644 --- a/drivers/staging/greybus/audio_manager.c +++ b/drivers/staging/greybus/audio_manager.c @@ -92,8 +92,8 @@ void gb_audio_manager_remove_all(void) list_for_each_entry_safe(module, next, &modules_list, list) { list_del(&module->list); - kobject_put(&module->kobj); ida_simple_remove(&module_id, module->id); + kobject_put(&module->kobj); } is_empty = list_empty(&modules_list); diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c index 1ff34abd5692..36d99f9e419e 100644 --- a/drivers/staging/greybus/gpio.c +++ b/drivers/staging/greybus/gpio.c @@ -364,8 +364,7 @@ static int gb_gpio_request_handler(struct gb_operation *op) struct gb_message *request; struct gb_gpio_irq_event_request *event; u8 type = op->type; - int irq; - struct irq_desc *desc; + int irq, ret; if (type != GB_GPIO_TYPE_IRQ_EVENT) { dev_err(dev, "unsupported unsolicited request: %u\n", type); @@ -391,17 +390,15 @@ static int gb_gpio_request_handler(struct gb_operation *op) dev_err(dev, "failed to find IRQ\n"); return -EINVAL; } - desc = irq_to_desc(irq); - if (!desc) { - dev_err(dev, "failed to look up irq\n"); - return -EINVAL; - } local_irq_disable(); - generic_handle_irq_desc(desc); + ret = generic_handle_irq(irq); local_irq_enable(); - return 0; + if (ret) + dev_err(dev, "failed to invoke irq handler\n"); + + return ret; } static int gb_gpio_request(struct gpio_chip *chip, unsigned int offset) diff --git a/drivers/staging/greybus/i2c.c b/drivers/staging/greybus/i2c.c index ab06fc3b9e7e..de2f6516da09 100644 --- a/drivers/staging/greybus/i2c.c +++ b/drivers/staging/greybus/i2c.c @@ -215,20 +215,6 @@ static int gb_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, return gb_i2c_transfer_operation(gb_i2c_dev, msgs, msg_count); } -#if 0 -/* Later */ -static int gb_i2c_smbus_xfer(struct i2c_adapter *adap, - u16 addr, unsigned short flags, char read_write, - u8 command, int size, union i2c_smbus_data *data) -{ - struct gb_i2c_device *gb_i2c_dev; - - gb_i2c_dev = i2c_get_adapdata(adap); - - return 0; -} -#endif - static u32 gb_i2c_functionality(struct i2c_adapter *adap) { struct gb_i2c_device *gb_i2c_dev = i2c_get_adapdata(adap); @@ -238,7 +224,6 @@ static u32 gb_i2c_functionality(struct i2c_adapter *adap) static const struct i2c_algorithm gb_i2c_algorithm = { .master_xfer = gb_i2c_master_xfer, - /* .smbus_xfer = gb_i2c_smbus_xfer, */ .functionality = gb_i2c_functionality, }; @@ -281,7 +266,6 @@ static int gb_i2c_probe(struct gbphy_device *gbphy_dev, adapter->owner = THIS_MODULE; adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adapter->algo = &gb_i2c_algorithm; - /* adapter->algo_data = what? 
*/ adapter->dev.parent = &gbphy_dev->dev; snprintf(adapter->name, sizeof(adapter->name), "Greybus i2c adapter"); diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c index 64a17dfe3b6e..2a375f407d38 100644 --- a/drivers/staging/greybus/raw.c +++ b/drivers/staging/greybus/raw.c @@ -29,7 +29,7 @@ struct gb_raw { struct raw_data { struct list_head entry; u32 len; - u8 data[0]; + u8 data[]; }; static struct class *raw_class; diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c index ba6f905f26fa..867bf289df2e 100644 --- a/drivers/staging/greybus/tools/loopback_test.c +++ b/drivers/staging/greybus/tools/loopback_test.c @@ -19,6 +19,7 @@ #include <signal.h> #define MAX_NUM_DEVICES 10 +#define MAX_SYSFS_PREFIX 0x80 #define MAX_SYSFS_PATH 0x200 #define CSV_MAX_LINE 0x1000 #define SYSFS_MAX_INT 0x20 @@ -67,7 +68,7 @@ struct loopback_results { }; struct loopback_device { - char name[MAX_SYSFS_PATH]; + char name[MAX_STR_LEN]; char sysfs_entry[MAX_SYSFS_PATH]; char debugfs_entry[MAX_SYSFS_PATH]; struct loopback_results results; @@ -93,8 +94,8 @@ struct loopback_test { int stop_all; int poll_count; char test_name[MAX_STR_LEN]; - char sysfs_prefix[MAX_SYSFS_PATH]; - char debugfs_prefix[MAX_SYSFS_PATH]; + char sysfs_prefix[MAX_SYSFS_PREFIX]; + char debugfs_prefix[MAX_SYSFS_PREFIX]; struct timespec poll_timeout; struct loopback_device devices[MAX_NUM_DEVICES]; struct loopback_results aggregate_results; @@ -637,7 +638,7 @@ baddir: static int open_poll_files(struct loopback_test *t) { struct loopback_device *dev; - char buf[MAX_STR_LEN]; + char buf[MAX_SYSFS_PATH + MAX_STR_LEN]; char dummy; int fds_idx = 0; int i; @@ -655,7 +656,7 @@ static int open_poll_files(struct loopback_test *t) goto err; } read(t->fds[fds_idx].fd, &dummy, 1); - t->fds[fds_idx].events = EPOLLERR|EPOLLPRI; + t->fds[fds_idx].events = POLLERR | POLLPRI; t->fds[fds_idx].revents = 0; fds_idx++; } @@ -748,7 +749,7 @@ static int wait_for_complete(struct loopback_test *t) } for (i = 0; i < t->poll_count; i++) { - if (t->fds[i].revents & EPOLLPRI) { + if (t->fds[i].revents & POLLPRI) { /* Dummy read to clear the event */ read(t->fds[i].fd, &dummy, 1); number_of_events++; @@ -801,8 +802,9 @@ static void prepare_devices(struct loopback_test *t) write_sysfs_val(t->devices[i].sysfs_entry, "outstanding_operations_max", t->async_outstanding_operations); - } else + } else { write_sysfs_val(t->devices[i].sysfs_entry, "async", 0); + } } } @@ -907,10 +909,10 @@ int main(int argc, char *argv[]) t.iteration_max = atoi(optarg); break; case 'S': - snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", optarg); + snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg); break; case 'D': - snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", optarg); + snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg); break; case 'm': t.mask = atol(optarg); @@ -961,10 +963,10 @@ int main(int argc, char *argv[]) } if (!strcmp(t.sysfs_prefix, "")) - snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", sysfs_prefix); + snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", sysfs_prefix); if (!strcmp(t.debugfs_prefix, "")) - snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", debugfs_prefix); + snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", debugfs_prefix); ret = find_loopback_devices(&t); if (ret) diff --git a/drivers/staging/hp/Kconfig b/drivers/staging/hp/Kconfig deleted file mode 100644 index f20ab21a6b2a..000000000000 --- a/drivers/staging/hp/Kconfig +++ /dev/null @@ -1,30 +0,0 @@ -# SPDX-License-Identifier: 
GPL-2.0-only -# -# HP network device configuration -# - -config NET_VENDOR_HP - bool "HP devices" - default y - depends on ETHERNET - depends on ISA || EISA || PCI - ---help--- - If you have a network (Ethernet) card belonging to this class, say Y. - - Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about HP cards. If you say Y, you will be asked for - your specific card in the following questions. - -if NET_VENDOR_HP - -config HP100 - tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support" - depends on (ISA || EISA || PCI) - ---help--- - If you have a network (Ethernet) card of this type, say Y here. - - To compile this driver as a module, choose M here. The module - will be called hp100. - -endif # NET_VENDOR_HP diff --git a/drivers/staging/hp/Makefile b/drivers/staging/hp/Makefile deleted file mode 100644 index 5ed723bb11e2..000000000000 --- a/drivers/staging/hp/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Makefile for the HP network device drivers. -# - -obj-$(CONFIG_HP100) += hp100.o diff --git a/drivers/staging/hp/hp100.c b/drivers/staging/hp/hp100.c deleted file mode 100644 index e2f0b58e5dfd..000000000000 --- a/drivers/staging/hp/hp100.c +++ /dev/null @@ -1,3034 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* -** hp100.c -** HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters -** -** $Id: hp100.c,v 1.58 2001/09/24 18:03:01 perex Exp perex $ -** -** Based on the HP100 driver written by Jaroslav Kysela <perex@jcu.cz> -** Extended for new busmaster capable chipsets by -** Siegfried "Frieder" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de> -** -** Maintained by: Jaroslav Kysela <perex@perex.cz> -** -** This driver has only been tested with -** -- HP J2585B 10/100 Mbit/s PCI Busmaster -** -- HP J2585A 10/100 Mbit/s PCI -** -- HP J2970A 10 Mbit/s PCI Combo 10base-T/BNC -** -- HP J2973A 10 Mbit/s PCI 10base-T -** -- HP J2573 10/100 ISA -** -- Compex ReadyLink ENET100-VG4 10/100 Mbit/s PCI / EISA -** -- Compex FreedomLine 100/VG 10/100 Mbit/s ISA / EISA / PCI -** -** but it should also work with the other CASCADE based adapters. -** -** TODO: -** - J2573 seems to hang sometimes when in shared memory mode. -** - Mode for Priority TX -** - Check PCI registers, performance might be improved? -** - To reduce interrupt load in busmaster, one could switch off -** the interrupts that are used to refill the queues whenever the -** queues are filled up to more than a certain threshold. -** - some updates for EISA version of card -** -** -** -** 1.57c -> 1.58 -** - used indent to change coding-style -** - added KTI DP-200 EISA ID -** - ioremap is also used for low (<1MB) memory (multi-architecture support) -** -** 1.57b -> 1.57c - Arnaldo Carvalho de Melo <acme@conectiva.com.br> -** - release resources on failure in init_module -** -** 1.57 -> 1.57b - Jean II -** - fix spinlocks, SMP is now working ! -** -** 1.56 -> 1.57 -** - updates for new PCI interface for 2.1 kernels -** -** 1.55 -> 1.56 -** - removed printk in misc. 
interrupt and update statistics to allow -** monitoring of card status -** - timing changes in xmit routines, relogin to 100VG hub added when -** driver does reset -** - included fix for Compex FreedomLine PCI adapter -** -** 1.54 -> 1.55 -** - fixed bad initialization in init_module -** - added Compex FreedomLine adapter -** - some fixes in card initialization -** -** 1.53 -> 1.54 -** - added hardware multicast filter support (doesn't work) -** - little changes in hp100_sense_lan routine -** - added support for Coax and AUI (J2970) -** - fix for multiple cards and hp100_mode parameter (insmod) -** - fix for shared IRQ -** -** 1.52 -> 1.53 -** - fixed bug in multicast support -** -*/ - -#define HP100_DEFAULT_PRIORITY_TX 0 - -#undef HP100_DEBUG -#undef HP100_DEBUG_B /* Trace */ -#undef HP100_DEBUG_BM /* Debug busmaster code (PDL stuff) */ - -#undef HP100_DEBUG_TRAINING /* Debug login-to-hub procedure */ -#undef HP100_DEBUG_TX -#undef HP100_DEBUG_IRQ -#undef HP100_DEBUG_RX - -#undef HP100_MULTICAST_FILTER /* Need to be debugged... */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/interrupt.h> -#include <linux/eisa.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/spinlock.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/types.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/bitops.h> -#include <linux/jiffies.h> - -#include <asm/io.h> - -#include "hp100.h" - -/* - * defines - */ - -#define HP100_BUS_ISA 0 -#define HP100_BUS_EISA 1 -#define HP100_BUS_PCI 2 - -#define HP100_REGION_SIZE 0x20 /* for ioports */ -#define HP100_SIG_LEN 8 /* same as EISA_SIG_LEN */ - -#define HP100_MAX_PACKET_SIZE (1536+4) -#define HP100_MIN_PACKET_SIZE 60 - -#ifndef HP100_DEFAULT_RX_RATIO -/* default - 75% onboard memory on the card are used for RX packets */ -#define HP100_DEFAULT_RX_RATIO 75 -#endif - -#ifndef HP100_DEFAULT_PRIORITY_TX -/* default - don't enable transmit outgoing packets as priority */ -#define HP100_DEFAULT_PRIORITY_TX 0 -#endif - -/* - * structures - */ - -struct hp100_private { - spinlock_t lock; - char id[HP100_SIG_LEN]; - u_short chip; - u_short soft_model; - u_int memory_size; - u_int virt_memory_size; - u_short rx_ratio; /* 1 - 99 */ - u_short priority_tx; /* != 0 - priority tx */ - u_short mode; /* PIO, Shared Mem or Busmaster */ - u_char bus; - struct pci_dev *pci_dev; - short mem_mapped; /* memory mapped access */ - void __iomem *mem_ptr_virt; /* virtual memory mapped area, maybe NULL */ - unsigned long mem_ptr_phys; /* physical memory mapped area */ - short lan_type; /* 10Mb/s, 100Mb/s or -1 (error) */ - int hub_status; /* was login to hub successful? 
*/ - u_char mac1_mode; - u_char mac2_mode; - u_char hash_bytes[8]; - - /* Rings for busmaster mode: */ - hp100_ring_t *rxrhead; /* Head (oldest) index into rxring */ - hp100_ring_t *rxrtail; /* Tail (newest) index into rxring */ - hp100_ring_t *txrhead; /* Head (oldest) index into txring */ - hp100_ring_t *txrtail; /* Tail (newest) index into txring */ - - hp100_ring_t rxring[MAX_RX_PDL]; - hp100_ring_t txring[MAX_TX_PDL]; - - u_int *page_vaddr_algn; /* Aligned virtual address of allocated page */ - u_long whatever_offset; /* Offset to bus/phys/dma address */ - int rxrcommit; /* # Rx PDLs committed to adapter */ - int txrcommit; /* # Tx PDLs committed to adapter */ -}; - -/* - * variables - */ -#ifdef CONFIG_ISA -static const char *hp100_isa_tbl[] = { - "HWPF150", /* HP J2573 rev A */ - "HWP1950", /* HP J2573 */ -}; -#endif - -static const struct eisa_device_id hp100_eisa_tbl[] = { - { "HWPF180" }, /* HP J2577 rev A */ - { "HWP1920" }, /* HP 27248B */ - { "HWP1940" }, /* HP J2577 */ - { "HWP1990" }, /* HP J2577 */ - { "CPX0301" }, /* ReadyLink ENET100-VG4 */ - { "CPX0401" }, /* FreedomLine 100/VG */ - { "" } /* Mandatory final entry ! */ -}; -MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl); - -static const struct pci_device_id hp100_pci_tbl[] = { - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2973A, PCI_ANY_ID, PCI_ANY_ID,}, - {PCI_VENDOR_ID_COMPEX, PCI_DEVICE_ID_COMPEX_ENET100VG4, PCI_ANY_ID, PCI_ANY_ID,}, - {PCI_VENDOR_ID_COMPEX2, PCI_DEVICE_ID_COMPEX2_100VG, PCI_ANY_ID, PCI_ANY_ID,}, -/* {PCI_VENDOR_ID_KTI, PCI_DEVICE_ID_KTI_DP200, PCI_ANY_ID, PCI_ANY_ID }, */ - {} /* Terminating entry */ -}; -MODULE_DEVICE_TABLE(pci, hp100_pci_tbl); - -static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO; -static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX; -static int hp100_mode = 1; - -module_param(hp100_rx_ratio, int, 0); -module_param(hp100_priority_tx, int, 0); -module_param(hp100_mode, int, 0); - -/* - * prototypes - */ - -static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus, - struct pci_dev *pci_dev); - - -static int hp100_open(struct net_device *dev); -static int hp100_close(struct net_device *dev); -static netdev_tx_t hp100_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb, - struct net_device *dev); -static void hp100_rx(struct net_device *dev); -static struct net_device_stats *hp100_get_stats(struct net_device *dev); -static void hp100_misc_interrupt(struct net_device *dev); -static void hp100_update_stats(struct net_device *dev); -static void hp100_clear_stats(struct hp100_private *lp, int ioaddr); -static void hp100_set_multicast_list(struct net_device *dev); -static irqreturn_t hp100_interrupt(int irq, void *dev_id); -static void hp100_start_interface(struct net_device *dev); -static void hp100_stop_interface(struct net_device *dev); -static void hp100_load_eeprom(struct net_device *dev, u_short ioaddr); -static int hp100_sense_lan(struct net_device *dev); -static int hp100_login_to_vg_hub(struct net_device *dev, - u_short force_relogin); -static int hp100_down_vg_link(struct net_device *dev); -static void hp100_cascade_reset(struct net_device *dev, u_short enable); -static void hp100_BM_shutdown(struct net_device *dev); -static void hp100_mmuinit(struct net_device *dev); -static void 
hp100_init_pdls(struct net_device *dev); -static int hp100_init_rxpdl(struct net_device *dev, - register hp100_ring_t * ringptr, - register u_int * pdlptr); -static int hp100_init_txpdl(struct net_device *dev, - register hp100_ring_t * ringptr, - register u_int * pdlptr); -static void hp100_rxfill(struct net_device *dev); -static void hp100_hwinit(struct net_device *dev); -static void hp100_clean_txring(struct net_device *dev); -#ifdef HP100_DEBUG -static void hp100_RegisterDump(struct net_device *dev); -#endif - -/* Conversion to new PCI API : - * Convert an address in a kernel buffer to a bus/phys/dma address. - * This work *only* for memory fragments part of lp->page_vaddr, - * because it was properly DMA allocated via pci_alloc_consistent(), - * so we just need to "retrieve" the original mapping to bus/phys/dma - * address - Jean II */ -static inline dma_addr_t virt_to_whatever(struct net_device *dev, u32 * ptr) -{ - struct hp100_private *lp = netdev_priv(dev); - return ((u_long) ptr) + lp->whatever_offset; -} - -static inline u_int pdl_map_data(struct hp100_private *lp, void *data) -{ - return pci_map_single(lp->pci_dev, data, - MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE); -} - -/* TODO: This function should not really be needed in a good design... */ -static void wait(void) -{ - mdelay(1); -} - -/* - * probe functions - * These functions should - if possible - avoid doing write operations - * since this could cause problems when the card is not installed. - */ - -/* - * Read board id and convert to string. - * Effectively same code as decode_eisa_sig - */ -static const char *hp100_read_id(int ioaddr) -{ - int i; - static char str[HP100_SIG_LEN]; - unsigned char sig[4], sum; - unsigned short rev; - - hp100_page(ID_MAC_ADDR); - sum = 0; - for (i = 0; i < 4; i++) { - sig[i] = hp100_inb(BOARD_ID + i); - sum += sig[i]; - } - - sum += hp100_inb(BOARD_ID + i); - if (sum != 0xff) - return NULL; /* bad checksum */ - - str[0] = ((sig[0] >> 2) & 0x1f) + ('A' - 1); - str[1] = (((sig[0] & 3) << 3) | (sig[1] >> 5)) + ('A' - 1); - str[2] = (sig[1] & 0x1f) + ('A' - 1); - rev = (sig[2] << 8) | sig[3]; - sprintf(str + 3, "%04X", rev); - - return str; -} - -#ifdef CONFIG_ISA -static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr) -{ - const char *sig; - int i; - - if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100")) - goto err; - - if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE) { - release_region(ioaddr, HP100_REGION_SIZE); - goto err; - } - - sig = hp100_read_id(ioaddr); - release_region(ioaddr, HP100_REGION_SIZE); - - if (sig == NULL) - goto err; - - i = match_string(hp100_isa_tbl, ARRAY_SIZE(hp100_isa_tbl), sig); - if (i < 0) - goto err; - - return hp100_probe1(dev, ioaddr, HP100_BUS_ISA, NULL); - err: - return -ENODEV; - -} -/* - * Probe for ISA board. - * EISA and PCI are handled by device infrastructure. 
- */ - -static int __init hp100_isa_probe(struct net_device *dev, int addr) -{ - int err = -ENODEV; - - /* Probe for a specific ISA address */ - if (addr > 0xff && addr < 0x400) - err = hp100_isa_probe1(dev, addr); - - else if (addr != 0) - err = -ENXIO; - - else { - /* Probe all ISA possible port regions */ - for (addr = 0x100; addr < 0x400; addr += 0x20) { - err = hp100_isa_probe1(dev, addr); - if (!err) - break; - } - } - return err; -} -#endif /* CONFIG_ISA */ - -#if !defined(MODULE) && defined(CONFIG_ISA) -struct net_device * __init hp100_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); - int err; - - if (!dev) - return ERR_PTR(-ENODEV); - -#ifdef HP100_DEBUG_B - hp100_outw(0x4200, TRACE); - printk("hp100: %s: probe\n", dev->name); -#endif - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - - err = hp100_isa_probe(dev, dev->base_addr); - if (err) - goto out; - - return dev; - out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif /* !MODULE && CONFIG_ISA */ - -static const struct net_device_ops hp100_bm_netdev_ops = { - .ndo_open = hp100_open, - .ndo_stop = hp100_close, - .ndo_start_xmit = hp100_start_xmit_bm, - .ndo_get_stats = hp100_get_stats, - .ndo_set_rx_mode = hp100_set_multicast_list, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static const struct net_device_ops hp100_netdev_ops = { - .ndo_open = hp100_open, - .ndo_stop = hp100_close, - .ndo_start_xmit = hp100_start_xmit, - .ndo_get_stats = hp100_get_stats, - .ndo_set_rx_mode = hp100_set_multicast_list, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus, - struct pci_dev *pci_dev) -{ - int i; - int err = -ENODEV; - const char *eid; - u_int chip; - u_char uc; - u_int memory_size = 0, virt_memory_size = 0; - u_short local_mode, lsw; - short mem_mapped; - unsigned long mem_ptr_phys; - void __iomem *mem_ptr_virt; - struct hp100_private *lp; - -#ifdef HP100_DEBUG_B - hp100_outw(0x4201, TRACE); - printk("hp100: %s: probe1\n", dev->name); -#endif - - /* memory region for programmed i/o */ - if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100")) - goto out1; - - if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE) - goto out2; - - chip = hp100_inw(PAGING) & HP100_CHIPID_MASK; -#ifdef HP100_DEBUG - if (chip == HP100_CHIPID_SHASTA) - printk("hp100: %s: Shasta Chip detected. (This is a pre 802.12 chip)\n", dev->name); - else if (chip == HP100_CHIPID_RAINIER) - printk("hp100: %s: Rainier Chip detected. (This is a pre 802.12 chip)\n", dev->name); - else if (chip == HP100_CHIPID_LASSEN) - printk("hp100: %s: Lassen Chip detected.\n", dev->name); - else - printk("hp100: %s: Warning: Unknown CASCADE chip (id=0x%.4x).\n", dev->name, chip); -#endif - - dev->base_addr = ioaddr; - - eid = hp100_read_id(ioaddr); - if (eid == NULL) { /* bad checksum? */ - printk(KERN_WARNING "%s: bad ID checksum at base port 0x%x\n", - __func__, ioaddr); - goto out2; - } - - hp100_page(ID_MAC_ADDR); - for (i = uc = 0; i < 7; i++) - uc += hp100_inb(LAN_ADDR + i); - if (uc != 0xff) { - printk(KERN_WARNING - "%s: bad lan address checksum at port 0x%x)\n", - __func__, ioaddr); - err = -EIO; - goto out2; - } - - /* Make sure, that all registers are correctly updated... 
*/ - - hp100_load_eeprom(dev, ioaddr); - wait(); - - /* - * Determine driver operation mode - * - * Use the variable "hp100_mode" upon insmod or as kernel parameter to - * force driver modes: - * hp100_mode=1 -> default, use busmaster mode if configured. - * hp100_mode=2 -> enable shared memory mode - * hp100_mode=3 -> force use of i/o mapped mode. - * hp100_mode=4 -> same as 1, but re-set the enable bit on the card. - */ - - /* - * LSW values: - * 0x2278 -> J2585B, PnP shared memory mode - * 0x2270 -> J2585B, shared memory mode, 0xdc000 - * 0xa23c -> J2585B, I/O mapped mode - * 0x2240 -> EISA COMPEX, BusMaster (Shasta Chip) - * 0x2220 -> EISA HP, I/O (Shasta Chip) - * 0x2260 -> EISA HP, BusMaster (Shasta Chip) - */ - -#if 0 - local_mode = 0x2270; - hp100_outw(0xfefe, OPTION_LSW); - hp100_outw(local_mode | HP100_SET_LB | HP100_SET_HB, OPTION_LSW); -#endif - - /* hp100_mode value maybe used in future by another card */ - local_mode = hp100_mode; - if (local_mode < 1 || local_mode > 4) - local_mode = 1; /* default */ -#ifdef HP100_DEBUG - printk("hp100: %s: original LSW = 0x%x\n", dev->name, - hp100_inw(OPTION_LSW)); -#endif - - if (local_mode == 3) { - hp100_outw(HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW); - hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW); - hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW); - printk("hp100: IO mapped mode forced.\n"); - } else if (local_mode == 2) { - hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW); - hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW); - hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW); - printk("hp100: Shared memory mode requested.\n"); - } else if (local_mode == 4) { - if (chip == HP100_CHIPID_LASSEN) { - hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_SET_HB, OPTION_LSW); - hp100_outw(HP100_IO_EN | HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW); - printk("hp100: Busmaster mode requested.\n"); - } - local_mode = 1; - } - - if (local_mode == 1) { /* default behaviour */ - lsw = hp100_inw(OPTION_LSW); - - if ((lsw & HP100_IO_EN) && (~lsw & HP100_MEM_EN) && - (~lsw & (HP100_BM_WRITE | HP100_BM_READ))) { -#ifdef HP100_DEBUG - printk("hp100: %s: IO_EN bit is set on card.\n", dev->name); -#endif - local_mode = 3; - } else if (chip == HP100_CHIPID_LASSEN && - (lsw & (HP100_BM_WRITE | HP100_BM_READ)) == (HP100_BM_WRITE | HP100_BM_READ)) { - /* Conversion to new PCI API : - * I don't have the doc, but I assume that the card - * can map the full 32bit address space. - * Also, we can have EISA Busmaster cards (not tested), - * so beware !!! 
- Jean II */ - if((bus == HP100_BUS_PCI) && - (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)))) { - /* Gracefully fallback to shared memory */ - goto busmasterfail; - } - printk("hp100: Busmaster mode enabled.\n"); - hp100_outw(HP100_MEM_EN | HP100_IO_EN | HP100_RESET_LB, OPTION_LSW); - } else { - busmasterfail: -#ifdef HP100_DEBUG - printk("hp100: %s: Card not configured for BM or BM not supported with this card.\n", dev->name); - printk("hp100: %s: Trying shared memory mode.\n", dev->name); -#endif - /* In this case, try shared memory mode */ - local_mode = 2; - hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW); - /* hp100_outw(HP100_IO_EN|HP100_RESET_LB, OPTION_LSW); */ - } - } -#ifdef HP100_DEBUG - printk("hp100: %s: new LSW = 0x%x\n", dev->name, hp100_inw(OPTION_LSW)); -#endif - - /* Check for shared memory on the card, eventually remap it */ - hp100_page(HW_MAP); - mem_mapped = ((hp100_inw(OPTION_LSW) & (HP100_MEM_EN)) != 0); - mem_ptr_phys = 0UL; - mem_ptr_virt = NULL; - memory_size = (8192 << ((hp100_inb(SRAM) >> 5) & 0x07)); - virt_memory_size = 0; - - /* For memory mapped or busmaster mode, we want the memory address */ - if (mem_mapped || (local_mode == 1)) { - mem_ptr_phys = (hp100_inw(MEM_MAP_LSW) | (hp100_inw(MEM_MAP_MSW) << 16)); - mem_ptr_phys &= ~0x1fff; /* 8k alignment */ - - if (bus == HP100_BUS_ISA && (mem_ptr_phys & ~0xfffff) != 0) { - printk("hp100: Can only use programmed i/o mode.\n"); - mem_ptr_phys = 0; - mem_mapped = 0; - local_mode = 3; /* Use programmed i/o */ - } - - /* We do not need access to shared memory in busmaster mode */ - /* However in slave mode we need to remap high (>1GB) card memory */ - if (local_mode != 1) { /* = not busmaster */ - /* We try with smaller memory sizes, if ioremap fails */ - for (virt_memory_size = memory_size; virt_memory_size > 16383; virt_memory_size >>= 1) { - if ((mem_ptr_virt = ioremap((u_long) mem_ptr_phys, virt_memory_size)) == NULL) { -#ifdef HP100_DEBUG - printk("hp100: %s: ioremap for 0x%x bytes high PCI memory at 0x%lx failed\n", dev->name, virt_memory_size, mem_ptr_phys); -#endif - } else { -#ifdef HP100_DEBUG - printk("hp100: %s: remapped 0x%x bytes high PCI memory at 0x%lx to %p.\n", dev->name, virt_memory_size, mem_ptr_phys, mem_ptr_virt); -#endif - break; - } - } - - if (mem_ptr_virt == NULL) { /* all ioremap tries failed */ - printk("hp100: Failed to ioremap the PCI card memory. Will have to use i/o mapped mode.\n"); - local_mode = 3; - virt_memory_size = 0; - } - } - } - - if (local_mode == 3) { /* io mapped forced */ - mem_mapped = 0; - mem_ptr_phys = 0; - mem_ptr_virt = NULL; - printk("hp100: Using (slow) programmed i/o mode.\n"); - } - - /* Initialise the "private" data structure for this card. 
*/ - lp = netdev_priv(dev); - - spin_lock_init(&lp->lock); - strlcpy(lp->id, eid, HP100_SIG_LEN); - lp->chip = chip; - lp->mode = local_mode; - lp->bus = bus; - lp->pci_dev = pci_dev; - lp->priority_tx = hp100_priority_tx; - lp->rx_ratio = hp100_rx_ratio; - lp->mem_ptr_phys = mem_ptr_phys; - lp->mem_ptr_virt = mem_ptr_virt; - hp100_page(ID_MAC_ADDR); - lp->soft_model = hp100_inb(SOFT_MODEL); - lp->mac1_mode = HP100_MAC1MODE3; - lp->mac2_mode = HP100_MAC2MODE3; - memset(&lp->hash_bytes, 0x00, 8); - - dev->base_addr = ioaddr; - - lp->memory_size = memory_size; - lp->virt_memory_size = virt_memory_size; - lp->rx_ratio = hp100_rx_ratio; /* can be conf'd with insmod */ - - if (lp->mode == 1) /* busmaster */ - dev->netdev_ops = &hp100_bm_netdev_ops; - else - dev->netdev_ops = &hp100_netdev_ops; - - /* Ask the card for which IRQ line it is configured */ - if (bus == HP100_BUS_PCI) { - dev->irq = pci_dev->irq; - } else { - hp100_page(HW_MAP); - dev->irq = hp100_inb(IRQ_CHANNEL) & HP100_IRQMASK; - if (dev->irq == 2) - dev->irq = 9; - } - - if (lp->mode == 1) /* busmaster */ - dev->dma = 4; - - /* Ask the card for its MAC address and store it for later use. */ - hp100_page(ID_MAC_ADDR); - for (i = uc = 0; i < 6; i++) - dev->dev_addr[i] = hp100_inb(LAN_ADDR + i); - - /* Reset statistics (counters) */ - hp100_clear_stats(lp, ioaddr); - - /* If busmaster mode is wanted, a dma-capable memory area is needed for - * the rx and tx PDLs - * PCI cards can access the whole PC memory. Therefore GFP_DMA is not - * needed for the allocation of the memory area. - */ - - /* TODO: We do not need this with old cards, where PDLs are stored - * in the cards shared memory area. But currently, busmaster has been - * implemented/tested only with the lassen chip anyway... */ - if (lp->mode == 1) { /* busmaster */ - dma_addr_t page_baddr; - /* Get physically continuous memory for TX & RX PDLs */ - /* Conversion to new PCI API : - * Pages are always aligned and zeroed, no need to it ourself. - * Doc says should be OK for EISA bus as well - Jean II */ - lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr); - if (!lp->page_vaddr_algn) { - err = -ENOMEM; - goto out_mem_ptr; - } - lp->whatever_offset = ((u_long) page_baddr) - ((u_long) lp->page_vaddr_algn); - -#ifdef HP100_DEBUG_BM - printk("hp100: %s: Reserved DMA memory from 0x%x to 0x%x\n", dev->name, (u_int) lp->page_vaddr_algn, (u_int) lp->page_vaddr_algn + MAX_RINGSIZE); -#endif - lp->rxrcommit = lp->txrcommit = 0; - lp->rxrhead = lp->rxrtail = &(lp->rxring[0]); - lp->txrhead = lp->txrtail = &(lp->txring[0]); - } - - /* Initialise the card. */ - /* (I'm not really sure if it's a good idea to do this during probing, but - * like this it's assured that the lan connection type can be sensed - * correctly) - */ - hp100_hwinit(dev); - - /* Try to find out which kind of LAN the card is connected to. */ - lp->lan_type = hp100_sense_lan(dev); - - /* Print out a message what about what we think we have probed. */ - printk("hp100: at 0x%x, IRQ %d, ", ioaddr, dev->irq); - switch (bus) { - case HP100_BUS_EISA: - printk("EISA"); - break; - case HP100_BUS_PCI: - printk("PCI"); - break; - default: - printk("ISA"); - break; - } - printk(" bus, %dk SRAM (rx/tx %d%%).\n", lp->memory_size >> 10, lp->rx_ratio); - - if (lp->mode == 2) { /* memory mapped */ - printk("hp100: Memory area at 0x%lx-0x%lx", mem_ptr_phys, - (mem_ptr_phys + (mem_ptr_phys > 0x100000 ? 
(u_long) lp->memory_size : 16 * 1024)) - 1); - if (mem_ptr_virt) - printk(" (virtual base %p)", mem_ptr_virt); - printk(".\n"); - - /* Set for info when doing ifconfig */ - dev->mem_start = mem_ptr_phys; - dev->mem_end = mem_ptr_phys + lp->memory_size; - } - - printk("hp100: "); - if (lp->lan_type != HP100_LAN_ERR) - printk("Adapter is attached to "); - switch (lp->lan_type) { - case HP100_LAN_100: - printk("100Mb/s Voice Grade AnyLAN network.\n"); - break; - case HP100_LAN_10: - printk("10Mb/s network (10baseT).\n"); - break; - case HP100_LAN_COAX: - printk("10Mb/s network (coax).\n"); - break; - default: - printk("Warning! Link down.\n"); - } - - err = register_netdev(dev); - if (err) - goto out3; - - return 0; -out3: - if (local_mode == 1) - pci_free_consistent(lp->pci_dev, MAX_RINGSIZE + 0x0f, - lp->page_vaddr_algn, - virt_to_whatever(dev, lp->page_vaddr_algn)); -out_mem_ptr: - if (mem_ptr_virt) - iounmap(mem_ptr_virt); -out2: - release_region(ioaddr, HP100_REGION_SIZE); -out1: - return err; -} - -/* This procedure puts the card into a stable init state */ -static void hp100_hwinit(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - struct hp100_private *lp = netdev_priv(dev); - -#ifdef HP100_DEBUG_B - hp100_outw(0x4202, TRACE); - printk("hp100: %s: hwinit\n", dev->name); -#endif - - /* Initialise the card. -------------------------------------------- */ - - /* Clear all pending Ints and disable Ints */ - hp100_page(PERFORMANCE); - hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */ - hp100_outw(0xffff, IRQ_STATUS); /* clear all pending ints */ - - hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW); - hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW); - - if (lp->mode == 1) { - hp100_BM_shutdown(dev); /* disables BM, puts cascade in reset */ - wait(); - } else { - hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW); - hp100_cascade_reset(dev, 1); - hp100_page(MAC_CTRL); - hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); - } - - /* Initiate EEPROM reload */ - hp100_load_eeprom(dev, 0); - - wait(); - - /* Go into reset again. */ - hp100_cascade_reset(dev, 1); - - /* Set Option Registers to a safe state */ - hp100_outw(HP100_DEBUG_EN | - HP100_RX_HDR | - HP100_EE_EN | - HP100_BM_WRITE | - HP100_BM_READ | HP100_RESET_HB | - HP100_FAKE_INT | - HP100_INT_EN | - HP100_MEM_EN | - HP100_IO_EN | HP100_RESET_LB, OPTION_LSW); - - hp100_outw(HP100_TRI_INT | - HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW); - - hp100_outb(HP100_PRIORITY_TX | - HP100_ADV_NXT_PKT | - HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW); - - /* TODO: Configure MMU for Ram Test. */ - /* TODO: Ram Test. */ - - /* Re-check if adapter is still at same i/o location */ - /* (If the base i/o in eeprom has been changed but the */ - /* registers had not been changed, a reload of the eeprom */ - /* would move the adapter to the address stored in eeprom */ - - /* TODO: Code to implement. */ - - /* Until here it was code from HWdiscover procedure. */ - /* Next comes code from mmuinit procedure of SCO BM driver which is - * called from HWconfigure in the SCO driver. */ - - /* Initialise MMU, eventually switch on Busmaster Mode, initialise - * multicast filter... - */ - hp100_mmuinit(dev); - - /* We don't turn the interrupts on here - this is done by start_interface. */ - wait(); /* TODO: Do we really need this? */ - - /* Enable Hardware (e.g. unreset) */ - hp100_cascade_reset(dev, 0); - - /* ------- initialisation complete ----------- */ - - /* Finally try to log in the Hub if there may be a VG connection. 
*/ - if ((lp->lan_type == HP100_LAN_100) || (lp->lan_type == HP100_LAN_ERR)) - hp100_login_to_vg_hub(dev, 0); /* relogin */ - -} - - -/* - * mmuinit - Reinitialise Cascade MMU and MAC settings. - * Note: Must already be in reset and leaves card in reset. - */ -static void hp100_mmuinit(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - struct hp100_private *lp = netdev_priv(dev); - int i; - -#ifdef HP100_DEBUG_B - hp100_outw(0x4203, TRACE); - printk("hp100: %s: mmuinit\n", dev->name); -#endif - -#ifdef HP100_DEBUG - if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) { - printk("hp100: %s: Not in reset when entering mmuinit. Fix me.\n", dev->name); - return; - } -#endif - - /* Make sure IRQs are masked off and ack'ed. */ - hp100_page(PERFORMANCE); - hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */ - hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */ - - /* - * Enable Hardware - * - Clear Debug En, Rx Hdr Pipe, EE En, I/O En, Fake Int and Intr En - * - Set Tri-State Int, Bus Master Rd/Wr, and Mem Map Disable - * - Clear Priority, Advance Pkt and Xmit Cmd - */ - - hp100_outw(HP100_DEBUG_EN | - HP100_RX_HDR | - HP100_EE_EN | HP100_RESET_HB | - HP100_IO_EN | - HP100_FAKE_INT | - HP100_INT_EN | HP100_RESET_LB, OPTION_LSW); - - hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW); - - if (lp->mode == 1) { /* busmaster */ - hp100_outw(HP100_BM_WRITE | - HP100_BM_READ | - HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW); - } else if (lp->mode == 2) { /* memory mapped */ - hp100_outw(HP100_BM_WRITE | - HP100_BM_READ | HP100_RESET_HB, OPTION_LSW); - hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW); - hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW); - hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW); - } else if (lp->mode == 3) { /* i/o mapped mode */ - hp100_outw(HP100_MMAP_DIS | HP100_SET_HB | - HP100_IO_EN | HP100_SET_LB, OPTION_LSW); - } - - hp100_page(HW_MAP); - hp100_outb(0, EARLYRXCFG); - hp100_outw(0, EARLYTXCFG); - - /* - * Enable Bus Master mode - */ - if (lp->mode == 1) { /* busmaster */ - /* Experimental: Set some PCI configuration bits */ - hp100_page(HW_MAP); - hp100_andb(~HP100_PDL_USE3, MODECTRL1); /* BM engine read maximum */ - hp100_andb(~HP100_TX_DUALQ, MODECTRL1); /* No Queue for Priority TX */ - - /* PCI Bus failures should result in a Misc. Interrupt */ - hp100_orb(HP100_EN_BUS_FAIL, MODECTRL2); - - hp100_outw(HP100_BM_READ | HP100_BM_WRITE | HP100_SET_HB, OPTION_LSW); - hp100_page(HW_MAP); - /* Use Burst Mode and switch on PAGE_CK */ - hp100_orb(HP100_BM_BURST_RD | HP100_BM_BURST_WR, BM); - if ((lp->chip == HP100_CHIPID_RAINIER) || (lp->chip == HP100_CHIPID_SHASTA)) - hp100_orb(HP100_BM_PAGE_CK, BM); - hp100_orb(HP100_BM_MASTER, BM); - } else { /* not busmaster */ - - hp100_page(HW_MAP); - hp100_andb(~HP100_BM_MASTER, BM); - } - - /* - * Divide card memory into regions for Rx, Tx and, if non-ETR chip, PDLs - */ - hp100_page(MMU_CFG); - if (lp->mode == 1) { /* only needed for Busmaster */ - int xmit_stop, recv_stop; - - if ((lp->chip == HP100_CHIPID_RAINIER) || - (lp->chip == HP100_CHIPID_SHASTA)) { - int pdl_stop; - - /* - * Each pdl is 508 bytes long. (63 frags * 4 bytes for address and - * 4 bytes for header). We will leave NUM_RXPDLS * 508 (rounded - * to the next higher 1k boundary) bytes for the rx-pdl's - * Note: For non-etr chips the transmit stop register must be - * programmed on a 1k boundary, i.e. bits 9:0 must be zero. 
- */ - pdl_stop = lp->memory_size; - xmit_stop = (pdl_stop - 508 * (MAX_RX_PDL) - 16) & ~(0x03ff); - recv_stop = (xmit_stop * (lp->rx_ratio) / 100) & ~(0x03ff); - hp100_outw((pdl_stop >> 4) - 1, PDL_MEM_STOP); -#ifdef HP100_DEBUG_BM - printk("hp100: %s: PDL_STOP = 0x%x\n", dev->name, pdl_stop); -#endif - } else { - /* ETR chip (Lassen) in busmaster mode */ - xmit_stop = (lp->memory_size) - 1; - recv_stop = ((lp->memory_size * lp->rx_ratio) / 100) & ~(0x03ff); - } - - hp100_outw(xmit_stop >> 4, TX_MEM_STOP); - hp100_outw(recv_stop >> 4, RX_MEM_STOP); -#ifdef HP100_DEBUG_BM - printk("hp100: %s: TX_STOP = 0x%x\n", dev->name, xmit_stop >> 4); - printk("hp100: %s: RX_STOP = 0x%x\n", dev->name, recv_stop >> 4); -#endif - } else { - /* Slave modes (memory mapped and programmed io) */ - hp100_outw((((lp->memory_size * lp->rx_ratio) / 100) >> 4), RX_MEM_STOP); - hp100_outw(((lp->memory_size - 1) >> 4), TX_MEM_STOP); -#ifdef HP100_DEBUG - printk("hp100: %s: TX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(TX_MEM_STOP)); - printk("hp100: %s: RX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(RX_MEM_STOP)); -#endif - } - - /* Write MAC address into page 1 */ - hp100_page(MAC_ADDRESS); - for (i = 0; i < 6; i++) - hp100_outb(dev->dev_addr[i], MAC_ADDR + i); - - /* Zero the multicast hash registers */ - for (i = 0; i < 8; i++) - hp100_outb(0x0, HASH_BYTE0 + i); - - /* Set up MAC defaults */ - hp100_page(MAC_CTRL); - - /* Go to LAN Page and zero all filter bits */ - /* Zero accept error, accept multicast, accept broadcast and accept */ - /* all directed packet bits */ - hp100_andb(~(HP100_RX_EN | - HP100_TX_EN | - HP100_ACC_ERRORED | - HP100_ACC_MC | - HP100_ACC_BC | HP100_ACC_PHY), MAC_CFG_1); - - hp100_outb(0x00, MAC_CFG_2); - - /* Zero the frame format bit. This works around a training bug in the */ - /* new hubs. */ - hp100_outb(0x00, VG_LAN_CFG_2); /* (use 802.3) */ - - if (lp->priority_tx) - hp100_outb(HP100_PRIORITY_TX | HP100_SET_LB, OPTION_MSW); - else - hp100_outb(HP100_PRIORITY_TX | HP100_RESET_LB, OPTION_MSW); - - hp100_outb(HP100_ADV_NXT_PKT | - HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW); - - /* If busmaster, initialize the PDLs */ - if (lp->mode == 1) - hp100_init_pdls(dev); - - /* Go to performance page and initialize isr and imr registers */ - hp100_page(PERFORMANCE); - hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */ - hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */ -} - -/* - * open/close functions - */ - -static int hp100_open(struct net_device *dev) -{ - struct hp100_private *lp = netdev_priv(dev); -#ifdef HP100_DEBUG_B - int ioaddr = dev->base_addr; -#endif - -#ifdef HP100_DEBUG_B - hp100_outw(0x4204, TRACE); - printk("hp100: %s: open\n", dev->name); -#endif - - /* New: if bus is PCI or EISA, interrupts might be shared interrupts */ - if (request_irq(dev->irq, hp100_interrupt, - lp->bus == HP100_BUS_PCI || lp->bus == - HP100_BUS_EISA ? 
IRQF_SHARED : 0, - dev->name, dev)) { - printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq); - return -EAGAIN; - } - - netif_trans_update(dev); /* prevent tx timeout */ - netif_start_queue(dev); - - lp->lan_type = hp100_sense_lan(dev); - lp->mac1_mode = HP100_MAC1MODE3; - lp->mac2_mode = HP100_MAC2MODE3; - memset(&lp->hash_bytes, 0x00, 8); - - hp100_stop_interface(dev); - - hp100_hwinit(dev); - - hp100_start_interface(dev); /* sets mac modes, enables interrupts */ - - return 0; -} - -/* The close function is called when the interface is to be brought down */ -static int hp100_close(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - struct hp100_private *lp = netdev_priv(dev); - -#ifdef HP100_DEBUG_B - hp100_outw(0x4205, TRACE); - printk("hp100: %s: close\n", dev->name); -#endif - - hp100_page(PERFORMANCE); - hp100_outw(0xfefe, IRQ_MASK); /* mask off all IRQs */ - - hp100_stop_interface(dev); - - if (lp->lan_type == HP100_LAN_100) - lp->hub_status = hp100_login_to_vg_hub(dev, 0); - - netif_stop_queue(dev); - - free_irq(dev->irq, dev); - -#ifdef HP100_DEBUG - printk("hp100: %s: close LSW = 0x%x\n", dev->name, - hp100_inw(OPTION_LSW)); -#endif - - return 0; -} - - -/* - * Configure the PDL Rx rings and LAN - */ -static void hp100_init_pdls(struct net_device *dev) -{ - struct hp100_private *lp = netdev_priv(dev); - hp100_ring_t *ringptr; - u_int *pageptr; /* Warning : increment by 4 - Jean II */ - int i; - -#ifdef HP100_DEBUG_B - int ioaddr = dev->base_addr; -#endif - -#ifdef HP100_DEBUG_B - hp100_outw(0x4206, TRACE); - printk("hp100: %s: init pdls\n", dev->name); -#endif - - if (!lp->page_vaddr_algn) - printk("hp100: %s: Warning: lp->page_vaddr_algn not initialised!\n", dev->name); - else { - /* pageptr shall point into the DMA accessible memory region */ - /* we use this pointer to status the upper limit of allocated */ - /* memory in the allocated page. */ - /* note: align the pointers to the pci cache line size */ - memset(lp->page_vaddr_algn, 0, MAX_RINGSIZE); /* Zero Rx/Tx ring page */ - pageptr = lp->page_vaddr_algn; - - lp->rxrcommit = 0; - ringptr = lp->rxrhead = lp->rxrtail = &(lp->rxring[0]); - - /* Initialise Rx Ring */ - for (i = MAX_RX_PDL - 1; i >= 0; i--) { - lp->rxring[i].next = ringptr; - ringptr = &(lp->rxring[i]); - pageptr += hp100_init_rxpdl(dev, ringptr, pageptr); - } - - /* Initialise Tx Ring */ - lp->txrcommit = 0; - ringptr = lp->txrhead = lp->txrtail = &(lp->txring[0]); - for (i = MAX_TX_PDL - 1; i >= 0; i--) { - lp->txring[i].next = ringptr; - ringptr = &(lp->txring[i]); - pageptr += hp100_init_txpdl(dev, ringptr, pageptr); - } - } -} - - -/* These functions "format" the entries in the pdl structure */ -/* They return how much memory the fragments need. */ -static int hp100_init_rxpdl(struct net_device *dev, - register hp100_ring_t * ringptr, - register u32 * pdlptr) -{ - /* pdlptr is starting address for this pdl */ - - if (0 != (((unsigned long) pdlptr) & 0xf)) - printk("hp100: %s: Init rxpdl: Unaligned pdlptr 0x%lx.\n", - dev->name, (unsigned long) pdlptr); - - ringptr->pdl = pdlptr + 1; - ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1); - ringptr->skb = NULL; - - /* - * Write address and length of first PDL Fragment (which is used for - * storing the RX-Header - * We use the 4 bytes _before_ the PDH in the pdl memory area to - * store this information. 
(PDH is at offset 0x04) - */ - /* Note that pdlptr+1 and not pdlptr is the pointer to the PDH */ - - *(pdlptr + 2) = (u_int) virt_to_whatever(dev, pdlptr); /* Address Frag 1 */ - *(pdlptr + 3) = 4; /* Length Frag 1 */ - - return roundup(MAX_RX_FRAG * 2 + 2, 4); -} - - -static int hp100_init_txpdl(struct net_device *dev, - register hp100_ring_t * ringptr, - register u32 * pdlptr) -{ - if (0 != (((unsigned long) pdlptr) & 0xf)) - printk("hp100: %s: Init txpdl: Unaligned pdlptr 0x%lx.\n", dev->name, (unsigned long) pdlptr); - - ringptr->pdl = pdlptr; /* +1; */ - ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */ - ringptr->skb = NULL; - - return roundup(MAX_TX_FRAG * 2 + 2, 4); -} - -/* - * hp100_build_rx_pdl allocates an skb_buff of maximum size plus two bytes - * for possible odd word alignment rounding up to next dword and set PDL - * address for fragment#2 - * Returns: 0 if unable to allocate skb_buff - * 1 if successful - */ -static int hp100_build_rx_pdl(hp100_ring_t * ringptr, - struct net_device *dev) -{ -#ifdef HP100_DEBUG_B - int ioaddr = dev->base_addr; -#endif -#ifdef HP100_DEBUG_BM - u_int *p; -#endif - -#ifdef HP100_DEBUG_B - hp100_outw(0x4207, TRACE); - printk("hp100: %s: build rx pdl\n", dev->name); -#endif - - /* Allocate skb buffer of maximum size */ - /* Note: This depends on the alloc_skb functions allocating more - * space than requested, i.e. aligning to 16bytes */ - - ringptr->skb = netdev_alloc_skb(dev, roundup(MAX_ETHER_SIZE + 2, 4)); - - if (NULL != ringptr->skb) { - /* - * Reserve 2 bytes at the head of the buffer to land the IP header - * on a long word boundary (According to the Network Driver section - * in the Linux KHG, this should help to increase performance.) - */ - skb_reserve(ringptr->skb, 2); - - ringptr->skb->data = skb_put(ringptr->skb, MAX_ETHER_SIZE); - - /* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */ - /* Note: 1st Fragment is used for the 4 byte packet status - * (receive header). Its PDL entries are set up by init_rxpdl. So - * here we only have to set up the PDL fragment entries for the data - * part. Those 4 bytes will be stored in the DMA memory region - * directly before the PDL. - */ -#ifdef HP100_DEBUG_BM - printk("hp100: %s: build_rx_pdl: PDH@0x%x, skb->data (len %d) at 0x%x\n", - dev->name, (u_int) ringptr->pdl, - roundup(MAX_ETHER_SIZE + 2, 4), - (unsigned int) ringptr->skb->data); -#endif - - /* Conversion to new PCI API : map skbuf data to PCI bus. - * Doc says it's OK for EISA as well - Jean II */ - ringptr->pdl[0] = 0x00020000; /* Write PDH */ - ringptr->pdl[3] = pdl_map_data(netdev_priv(dev), - ringptr->skb->data); - ringptr->pdl[4] = MAX_ETHER_SIZE; /* Length of Data */ - -#ifdef HP100_DEBUG_BM - for (p = (ringptr->pdl); p < (ringptr->pdl + 5); p++) - printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n", dev->name, (u_int) p, (u_int) * p); -#endif - return 1; - } - /* else: */ - /* alloc_skb failed (no memory) -> still can receive the header - * fragment into PDL memory. make PDL safe by clearing msgptr and - * making the PDL only 1 fragment (i.e. the 4 byte packet status) - */ -#ifdef HP100_DEBUG_BM - printk("hp100: %s: build_rx_pdl: PDH@0x%x, No space for skb.\n", dev->name, (u_int) ringptr->pdl); -#endif - - ringptr->pdl[0] = 0x00010000; /* PDH: Count=1 Fragment */ - - return 0; -} - -/* - * hp100_rxfill - attempt to fill the Rx Ring will empty skb's - * - * Makes assumption that skb's are always contiguous memory areas and - * therefore PDLs contain only 2 physical fragments. 
- * - While the number of Rx PDLs with buffers is less than maximum - * a. Get a maximum packet size skb - * b. Put the physical address of the buffer into the PDL. - * c. Output physical address of PDL to adapter. - */ -static void hp100_rxfill(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - struct hp100_private *lp = netdev_priv(dev); - hp100_ring_t *ringptr; - -#ifdef HP100_DEBUG_B - hp100_outw(0x4208, TRACE); - printk("hp100: %s: rxfill\n", dev->name); -#endif - - hp100_page(PERFORMANCE); - - while (lp->rxrcommit < MAX_RX_PDL) { - /* - ** Attempt to get a buffer and build a Rx PDL. - */ - ringptr = lp->rxrtail; - if (0 == hp100_build_rx_pdl(ringptr, dev)) { - return; /* None available, return */ - } - - /* Hand this PDL over to the card */ - /* Note: This needs performance page selected! */ -#ifdef HP100_DEBUG_BM - printk("hp100: %s: rxfill: Hand to card: pdl #%d @0x%x phys:0x%x, buffer: 0x%x\n", - dev->name, lp->rxrcommit, (u_int) ringptr->pdl, - (u_int) ringptr->pdl_paddr, (u_int) ringptr->pdl[3]); -#endif - - hp100_outl((u32) ringptr->pdl_paddr, RX_PDA); - - lp->rxrcommit += 1; - lp->rxrtail = ringptr->next; - } -} - -/* - * BM_shutdown - shutdown bus mastering and leave chip in reset state - */ - -static void hp100_BM_shutdown(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - struct hp100_private *lp = netdev_priv(dev); - unsigned long time; - -#ifdef HP100_DEBUG_B - hp100_outw(0x4209, TRACE); - printk("hp100: %s: bm shutdown\n", dev->name); -#endif - - hp100_page(PERFORMANCE); - hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */ - hp100_outw(0xffff, IRQ_STATUS); /* Ack all ints */ - - /* Ensure Interrupts are off */ - hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW); - - /* Disable all MAC activity */ - hp100_page(MAC_CTRL); - hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); /* stop rx/tx */ - - /* If cascade MMU is not already in reset */ - if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) { - /* Wait 1.3ms (10Mb max packet time) to ensure MAC is idle so - * MMU pointers will not be reset out from underneath - */ - hp100_page(MAC_CTRL); - for (time = 0; time < 5000; time++) { - if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE)) - break; - } - - /* Shutdown algorithm depends on the generation of Cascade */ - if (lp->chip == HP100_CHIPID_LASSEN) { /* ETR shutdown/reset */ - /* Disable Busmaster mode and wait for bit to go to zero. */ - hp100_page(HW_MAP); - hp100_andb(~HP100_BM_MASTER, BM); - /* 100 ms timeout */ - for (time = 0; time < 32000; time++) { - if (0 == (hp100_inb(BM) & HP100_BM_MASTER)) - break; - } - } else { /* Shasta or Rainier Shutdown/Reset */ - /* To ensure all bus master inloading activity has ceased, - * wait for no Rx PDAs or no Rx packets on card. - */ - hp100_page(PERFORMANCE); - /* 100 ms timeout */ - for (time = 0; time < 10000; time++) { - /* RX_PDL: PDLs not executed. */ - /* RX_PKT_CNT: RX'd packets on card. */ - if ((hp100_inb(RX_PDL) == 0) && (hp100_inb(RX_PKT_CNT) == 0)) - break; - } - - if (time >= 10000) - printk("hp100: %s: BM shutdown error.\n", dev->name); - - /* To ensure all bus master outloading activity has ceased, - * wait until the Tx PDA count goes to zero or no more Tx space - * available in the Tx region of the card. 
- */ - /* 100 ms timeout */ - for (time = 0; time < 10000; time++) { - if ((0 == hp100_inb(TX_PKT_CNT)) && - (0 != (hp100_inb(TX_MEM_FREE) & HP100_AUTO_COMPARE))) - break; - } - - /* Disable Busmaster mode */ - hp100_page(HW_MAP); - hp100_andb(~HP100_BM_MASTER, BM); - } /* end of shutdown procedure for non-etr parts */ - - hp100_cascade_reset(dev, 1); - } - hp100_page(PERFORMANCE); - /* hp100_outw( HP100_BM_READ | HP100_BM_WRITE | HP100_RESET_HB, OPTION_LSW ); */ - /* Busmaster mode should be shut down now. */ -} - -static int hp100_check_lan(struct net_device *dev) -{ - struct hp100_private *lp = netdev_priv(dev); - - if (lp->lan_type < 0) { /* no LAN type detected yet? */ - hp100_stop_interface(dev); - if ((lp->lan_type = hp100_sense_lan(dev)) < 0) { - printk("hp100: %s: no connection found - check wire\n", dev->name); - hp100_start_interface(dev); /* 10Mb/s RX packets maybe handled */ - return -EIO; - } - if (lp->lan_type == HP100_LAN_100) - lp->hub_status = hp100_login_to_vg_hub(dev, 0); /* relogin */ - hp100_start_interface(dev); - } - return 0; -} - -/* - * transmit functions - */ - -/* tx function for busmaster mode */ -static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb, - struct net_device *dev) -{ - unsigned long flags; - int i, ok_flag; - int ioaddr = dev->base_addr; - struct hp100_private *lp = netdev_priv(dev); - hp100_ring_t *ringptr; - -#ifdef HP100_DEBUG_B - hp100_outw(0x4210, TRACE); - printk("hp100: %s: start_xmit_bm\n", dev->name); -#endif - if (skb->len <= 0) - goto drop; - - if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - - /* Get Tx ring tail pointer */ - if (lp->txrtail->next == lp->txrhead) { - /* No memory. */ -#ifdef HP100_DEBUG - printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name); -#endif - /* not waited long enough since last tx? */ - if (time_before(jiffies, dev_trans_start(dev) + HZ)) - goto drop; - - if (hp100_check_lan(dev)) - goto drop; - - if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) { - /* we have a 100Mb/s adapter but it isn't connected to hub */ - printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name); - hp100_stop_interface(dev); - lp->hub_status = hp100_login_to_vg_hub(dev, 0); - hp100_start_interface(dev); - } else { - spin_lock_irqsave(&lp->lock, flags); - hp100_ints_off(); /* Useful ? Jean II */ - i = hp100_sense_lan(dev); - hp100_ints_on(); - spin_unlock_irqrestore(&lp->lock, flags); - if (i == HP100_LAN_ERR) - printk("hp100: %s: link down detected\n", dev->name); - else if (lp->lan_type != i) { /* cable change! */ - /* it's very hard - all network settings must be changed!!! */ - printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name); - lp->lan_type = i; - hp100_stop_interface(dev); - if (lp->lan_type == HP100_LAN_100) - lp->hub_status = hp100_login_to_vg_hub(dev, 0); - hp100_start_interface(dev); - } else { - printk("hp100: %s: interface reset\n", dev->name); - hp100_stop_interface(dev); - if (lp->lan_type == HP100_LAN_100) - lp->hub_status = hp100_login_to_vg_hub(dev, 0); - hp100_start_interface(dev); - } - } - - goto drop; - } - - /* - * we have to turn int's off before modifying this, otherwise - * a tx_pdl_cleanup could occur at the same time - */ - spin_lock_irqsave(&lp->lock, flags); - ringptr = lp->txrtail; - lp->txrtail = ringptr->next; - - /* Check whether packet has minimal packet size */ - ok_flag = skb->len >= HP100_MIN_PACKET_SIZE; - i = ok_flag ? 
skb->len : HP100_MIN_PACKET_SIZE; - - ringptr->skb = skb; - ringptr->pdl[0] = ((1 << 16) | i); /* PDH: 1 Fragment & length */ - if (lp->chip == HP100_CHIPID_SHASTA) { - /* TODO:Could someone who has the EISA card please check if this works? */ - ringptr->pdl[2] = i; - } else { /* Lassen */ - /* In the PDL, don't use the padded size but the real packet size: */ - ringptr->pdl[2] = skb->len; /* 1st Frag: Length of frag */ - } - /* Conversion to new PCI API : map skbuf data to PCI bus. - * Doc says it's OK for EISA as well - Jean II */ - ringptr->pdl[1] = ((u32) pci_map_single(lp->pci_dev, skb->data, ringptr->pdl[2], PCI_DMA_TODEVICE)); /* 1st Frag: Adr. of data */ - - /* Hand this PDL to the card. */ - hp100_outl(ringptr->pdl_paddr, TX_PDA_L); /* Low Prio. Queue */ - - lp->txrcommit++; - - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - - spin_unlock_irqrestore(&lp->lock, flags); - - return NETDEV_TX_OK; - -drop: - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - - -/* clean_txring checks if packets have been sent by the card by reading - * the TX_PDL register from the performance page and comparing it to the - * number of committed packets. It then frees the skb's of the packets that - * obviously have been sent to the network. - * - * Needs the PERFORMANCE page selected. - */ -static void hp100_clean_txring(struct net_device *dev) -{ - struct hp100_private *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - int donecount; - -#ifdef HP100_DEBUG_B - hp100_outw(0x4211, TRACE); - printk("hp100: %s: clean txring\n", dev->name); -#endif - - /* How many PDLs have been transmitted? */ - donecount = (lp->txrcommit) - hp100_inb(TX_PDL); - -#ifdef HP100_DEBUG - if (donecount > MAX_TX_PDL) - printk("hp100: %s: Warning: More PDLs transmitted than committed to card???\n", dev->name); -#endif - - for (; 0 != donecount; donecount--) { -#ifdef HP100_DEBUG_BM - printk("hp100: %s: Free skb: data @0x%.8x txrcommit=0x%x TXPDL=0x%x, done=0x%x\n", - dev->name, (u_int) lp->txrhead->skb->data, - lp->txrcommit, hp100_inb(TX_PDL), donecount); -#endif - /* Conversion to new PCI API : NOP */ - pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE); - dev_consume_skb_any(lp->txrhead->skb); - lp->txrhead->skb = NULL; - lp->txrhead = lp->txrhead->next; - lp->txrcommit--; - } -} - -/* tx function for slave modes */ -static netdev_tx_t hp100_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - unsigned long flags; - int i, ok_flag; - int ioaddr = dev->base_addr; - u_short val; - struct hp100_private *lp = netdev_priv(dev); - -#ifdef HP100_DEBUG_B - hp100_outw(0x4212, TRACE); - printk("hp100: %s: start_xmit\n", dev->name); -#endif - if (skb->len <= 0) - goto drop; - - if (hp100_check_lan(dev)) - goto drop; - - /* If there is not enough free memory on the card... */ - i = hp100_inl(TX_MEM_FREE) & 0x7fffffff; - if (!(((i / 2) - 539) > (skb->len + 16) && (hp100_inb(TX_PKT_CNT) < 255))) { -#ifdef HP100_DEBUG - printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i); -#endif - /* not waited long enough since last failed tx try? 
*/ - if (time_before(jiffies, dev_trans_start(dev) + HZ)) { -#ifdef HP100_DEBUG - printk("hp100: %s: trans_start timing problem\n", - dev->name); -#endif - goto drop; - } - if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) { - /* we have a 100Mb/s adapter but it isn't connected to hub */ - printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name); - hp100_stop_interface(dev); - lp->hub_status = hp100_login_to_vg_hub(dev, 0); - hp100_start_interface(dev); - } else { - spin_lock_irqsave(&lp->lock, flags); - hp100_ints_off(); /* Useful ? Jean II */ - i = hp100_sense_lan(dev); - hp100_ints_on(); - spin_unlock_irqrestore(&lp->lock, flags); - if (i == HP100_LAN_ERR) - printk("hp100: %s: link down detected\n", dev->name); - else if (lp->lan_type != i) { /* cable change! */ - /* it's very hard - all network setting must be changed!!! */ - printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name); - lp->lan_type = i; - hp100_stop_interface(dev); - if (lp->lan_type == HP100_LAN_100) - lp->hub_status = hp100_login_to_vg_hub(dev, 0); - hp100_start_interface(dev); - } else { - printk("hp100: %s: interface reset\n", dev->name); - hp100_stop_interface(dev); - if (lp->lan_type == HP100_LAN_100) - lp->hub_status = hp100_login_to_vg_hub(dev, 0); - hp100_start_interface(dev); - mdelay(1); - } - } - goto drop; - } - - for (i = 0; i < 6000 && (hp100_inb(OPTION_MSW) & HP100_TX_CMD); i++) { -#ifdef HP100_DEBUG_TX - printk("hp100: %s: start_xmit: busy\n", dev->name); -#endif - } - - spin_lock_irqsave(&lp->lock, flags); - hp100_ints_off(); - val = hp100_inw(IRQ_STATUS); - /* Ack / clear the interrupt TX_COMPLETE interrupt - this interrupt is set - * when the current packet being transmitted on the wire is completed. */ - hp100_outw(HP100_TX_COMPLETE, IRQ_STATUS); -#ifdef HP100_DEBUG_TX - printk("hp100: %s: start_xmit: irq_status=0x%.4x, irqmask=0x%.4x, len=%d\n", - dev->name, val, hp100_inw(IRQ_MASK), (int) skb->len); -#endif - - ok_flag = skb->len >= HP100_MIN_PACKET_SIZE; - i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE; - - hp100_outw(i, DATA32); /* tell card the total packet length */ - hp100_outw(i, FRAGMENT_LEN); /* and first/only fragment length */ - - if (lp->mode == 2) { /* memory mapped */ - /* Note: The J2585B needs alignment to 32bits here! */ - memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3); - if (!ok_flag) - memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len); - } else { /* programmed i/o */ - outsl(ioaddr + HP100_REG_DATA32, skb->data, - (skb->len + 3) >> 2); - if (!ok_flag) - for (i = (skb->len + 3) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4) - hp100_outl(0, DATA32); - } - - hp100_outb(HP100_TX_CMD | HP100_SET_LB, OPTION_MSW); /* send packet */ - - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - hp100_ints_on(); - spin_unlock_irqrestore(&lp->lock, flags); - - dev_consume_skb_any(skb); - -#ifdef HP100_DEBUG_TX - printk("hp100: %s: start_xmit: end\n", dev->name); -#endif - - return NETDEV_TX_OK; - -drop: - dev_kfree_skb(skb); - return NETDEV_TX_OK; - -} - - -/* - * Receive Function (Non-Busmaster mode) - * Called when an "Receive Packet" interrupt occurs, i.e. the receive - * packet counter is non-zero. - * For non-busmaster, this function does the whole work of transferring - * the packet to the host memory and then up to higher layers via skb - * and netif_rx. 
- */ - -static void hp100_rx(struct net_device *dev) -{ - int packets, pkt_len; - int ioaddr = dev->base_addr; - struct hp100_private *lp = netdev_priv(dev); - u_int header; - struct sk_buff *skb; - -#ifdef DEBUG_B - hp100_outw(0x4213, TRACE); - printk("hp100: %s: rx\n", dev->name); -#endif - - /* First get indication of received lan packet */ - /* RX_PKT_CND indicates the number of packets which have been fully */ - /* received onto the card but have not been fully transferred of the card */ - packets = hp100_inb(RX_PKT_CNT); -#ifdef HP100_DEBUG_RX - if (packets > 1) - printk("hp100: %s: rx: waiting packets = %d\n", dev->name, packets); -#endif - - while (packets-- > 0) { - /* If ADV_NXT_PKT is still set, we have to wait until the card has */ - /* really advanced to the next packet. */ - for (pkt_len = 0; pkt_len < 6000 && (hp100_inb(OPTION_MSW) & HP100_ADV_NXT_PKT); pkt_len++) { -#ifdef HP100_DEBUG_RX - printk ("hp100: %s: rx: busy, remaining packets = %d\n", dev->name, packets); -#endif - } - - /* First we get the header, which contains information about the */ - /* actual length of the received packet. */ - if (lp->mode == 2) { /* memory mapped mode */ - header = readl(lp->mem_ptr_virt); - } else /* programmed i/o */ - header = hp100_inl(DATA32); - - pkt_len = ((header & HP100_PKT_LEN_MASK) + 3) & ~3; - -#ifdef HP100_DEBUG_RX - printk("hp100: %s: rx: new packet - length=%d, errors=0x%x, dest=0x%x\n", - dev->name, header & HP100_PKT_LEN_MASK, - (header >> 16) & 0xfff8, (header >> 16) & 7); -#endif - - /* Now we allocate the skb and transfer the data into it. */ - skb = netdev_alloc_skb(dev, pkt_len + 2); - if (skb == NULL) { /* Not enough memory->drop packet */ -#ifdef HP100_DEBUG - printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n", - dev->name, pkt_len); -#endif - dev->stats.rx_dropped++; - } else { /* skb successfully allocated */ - - u_char *ptr; - - skb_reserve(skb,2); - - /* ptr to start of the sk_buff data area */ - skb_put(skb, pkt_len); - ptr = skb->data; - - /* Now transfer the data from the card into that area */ - if (lp->mode == 2) - memcpy_fromio(ptr, lp->mem_ptr_virt,pkt_len); - else /* io mapped */ - insl(ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2); - - skb->protocol = eth_type_trans(skb, dev); - -#ifdef HP100_DEBUG_RX - printk("hp100: %s: rx: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", - dev->name, ptr[0], ptr[1], ptr[2], ptr[3], - ptr[4], ptr[5], ptr[6], ptr[7], ptr[8], - ptr[9], ptr[10], ptr[11]); -#endif - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - - /* Indicate the card that we have got the packet */ - hp100_outb(HP100_ADV_NXT_PKT | HP100_SET_LB, OPTION_MSW); - - switch (header & 0x00070000) { - case (HP100_MULTI_ADDR_HASH << 16): - case (HP100_MULTI_ADDR_NO_HASH << 16): - dev->stats.multicast++; - break; - } - } /* end of while(there are packets) loop */ -#ifdef HP100_DEBUG_RX - printk("hp100_rx: %s: end\n", dev->name); -#endif -} - -/* - * Receive Function for Busmaster Mode - */ -static void hp100_rx_bm(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - struct hp100_private *lp = netdev_priv(dev); - hp100_ring_t *ptr; - u_int header; - int pkt_len; - -#ifdef HP100_DEBUG_B - hp100_outw(0x4214, TRACE); - printk("hp100: %s: rx_bm\n", dev->name); -#endif - -#ifdef HP100_DEBUG - if (0 == lp->rxrcommit) { - printk("hp100: %s: rx_bm called although no PDLs were committed to adapter?\n", dev->name); - return; - } else - /* RX_PKT_CNT states how many PDLs are currently formatted and 
available to - * the cards BM engine */ - if ((hp100_inw(RX_PKT_CNT) & 0x00ff) >= lp->rxrcommit) { - printk("hp100: %s: More packets received than committed? RX_PKT_CNT=0x%x, commit=0x%x\n", - dev->name, hp100_inw(RX_PKT_CNT) & 0x00ff, - lp->rxrcommit); - return; - } -#endif - - while ((lp->rxrcommit > hp100_inb(RX_PDL))) { - /* - * The packet was received into the pdl pointed to by lp->rxrhead ( - * the oldest pdl in the ring - */ - - /* First we get the header, which contains information about the */ - /* actual length of the received packet. */ - - ptr = lp->rxrhead; - - header = *(ptr->pdl - 1); - pkt_len = (header & HP100_PKT_LEN_MASK); - - /* Conversion to new PCI API : NOP */ - pci_unmap_single(lp->pci_dev, (dma_addr_t) ptr->pdl[3], MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE); - -#ifdef HP100_DEBUG_BM - printk("hp100: %s: rx_bm: header@0x%x=0x%x length=%d, errors=0x%x, dest=0x%x\n", - dev->name, (u_int) (ptr->pdl - 1), (u_int) header, - pkt_len, (header >> 16) & 0xfff8, (header >> 16) & 7); - printk("hp100: %s: RX_PDL_COUNT:0x%x TX_PDL_COUNT:0x%x, RX_PKT_CNT=0x%x PDH=0x%x, Data@0x%x len=0x%x\n", - dev->name, hp100_inb(RX_PDL), hp100_inb(TX_PDL), - hp100_inb(RX_PKT_CNT), (u_int) * (ptr->pdl), - (u_int) * (ptr->pdl + 3), (u_int) * (ptr->pdl + 4)); -#endif - - if ((pkt_len >= MIN_ETHER_SIZE) && - (pkt_len <= MAX_ETHER_SIZE)) { - if (ptr->skb == NULL) { - printk("hp100: %s: rx_bm: skb null\n", dev->name); - /* can happen if we only allocated room for the pdh due to memory shortage. */ - dev->stats.rx_dropped++; - } else { - skb_trim(ptr->skb, pkt_len); /* Shorten it */ - ptr->skb->protocol = - eth_type_trans(ptr->skb, dev); - - netif_rx(ptr->skb); /* Up and away... */ - - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - - switch (header & 0x00070000) { - case (HP100_MULTI_ADDR_HASH << 16): - case (HP100_MULTI_ADDR_NO_HASH << 16): - dev->stats.multicast++; - break; - } - } else { -#ifdef HP100_DEBUG - printk("hp100: %s: rx_bm: Received bad packet (length=%d)\n", dev->name, pkt_len); -#endif - if (ptr->skb != NULL) - dev_kfree_skb_any(ptr->skb); - dev->stats.rx_errors++; - } - - lp->rxrhead = lp->rxrhead->next; - - /* Allocate a new rx PDL (so lp->rxrcommit stays the same) */ - if (0 == hp100_build_rx_pdl(lp->rxrtail, dev)) { - /* No space for skb, header can still be received. */ -#ifdef HP100_DEBUG - printk("hp100: %s: rx_bm: No space for new PDL.\n", dev->name); -#endif - return; - } else { /* successfully allocated new PDL - put it in ringlist at tail. */ - hp100_outl((u32) lp->rxrtail->pdl_paddr, RX_PDA); - lp->rxrtail = lp->rxrtail->next; - } - - } -} - -/* - * statistics - */ -static struct net_device_stats *hp100_get_stats(struct net_device *dev) -{ - unsigned long flags; - int ioaddr = dev->base_addr; - struct hp100_private *lp = netdev_priv(dev); - -#ifdef HP100_DEBUG_B - hp100_outw(0x4215, TRACE); -#endif - - spin_lock_irqsave(&lp->lock, flags); - hp100_ints_off(); /* Useful ? Jean II */ - hp100_update_stats(dev); - hp100_ints_on(); - spin_unlock_irqrestore(&lp->lock, flags); - return &(dev->stats); -} - -static void hp100_update_stats(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - u_short val; - -#ifdef HP100_DEBUG_B - hp100_outw(0x4216, TRACE); - printk("hp100: %s: update-stats\n", dev->name); -#endif - - /* Note: Statistics counters clear when read. 
*/ - hp100_page(MAC_CTRL); - val = hp100_inw(DROPPED) & 0x0fff; - dev->stats.rx_errors += val; - dev->stats.rx_over_errors += val; - val = hp100_inb(CRC); - dev->stats.rx_errors += val; - dev->stats.rx_crc_errors += val; - val = hp100_inb(ABORT); - dev->stats.tx_errors += val; - dev->stats.tx_aborted_errors += val; - hp100_page(PERFORMANCE); -} - -static void hp100_misc_interrupt(struct net_device *dev) -{ -#ifdef HP100_DEBUG_B - int ioaddr = dev->base_addr; -#endif - -#ifdef HP100_DEBUG_B - int ioaddr = dev->base_addr; - hp100_outw(0x4216, TRACE); - printk("hp100: %s: misc_interrupt\n", dev->name); -#endif - - /* Note: Statistics counters clear when read. */ - dev->stats.rx_errors++; - dev->stats.tx_errors++; -} - -static void hp100_clear_stats(struct hp100_private *lp, int ioaddr) -{ - unsigned long flags; - -#ifdef HP100_DEBUG_B - hp100_outw(0x4217, TRACE); - printk("hp100: %s: clear_stats\n", dev->name); -#endif - - spin_lock_irqsave(&lp->lock, flags); - hp100_page(MAC_CTRL); /* get all statistics bytes */ - hp100_inw(DROPPED); - hp100_inb(CRC); - hp100_inb(ABORT); - hp100_page(PERFORMANCE); - spin_unlock_irqrestore(&lp->lock, flags); -} - - -/* - * multicast setup - */ - -/* - * Set or clear the multicast filter for this adapter. - */ - -static void hp100_set_multicast_list(struct net_device *dev) -{ - unsigned long flags; - int ioaddr = dev->base_addr; - struct hp100_private *lp = netdev_priv(dev); - -#ifdef HP100_DEBUG_B - hp100_outw(0x4218, TRACE); - printk("hp100: %s: set_mc_list\n", dev->name); -#endif - - spin_lock_irqsave(&lp->lock, flags); - hp100_ints_off(); - hp100_page(MAC_CTRL); - hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); /* stop rx/tx */ - - if (dev->flags & IFF_PROMISC) { - lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */ - lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */ - memset(&lp->hash_bytes, 0xff, 8); - } else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) { - lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */ - lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */ -#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! 
*/ - if (dev->flags & IFF_ALLMULTI) { - /* set hash filter to receive all multicast packets */ - memset(&lp->hash_bytes, 0xff, 8); - } else { - int i, idx; - u_char *addrs; - struct netdev_hw_addr *ha; - - memset(&lp->hash_bytes, 0x00, 8); -#ifdef HP100_DEBUG - printk("hp100: %s: computing hash filter - mc_count = %i\n", - dev->name, netdev_mc_count(dev)); -#endif - netdev_for_each_mc_addr(ha, dev) { - addrs = ha->addr; -#ifdef HP100_DEBUG - printk("hp100: %s: multicast = %pM, ", - dev->name, addrs); -#endif - for (i = idx = 0; i < 6; i++) { - idx ^= *addrs++ & 0x3f; - printk(":%02x:", idx); - } -#ifdef HP100_DEBUG - printk("idx = %i\n", idx); -#endif - lp->hash_bytes[idx >> 3] |= (1 << (idx & 7)); - } - } -#else - memset(&lp->hash_bytes, 0xff, 8); -#endif - } else { - lp->mac2_mode = HP100_MAC2MODE3; /* normal mode = get packets for me */ - lp->mac1_mode = HP100_MAC1MODE3; /* and broadcasts */ - memset(&lp->hash_bytes, 0x00, 8); - } - - if (((hp100_inb(MAC_CFG_1) & 0x0f) != lp->mac1_mode) || - (hp100_inb(MAC_CFG_2) != lp->mac2_mode)) { - int i; - - hp100_outb(lp->mac2_mode, MAC_CFG_2); - hp100_andb(HP100_MAC1MODEMASK, MAC_CFG_1); /* clear mac1 mode bits */ - hp100_orb(lp->mac1_mode, MAC_CFG_1); /* and set the new mode */ - - hp100_page(MAC_ADDRESS); - for (i = 0; i < 8; i++) - hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i); -#ifdef HP100_DEBUG - printk("hp100: %s: mac1 = 0x%x, mac2 = 0x%x, multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, lp->mac1_mode, lp->mac2_mode, - lp->hash_bytes[0], lp->hash_bytes[1], - lp->hash_bytes[2], lp->hash_bytes[3], - lp->hash_bytes[4], lp->hash_bytes[5], - lp->hash_bytes[6], lp->hash_bytes[7]); -#endif - - if (lp->lan_type == HP100_LAN_100) { -#ifdef HP100_DEBUG - printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name); -#endif - lp->hub_status = hp100_login_to_vg_hub(dev, 1); /* force a relogin to the hub */ - } - } else { - int i; - u_char old_hash_bytes[8]; - - hp100_page(MAC_ADDRESS); - for (i = 0; i < 8; i++) - old_hash_bytes[i] = hp100_inb(HASH_BYTE0 + i); - if (memcmp(old_hash_bytes, &lp->hash_bytes, 8)) { - for (i = 0; i < 8; i++) - hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i); -#ifdef HP100_DEBUG - printk("hp100: %s: multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, lp->hash_bytes[0], - lp->hash_bytes[1], lp->hash_bytes[2], - lp->hash_bytes[3], lp->hash_bytes[4], - lp->hash_bytes[5], lp->hash_bytes[6], - lp->hash_bytes[7]); -#endif - - if (lp->lan_type == HP100_LAN_100) { -#ifdef HP100_DEBUG - printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name); -#endif - lp->hub_status = hp100_login_to_vg_hub(dev, 1); /* force a relogin to the hub */ - } - } - } - - hp100_page(MAC_CTRL); - hp100_orb(HP100_RX_EN | HP100_RX_IDLE | /* enable rx */ - HP100_TX_EN | HP100_TX_IDLE, MAC_CFG_1); /* enable tx */ - - hp100_page(PERFORMANCE); - hp100_ints_on(); - spin_unlock_irqrestore(&lp->lock, flags); -} - -/* - * hardware interrupt handling - */ - -static irqreturn_t hp100_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *) dev_id; - struct hp100_private *lp = netdev_priv(dev); - - int ioaddr; - u_int val; - - if (dev == NULL) - return IRQ_NONE; - ioaddr = dev->base_addr; - - spin_lock(&lp->lock); - - hp100_ints_off(); - -#ifdef HP100_DEBUG_B - hp100_outw(0x4219, TRACE); -#endif - - /* hp100_page( PERFORMANCE ); */ - val = hp100_inw(IRQ_STATUS); -#ifdef HP100_DEBUG_IRQ - printk("hp100: %s: mode=%x,IRQ_STAT=0x%.4x,RXPKTCNT=0x%.2x RXPDL=0x%.2x 
TXPKTCNT=0x%.2x TXPDL=0x%.2x\n", - dev->name, lp->mode, (u_int) val, hp100_inb(RX_PKT_CNT), - hp100_inb(RX_PDL), hp100_inb(TX_PKT_CNT), hp100_inb(TX_PDL)); -#endif - - if (val == 0) { /* might be a shared interrupt */ - spin_unlock(&lp->lock); - hp100_ints_on(); - return IRQ_NONE; - } - /* We're only interested in those interrupts we really enabled. */ - /* val &= hp100_inw( IRQ_MASK ); */ - - /* - * RX_PDL_FILL_COMPL is set whenever a RX_PDL has been executed. A RX_PDL - * is considered executed whenever the RX_PDL data structure is no longer - * needed. - */ - if (val & HP100_RX_PDL_FILL_COMPL) { - if (lp->mode == 1) - hp100_rx_bm(dev); - else { - printk("hp100: %s: rx_pdl_fill_compl interrupt although not busmaster?\n", dev->name); - } - } - - /* - * The RX_PACKET interrupt is set, when the receive packet counter is - * non zero. We use this interrupt for receiving in slave mode. In - * busmaster mode, we use it to make sure we did not miss any rx_pdl_fill - * interrupts. If rx_pdl_fill_compl is not set and rx_packet is set, then - * we somehow have missed a rx_pdl_fill_compl interrupt. - */ - - if (val & HP100_RX_PACKET) { /* Receive Packet Counter is non zero */ - if (lp->mode != 1) /* non busmaster */ - hp100_rx(dev); - else if (!(val & HP100_RX_PDL_FILL_COMPL)) { - /* Shouldn't happen - maybe we missed a RX_PDL_FILL Interrupt? */ - hp100_rx_bm(dev); - } - } - - /* - * Ack. that we have noticed the interrupt and thereby allow next one. - * Note that this is now done after the slave rx function, since first - * acknowledging and then setting ADV_NXT_PKT caused an extra interrupt - * on the J2573. - */ - hp100_outw(val, IRQ_STATUS); - - /* - * RX_ERROR is set when a packet is dropped due to no memory resources on - * the card or when a RCV_ERR occurs. - * TX_ERROR is set when a TX_ABORT condition occurs in the MAC->exists - * only in the 802.3 MAC and happens when 16 collisions occur during a TX - */ - if (val & (HP100_TX_ERROR | HP100_RX_ERROR)) { -#ifdef HP100_DEBUG_IRQ - printk("hp100: %s: TX/RX Error IRQ\n", dev->name); -#endif - hp100_update_stats(dev); - if (lp->mode == 1) { - hp100_rxfill(dev); - hp100_clean_txring(dev); - } - } - - /* - * RX_PDA_ZERO is set when the PDA count goes from non-zero to zero. - */ - if ((lp->mode == 1) && (val & (HP100_RX_PDA_ZERO))) - hp100_rxfill(dev); - - /* - * HP100_TX_COMPLETE interrupt occurs when packet transmitted on wire - * is completed - */ - if ((lp->mode == 1) && (val & (HP100_TX_COMPLETE))) - hp100_clean_txring(dev); - - /* - * MISC_ERROR is set when either the LAN link goes down or a detected - * bus error occurs. - */ - if (val & HP100_MISC_ERROR) { /* New for J2585B */ -#ifdef HP100_DEBUG_IRQ - printk - ("hp100: %s: Misc. Error Interrupt - Check cabling.\n", - dev->name); -#endif - if (lp->mode == 1) { - hp100_clean_txring(dev); - hp100_rxfill(dev); - } - hp100_misc_interrupt(dev); - } - - spin_unlock(&lp->lock); - hp100_ints_on(); - return IRQ_HANDLED; -} - -/* - * some misc functions - */ - -static void hp100_start_interface(struct net_device *dev) -{ - unsigned long flags; - int ioaddr = dev->base_addr; - struct hp100_private *lp = netdev_priv(dev); - -#ifdef HP100_DEBUG_B - hp100_outw(0x4220, TRACE); - printk("hp100: %s: hp100_start_interface\n", dev->name); -#endif - - spin_lock_irqsave(&lp->lock, flags); - - /* Ensure the adapter does not want to request an interrupt when */ - /* enabling the IRQ line to be active on the bus (i.e. 
not tri-stated) */ - hp100_page(PERFORMANCE); - hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */ - hp100_outw(0xffff, IRQ_STATUS); /* ack all IRQs */ - hp100_outw(HP100_FAKE_INT | HP100_INT_EN | HP100_RESET_LB, - OPTION_LSW); - /* Un Tri-state int. TODO: Check if shared interrupts can be realised? */ - hp100_outw(HP100_TRI_INT | HP100_RESET_HB, OPTION_LSW); - - if (lp->mode == 1) { - /* Make sure BM bit is set... */ - hp100_page(HW_MAP); - hp100_orb(HP100_BM_MASTER, BM); - hp100_rxfill(dev); - } else if (lp->mode == 2) { - /* Enable memory mapping. Note: Don't do this when busmaster. */ - hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW); - } - - hp100_page(PERFORMANCE); - hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */ - hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */ - - /* enable a few interrupts: */ - if (lp->mode == 1) { /* busmaster mode */ - hp100_outw(HP100_RX_PDL_FILL_COMPL | - HP100_RX_PDA_ZERO | HP100_RX_ERROR | - /* HP100_RX_PACKET | */ - /* HP100_RX_EARLY_INT | */ HP100_SET_HB | - /* HP100_TX_PDA_ZERO | */ - HP100_TX_COMPLETE | - /* HP100_MISC_ERROR | */ - HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK); - } else { - hp100_outw(HP100_RX_PACKET | - HP100_RX_ERROR | HP100_SET_HB | - HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK); - } - - /* Note : before hp100_set_multicast_list(), because it will play with - * spinlock itself... Jean II */ - spin_unlock_irqrestore(&lp->lock, flags); - - /* Enable MAC Tx and RX, set MAC modes, ... */ - hp100_set_multicast_list(dev); -} - -static void hp100_stop_interface(struct net_device *dev) -{ - struct hp100_private *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - u_int val; - -#ifdef HP100_DEBUG_B - printk("hp100: %s: hp100_stop_interface\n", dev->name); - hp100_outw(0x4221, TRACE); -#endif - - if (lp->mode == 1) - hp100_BM_shutdown(dev); - else { - /* Note: MMAP_DIS will be reenabled by start_interface */ - hp100_outw(HP100_INT_EN | HP100_RESET_LB | - HP100_TRI_INT | HP100_MMAP_DIS | HP100_SET_HB, - OPTION_LSW); - val = hp100_inw(OPTION_LSW); - - hp100_page(MAC_CTRL); - hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); - - if (!(val & HP100_HW_RST)) - return; /* If reset, imm. return ... */ - /* ... else: busy wait until idle */ - for (val = 0; val < 6000; val++) - if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE)) { - hp100_page(PERFORMANCE); - return; - } - printk("hp100: %s: hp100_stop_interface - timeout\n", dev->name); - hp100_page(PERFORMANCE); - } -} - -static void hp100_load_eeprom(struct net_device *dev, u_short probe_ioaddr) -{ - int i; - int ioaddr = probe_ioaddr > 0 ? probe_ioaddr : dev->base_addr; - -#ifdef HP100_DEBUG_B - hp100_outw(0x4222, TRACE); -#endif - - hp100_page(EEPROM_CTRL); - hp100_andw(~HP100_EEPROM_LOAD, EEPROM_CTRL); - hp100_orw(HP100_EEPROM_LOAD, EEPROM_CTRL); - for (i = 0; i < 10000; i++) - if (!(hp100_inb(OPTION_MSW) & HP100_EE_LOAD)) - return; - printk("hp100: %s: hp100_load_eeprom - timeout\n", dev->name); -} - -/* Sense connection status. 
- * return values: LAN_10 - Connected to 10Mbit/s network - * LAN_100 - Connected to 100Mbit/s network - * LAN_ERR - not connected or 100Mbit/s Hub down - */ -static int hp100_sense_lan(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - u_short val_VG, val_10; - struct hp100_private *lp = netdev_priv(dev); - -#ifdef HP100_DEBUG_B - hp100_outw(0x4223, TRACE); -#endif - - hp100_page(MAC_CTRL); - val_10 = hp100_inb(10_LAN_CFG_1); - val_VG = hp100_inb(VG_LAN_CFG_1); - hp100_page(PERFORMANCE); -#ifdef HP100_DEBUG - printk("hp100: %s: sense_lan: val_VG = 0x%04x, val_10 = 0x%04x\n", - dev->name, val_VG, val_10); -#endif - - if (val_10 & HP100_LINK_BEAT_ST) /* 10Mb connection is active */ - return HP100_LAN_10; - - if (val_10 & HP100_AUI_ST) { /* have we BNC or AUI onboard? */ - /* - * This can be overriden by dos utility, so if this has no effect, - * perhaps you need to download that utility from HP and set card - * back to "auto detect". - */ - val_10 |= HP100_AUI_SEL | HP100_LOW_TH; - hp100_page(MAC_CTRL); - hp100_outb(val_10, 10_LAN_CFG_1); - hp100_page(PERFORMANCE); - return HP100_LAN_COAX; - } - - /* Those cards don't have a 100 Mbit connector */ - if ( !strcmp(lp->id, "HWP1920") || - (lp->pci_dev && - lp->pci_dev->vendor == PCI_VENDOR_ID && - (lp->pci_dev->device == PCI_DEVICE_ID_HP_J2970A || - lp->pci_dev->device == PCI_DEVICE_ID_HP_J2973A))) - return HP100_LAN_ERR; - - if (val_VG & HP100_LINK_CABLE_ST) /* Can hear the HUBs tone. */ - return HP100_LAN_100; - return HP100_LAN_ERR; -} - -static int hp100_down_vg_link(struct net_device *dev) -{ - struct hp100_private *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - unsigned long time; - long savelan, newlan; - -#ifdef HP100_DEBUG_B - hp100_outw(0x4224, TRACE); - printk("hp100: %s: down_vg_link\n", dev->name); -#endif - - hp100_page(MAC_CTRL); - time = jiffies + (HZ / 4); - do { - if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) - break; - if (!in_interrupt()) - schedule_timeout_interruptible(1); - } while (time_after(time, jiffies)); - - if (time_after_eq(jiffies, time)) /* no signal->no logout */ - return 0; - - /* Drop the VG Link by clearing the link up cmd and load addr. */ - - hp100_andb(~(HP100_LOAD_ADDR | HP100_LINK_CMD), VG_LAN_CFG_1); - hp100_orb(HP100_VG_SEL, VG_LAN_CFG_1); - - /* Conditionally stall for >250ms on Link-Up Status (to go down) */ - time = jiffies + (HZ / 2); - do { - if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) - break; - if (!in_interrupt()) - schedule_timeout_interruptible(1); - } while (time_after(time, jiffies)); - -#ifdef HP100_DEBUG - if (time_after_eq(jiffies, time)) - printk("hp100: %s: down_vg_link: Link does not go down?\n", dev->name); -#endif - - /* To prevent condition where Rev 1 VG MAC and old hubs do not complete */ - /* logout under traffic (even though all the status bits are cleared), */ - /* do this workaround to get the Rev 1 MAC in its idle state */ - if (lp->chip == HP100_CHIPID_LASSEN) { - /* Reset VG MAC to insure it leaves the logoff state even if */ - /* the Hub is still emitting tones */ - hp100_andb(~HP100_VG_RESET, VG_LAN_CFG_1); - udelay(1500); /* wait for >1ms */ - hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1); /* Release Reset */ - udelay(1500); - } - - /* New: For lassen, switch to 10 Mbps mac briefly to clear training ACK */ - /* to get the VG mac to full reset. This is not req.d with later chips */ - /* Note: It will take the between 1 and 2 seconds for the VG mac to be */ - /* selected again! 
This will be left to the connect hub function to */ - /* perform if desired. */ - if (lp->chip == HP100_CHIPID_LASSEN) { - /* Have to write to 10 and 100VG control registers simultaneously */ - savelan = newlan = hp100_inl(10_LAN_CFG_1); /* read 10+100 LAN_CFG regs */ - newlan &= ~(HP100_VG_SEL << 16); - newlan |= (HP100_DOT3_MAC) << 8; - hp100_andb(~HP100_AUTO_MODE, MAC_CFG_3); /* Autosel off */ - hp100_outl(newlan, 10_LAN_CFG_1); - - /* Conditionally stall for 5sec on VG selected. */ - time = jiffies + (HZ * 5); - do { - if (!(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST)) - break; - if (!in_interrupt()) - schedule_timeout_interruptible(1); - } while (time_after(time, jiffies)); - - hp100_orb(HP100_AUTO_MODE, MAC_CFG_3); /* Autosel back on */ - hp100_outl(savelan, 10_LAN_CFG_1); - } - - time = jiffies + (3 * HZ); /* Timeout 3s */ - do { - if ((hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) == 0) - break; - if (!in_interrupt()) - schedule_timeout_interruptible(1); - } while (time_after(time, jiffies)); - - if (time_before_eq(time, jiffies)) { -#ifdef HP100_DEBUG - printk("hp100: %s: down_vg_link: timeout\n", dev->name); -#endif - return -EIO; - } - - time = jiffies + (2 * HZ); /* This seems to take a while.... */ - do { - if (!in_interrupt()) - schedule_timeout_interruptible(1); - } while (time_after(time, jiffies)); - - return 0; -} - -static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin) -{ - int ioaddr = dev->base_addr; - struct hp100_private *lp = netdev_priv(dev); - u_short val = 0; - unsigned long time; - int startst; - -#ifdef HP100_DEBUG_B - hp100_outw(0x4225, TRACE); - printk("hp100: %s: login_to_vg_hub\n", dev->name); -#endif - - /* Initiate a login sequence iff VG MAC is enabled and either Load Address - * bit is zero or the force relogin flag is set (e.g. due to MAC address or - * promiscuous mode change) - */ - hp100_page(MAC_CTRL); - startst = hp100_inb(VG_LAN_CFG_1); - if ((force_relogin == 1) || (hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST)) { -#ifdef HP100_DEBUG_TRAINING - printk("hp100: %s: Start training\n", dev->name); -#endif - - /* Ensure VG Reset bit is 1 (i.e., do not reset) */ - hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1); - - /* If Lassen AND auto-select-mode AND VG tones were sensed on */ - /* entry then temporarily put them into force 100Mbit mode */ - if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST)) - hp100_andb(~HP100_DOT3_MAC, 10_LAN_CFG_2); - - /* Drop the VG link by zeroing Link Up Command and Load Address */ - hp100_andb(~(HP100_LINK_CMD /* |HP100_LOAD_ADDR */ ), VG_LAN_CFG_1); - -#ifdef HP100_DEBUG_TRAINING - printk("hp100: %s: Bring down the link\n", dev->name); -#endif - - /* Wait for link to drop */ - time = jiffies + (HZ / 10); - do { - if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) - break; - if (!in_interrupt()) - schedule_timeout_interruptible(1); - } while (time_after(time, jiffies)); - - /* Start an addressed training and optionally request promiscuous port */ - if ((dev->flags) & IFF_PROMISC) { - hp100_orb(HP100_PROM_MODE, VG_LAN_CFG_2); - if (lp->chip == HP100_CHIPID_LASSEN) - hp100_orw(HP100_MACRQ_PROMSC, TRAIN_REQUEST); - } else { - hp100_andb(~HP100_PROM_MODE, VG_LAN_CFG_2); - /* For ETR parts we need to reset the prom. bit in the training - * register, otherwise promiscious mode won't be disabled. - */ - if (lp->chip == HP100_CHIPID_LASSEN) { - hp100_andw(~HP100_MACRQ_PROMSC, TRAIN_REQUEST); - } - } - - /* With ETR parts, frame format request bits can be set. 
*/ - if (lp->chip == HP100_CHIPID_LASSEN) - hp100_orb(HP100_MACRQ_FRAMEFMT_EITHER, TRAIN_REQUEST); - - hp100_orb(HP100_LINK_CMD | HP100_LOAD_ADDR | HP100_VG_RESET, VG_LAN_CFG_1); - - /* Note: Next wait could be omitted for Hood and earlier chips under */ - /* certain circumstances */ - /* TODO: check if hood/earlier and skip wait. */ - - /* Wait for either short timeout for VG tones or long for login */ - /* Wait for the card hardware to signalise link cable status ok... */ - hp100_page(MAC_CTRL); - time = jiffies + (1 * HZ); /* 1 sec timeout for cable st */ - do { - if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) - break; - if (!in_interrupt()) - schedule_timeout_interruptible(1); - } while (time_before(jiffies, time)); - - if (time_after_eq(jiffies, time)) { -#ifdef HP100_DEBUG_TRAINING - printk("hp100: %s: Link cable status not ok? Training aborted.\n", dev->name); -#endif - } else { -#ifdef HP100_DEBUG_TRAINING - printk - ("hp100: %s: HUB tones detected. Trying to train.\n", - dev->name); -#endif - - time = jiffies + (2 * HZ); /* again a timeout */ - do { - val = hp100_inb(VG_LAN_CFG_1); - if ((val & (HP100_LINK_UP_ST))) { -#ifdef HP100_DEBUG_TRAINING - printk("hp100: %s: Passed training.\n", dev->name); -#endif - break; - } - if (!in_interrupt()) - schedule_timeout_interruptible(1); - } while (time_after(time, jiffies)); - } - - /* If LINK_UP_ST is set, then we are logged into the hub. */ - if (time_before_eq(jiffies, time) && (val & HP100_LINK_UP_ST)) { -#ifdef HP100_DEBUG_TRAINING - printk("hp100: %s: Successfully logged into the HUB.\n", dev->name); - if (lp->chip == HP100_CHIPID_LASSEN) { - val = hp100_inw(TRAIN_ALLOW); - printk("hp100: %s: Card supports 100VG MAC Version \"%s\" ", - dev->name, (hp100_inw(TRAIN_REQUEST) & HP100_CARD_MACVER) ? "802.12" : "Pre"); - printk("Driver will use MAC Version \"%s\"\n", (val & HP100_HUB_MACVER) ? "802.12" : "Pre"); - printk("hp100: %s: Frame format is %s.\n", dev->name, (val & HP100_MALLOW_FRAMEFMT) ? "802.5" : "802.3"); - } -#endif - } else { - /* If LINK_UP_ST is not set, login was not successful */ - printk("hp100: %s: Problem logging into the HUB.\n", dev->name); - if (lp->chip == HP100_CHIPID_LASSEN) { - /* Check allowed Register to find out why there is a problem. */ - val = hp100_inw(TRAIN_ALLOW); /* won't work on non-ETR card */ -#ifdef HP100_DEBUG_TRAINING - printk("hp100: %s: MAC Configuration requested: 0x%04x, HUB allowed: 0x%04x\n", dev->name, hp100_inw(TRAIN_REQUEST), val); -#endif - if (val & HP100_MALLOW_ACCDENIED) - printk("hp100: %s: HUB access denied.\n", dev->name); - if (val & HP100_MALLOW_CONFIGURE) - printk("hp100: %s: MAC Configuration is incompatible with the Network.\n", dev->name); - if (val & HP100_MALLOW_DUPADDR) - printk("hp100: %s: Duplicate MAC Address on the Network.\n", dev->name); - } - } - - /* If we have put the chip into forced 100 Mbit mode earlier, go back */ - /* to auto-select mode */ - - if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST)) { - hp100_page(MAC_CTRL); - hp100_orb(HP100_DOT3_MAC, 10_LAN_CFG_2); - } - - val = hp100_inb(VG_LAN_CFG_1); - - /* Clear the MISC_ERROR Interrupt, which might be generated when doing the relogin */ - hp100_page(PERFORMANCE); - hp100_outw(HP100_MISC_ERROR, IRQ_STATUS); - - if (val & HP100_LINK_UP_ST) - return 0; /* login was ok */ - else { - printk("hp100: %s: Training failed.\n", dev->name); - hp100_down_vg_link(dev); - return -EIO; - } - } - /* no forced relogin & already link there->no training. 
*/ - return -EIO; -} - -static void hp100_cascade_reset(struct net_device *dev, u_short enable) -{ - int ioaddr = dev->base_addr; - struct hp100_private *lp = netdev_priv(dev); - -#ifdef HP100_DEBUG_B - hp100_outw(0x4226, TRACE); - printk("hp100: %s: cascade_reset\n", dev->name); -#endif - - if (enable) { - hp100_outw(HP100_HW_RST | HP100_RESET_LB, OPTION_LSW); - if (lp->chip == HP100_CHIPID_LASSEN) { - /* Lassen requires a PCI transmit fifo reset */ - hp100_page(HW_MAP); - hp100_andb(~HP100_PCI_RESET, PCICTRL2); - hp100_orb(HP100_PCI_RESET, PCICTRL2); - /* Wait for min. 300 ns */ - /* we can't use jiffies here, because it may be */ - /* that we have disabled the timer... */ - udelay(400); - hp100_andb(~HP100_PCI_RESET, PCICTRL2); - hp100_page(PERFORMANCE); - } - } else { /* bring out of reset */ - hp100_outw(HP100_HW_RST | HP100_SET_LB, OPTION_LSW); - udelay(400); - hp100_page(PERFORMANCE); - } -} - -#ifdef HP100_DEBUG -void hp100_RegisterDump(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - int Page; - int Register; - - /* Dump common registers */ - printk("hp100: %s: Cascade Register Dump\n", dev->name); - printk("hardware id #1: 0x%.2x\n", hp100_inb(HW_ID)); - printk("hardware id #2/paging: 0x%.2x\n", hp100_inb(PAGING)); - printk("option #1: 0x%.4x\n", hp100_inw(OPTION_LSW)); - printk("option #2: 0x%.4x\n", hp100_inw(OPTION_MSW)); - - /* Dump paged registers */ - for (Page = 0; Page < 8; Page++) { - /* Dump registers */ - printk("page: 0x%.2x\n", Page); - outw(Page, ioaddr + 0x02); - for (Register = 0x8; Register < 0x22; Register += 2) { - /* Display Register contents except data port */ - if (((Register != 0x10) && (Register != 0x12)) || (Page > 0)) { - printk("0x%.2x = 0x%.4x\n", Register, inw(ioaddr + Register)); - } - } - } - hp100_page(PERFORMANCE); -} -#endif - - -static void cleanup_dev(struct net_device *d) -{ - struct hp100_private *p = netdev_priv(d); - - unregister_netdev(d); - release_region(d->base_addr, HP100_REGION_SIZE); - - if (p->mode == 1) /* busmaster */ - pci_free_consistent(p->pci_dev, MAX_RINGSIZE + 0x0f, - p->page_vaddr_algn, - virt_to_whatever(d, p->page_vaddr_algn)); - if (p->mem_ptr_virt) - iounmap(p->mem_ptr_virt); - - free_netdev(d); -} - -static int hp100_eisa_probe(struct device *gendev) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); - struct eisa_device *edev = to_eisa_device(gendev); - int err; - - if (!dev) - return -ENOMEM; - - SET_NETDEV_DEV(dev, &edev->dev); - - err = hp100_probe1(dev, edev->base_addr + 0xC38, HP100_BUS_EISA, NULL); - if (err) - goto out1; - -#ifdef HP100_DEBUG - printk("hp100: %s: EISA adapter found at 0x%x\n", dev->name, - dev->base_addr); -#endif - dev_set_drvdata(gendev, dev); - return 0; - out1: - free_netdev(dev); - return err; -} - -static int hp100_eisa_remove(struct device *gendev) -{ - struct net_device *dev = dev_get_drvdata(gendev); - cleanup_dev(dev); - return 0; -} - -static struct eisa_driver hp100_eisa_driver = { - .id_table = hp100_eisa_tbl, - .driver = { - .name = "hp100", - .probe = hp100_eisa_probe, - .remove = hp100_eisa_remove, - } -}; - -static int hp100_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *dev; - int ioaddr; - u_short pci_command; - int err; - - if (pci_enable_device(pdev)) - return -ENODEV; - - dev = alloc_etherdev(sizeof(struct hp100_private)); - if (!dev) { - err = -ENOMEM; - goto out0; - } - - SET_NETDEV_DEV(dev, &pdev->dev); - - pci_read_config_word(pdev, PCI_COMMAND, &pci_command); - if (!(pci_command & 
PCI_COMMAND_IO)) { -#ifdef HP100_DEBUG - printk("hp100: %s: PCI I/O Bit has not been set. Setting...\n", dev->name); -#endif - pci_command |= PCI_COMMAND_IO; - pci_write_config_word(pdev, PCI_COMMAND, pci_command); - } - - if (!(pci_command & PCI_COMMAND_MASTER)) { -#ifdef HP100_DEBUG - printk("hp100: %s: PCI Master Bit has not been set. Setting...\n", dev->name); -#endif - pci_command |= PCI_COMMAND_MASTER; - pci_write_config_word(pdev, PCI_COMMAND, pci_command); - } - - ioaddr = pci_resource_start(pdev, 0); - err = hp100_probe1(dev, ioaddr, HP100_BUS_PCI, pdev); - if (err) - goto out1; - -#ifdef HP100_DEBUG - printk("hp100: %s: PCI adapter found at 0x%x\n", dev->name, ioaddr); -#endif - pci_set_drvdata(pdev, dev); - return 0; - out1: - free_netdev(dev); - out0: - pci_disable_device(pdev); - return err; -} - -static void hp100_pci_remove(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - cleanup_dev(dev); - pci_disable_device(pdev); -} - - -static struct pci_driver hp100_pci_driver = { - .name = "hp100", - .id_table = hp100_pci_tbl, - .probe = hp100_pci_probe, - .remove = hp100_pci_remove, -}; - -/* - * module section - */ - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, " - "Siegfried \"Frieder\" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>"); -MODULE_DESCRIPTION("HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters"); - -/* - * Note: to register three isa devices, use: - * option hp100 hp100_port=0,0,0 - * to register one card at io 0x280 as eth239, use: - * option hp100 hp100_port=0x280 - */ -#if defined(MODULE) && defined(CONFIG_ISA) -#define HP100_DEVICES 5 -/* Parameters set by insmod */ -static int hp100_port[HP100_DEVICES] = { 0, [1 ... (HP100_DEVICES-1)] = -1 }; -module_param_hw_array(hp100_port, int, ioport, NULL, 0); - -/* List of devices */ -static struct net_device *hp100_devlist[HP100_DEVICES]; - -static int __init hp100_isa_init(void) -{ - struct net_device *dev; - int i, err, cards = 0; - - /* Don't autoprobe ISA bus */ - if (hp100_port[0] == 0) - return -ENODEV; - - /* Loop on all possible base addresses */ - for (i = 0; i < HP100_DEVICES && hp100_port[i] != -1; ++i) { - dev = alloc_etherdev(sizeof(struct hp100_private)); - if (!dev) { - while (cards > 0) - cleanup_dev(hp100_devlist[--cards]); - - return -ENOMEM; - } - - err = hp100_isa_probe(dev, hp100_port[i]); - if (!err) - hp100_devlist[cards++] = dev; - else - free_netdev(dev); - } - - return cards > 0 ? 
0 : -ENODEV; -} - -static void hp100_isa_cleanup(void) -{ - int i; - - for (i = 0; i < HP100_DEVICES; i++) { - struct net_device *dev = hp100_devlist[i]; - if (dev) - cleanup_dev(dev); - } -} -#else -#define hp100_isa_init() (0) -#define hp100_isa_cleanup() do { } while(0) -#endif - -static int __init hp100_module_init(void) -{ - int err; - - err = hp100_isa_init(); - if (err && err != -ENODEV) - goto out; - err = eisa_driver_register(&hp100_eisa_driver); - if (err && err != -ENODEV) - goto out2; - err = pci_register_driver(&hp100_pci_driver); - if (err && err != -ENODEV) - goto out3; - out: - return err; - out3: - eisa_driver_unregister (&hp100_eisa_driver); - out2: - hp100_isa_cleanup(); - goto out; -} - - -static void __exit hp100_module_exit(void) -{ - hp100_isa_cleanup(); - eisa_driver_unregister (&hp100_eisa_driver); - pci_unregister_driver (&hp100_pci_driver); -} - -module_init(hp100_module_init) -module_exit(hp100_module_exit) diff --git a/drivers/staging/hp/hp100.h b/drivers/staging/hp/hp100.h deleted file mode 100644 index 7239b94c9de5..000000000000 --- a/drivers/staging/hp/hp100.h +++ /dev/null @@ -1,611 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * hp100.h: Hewlett Packard HP10/100VG ANY LAN ethernet driver for Linux. - * - * $Id: hp100.h,v 1.51 1997/04/08 14:26:42 floeff Exp floeff $ - * - * Authors: Jaroslav Kysela, <perex@pf.jcu.cz> - * Siegfried Loeffler <floeff@tunix.mathematik.uni-stuttgart.de> - * - * This driver is based on the 'hpfepkt' crynwr packet driver. - */ - -/**************************************************************************** - * Hardware Constants - ****************************************************************************/ - -/* - * Page Identifiers - * (Swap Paging Register, PAGING, bits 3:0, Offset 0x02) - */ - -#define HP100_PAGE_PERFORMANCE 0x0 /* Page 0 */ -#define HP100_PAGE_MAC_ADDRESS 0x1 /* Page 1 */ -#define HP100_PAGE_HW_MAP 0x2 /* Page 2 */ -#define HP100_PAGE_EEPROM_CTRL 0x3 /* Page 3 */ -#define HP100_PAGE_MAC_CTRL 0x4 /* Page 4 */ -#define HP100_PAGE_MMU_CFG 0x5 /* Page 5 */ -#define HP100_PAGE_ID_MAC_ADDR 0x6 /* Page 6 */ -#define HP100_PAGE_MMU_POINTER 0x7 /* Page 7 */ - - -/* Registers that are present on all pages */ - -#define HP100_REG_HW_ID 0x00 /* R: (16) Unique card ID */ -#define HP100_REG_TRACE 0x00 /* W: (16) Used for debug output */ -#define HP100_REG_PAGING 0x02 /* R: (16),15:4 Card ID */ - /* W: (16),3:0 Switch pages */ -#define HP100_REG_OPTION_LSW 0x04 /* RW: (16) Select card functions */ -#define HP100_REG_OPTION_MSW 0x06 /* RW: (16) Select card functions */ - -/* Page 0 - Performance */ - -#define HP100_REG_IRQ_STATUS 0x08 /* RW: (16) Which ints are pending */ -#define HP100_REG_IRQ_MASK 0x0a /* RW: (16) Select ints to allow */ -#define HP100_REG_FRAGMENT_LEN 0x0c /* W: (16)12:0 Current fragment len */ -/* Note: For 32 bit systems, fragment len and offset registers are available */ -/* at offset 0x28 and 0x2c, where they can be written as 32bit values. 
*/ -#define HP100_REG_OFFSET 0x0e /* RW: (16)12:0 Offset to start read */ -#define HP100_REG_DATA32 0x10 /* RW: (32) I/O mode data port */ -#define HP100_REG_DATA16 0x12 /* RW: WORDs must be read from here */ -#define HP100_REG_TX_MEM_FREE 0x14 /* RD: (32) Amount of free Tx mem */ -#define HP100_REG_TX_PDA_L 0x14 /* W: (32) BM: Ptr to PDL, Low Pri */ -#define HP100_REG_TX_PDA_H 0x1c /* W: (32) BM: Ptr to PDL, High Pri */ -#define HP100_REG_RX_PKT_CNT 0x18 /* RD: (8) Rx count of pkts on card */ -#define HP100_REG_TX_PKT_CNT 0x19 /* RD: (8) Tx count of pkts on card */ -#define HP100_REG_RX_PDL 0x1a /* R: (8) BM: # rx pdl not executed */ -#define HP100_REG_TX_PDL 0x1b /* R: (8) BM: # tx pdl not executed */ -#define HP100_REG_RX_PDA 0x18 /* W: (32) BM: Up to 31 addresses */ - /* which point to a PDL */ -#define HP100_REG_SL_EARLY 0x1c /* (32) Enhanced Slave Early Rx */ -#define HP100_REG_STAT_DROPPED 0x20 /* R (12) Dropped Packet Counter */ -#define HP100_REG_STAT_ERRORED 0x22 /* R (8) Errored Packet Counter */ -#define HP100_REG_STAT_ABORT 0x23 /* R (8) Abort Counter/OW Coll. Flag */ -#define HP100_REG_RX_RING 0x24 /* W (32) Slave: RX Ring Pointers */ -#define HP100_REG_32_FRAGMENT_LEN 0x28 /* W (13) Slave: Fragment Length Reg */ -#define HP100_REG_32_OFFSET 0x2c /* W (16) Slave: Offset Register */ - -/* Page 1 - MAC Address/Hash Table */ - -#define HP100_REG_MAC_ADDR 0x08 /* RW: (8) Cards MAC address */ -#define HP100_REG_HASH_BYTE0 0x10 /* RW: (8) Cards multicast filter */ - -/* Page 2 - Hardware Mapping */ - -#define HP100_REG_MEM_MAP_LSW 0x08 /* RW: (16) LSW of cards mem addr */ -#define HP100_REG_MEM_MAP_MSW 0x0a /* RW: (16) MSW of cards mem addr */ -#define HP100_REG_IO_MAP 0x0c /* RW: (8) Cards I/O address */ -#define HP100_REG_IRQ_CHANNEL 0x0d /* RW: (8) IRQ and edge/level int */ -#define HP100_REG_SRAM 0x0e /* RW: (8) How much RAM on card */ -#define HP100_REG_BM 0x0f /* RW: (8) Controls BM functions */ - -/* New on Page 2 for ETR chips: */ -#define HP100_REG_MODECTRL1 0x10 /* RW: (8) Mode Control 1 */ -#define HP100_REG_MODECTRL2 0x11 /* RW: (8) Mode Control 2 */ -#define HP100_REG_PCICTRL1 0x12 /* RW: (8) PCI Cfg 1 */ -#define HP100_REG_PCICTRL2 0x13 /* RW: (8) PCI Cfg 2 */ -#define HP100_REG_PCIBUSMLAT 0x15 /* RW: (8) PCI Bus Master Latency */ -#define HP100_REG_EARLYTXCFG 0x16 /* RW: (16) Early TX Cfg/Cntrl Reg */ -#define HP100_REG_EARLYRXCFG 0x18 /* RW: (8) Early RX Cfg/Cntrl Reg */ -#define HP100_REG_ISAPNPCFG1 0x1a /* RW: (8) ISA PnP Cfg/Cntrl Reg 1 */ -#define HP100_REG_ISAPNPCFG2 0x1b /* RW: (8) ISA PnP Cfg/Cntrl Reg 2 */ - -/* Page 3 - EEPROM/Boot ROM */ - -#define HP100_REG_EEPROM_CTRL 0x08 /* RW: (16) Used to load EEPROM */ -#define HP100_REG_BOOTROM_CTRL 0x0a - -/* Page 4 - LAN Configuration (MAC_CTRL) */ - -#define HP100_REG_10_LAN_CFG_1 0x08 /* RW: (8) Set 10M XCVR functions */ -#define HP100_REG_10_LAN_CFG_2 0x09 /* RW: (8) 10M XCVR functions */ -#define HP100_REG_VG_LAN_CFG_1 0x0a /* RW: (8) Set 100M XCVR functions */ -#define HP100_REG_VG_LAN_CFG_2 0x0b /* RW: (8) 100M LAN Training cfgregs */ -#define HP100_REG_MAC_CFG_1 0x0c /* RW: (8) Types of pkts to accept */ -#define HP100_REG_MAC_CFG_2 0x0d /* RW: (8) Misc MAC functions */ -#define HP100_REG_MAC_CFG_3 0x0e /* RW: (8) Misc MAC functions */ -#define HP100_REG_MAC_CFG_4 0x0f /* R: (8) Misc MAC states */ -#define HP100_REG_DROPPED 0x10 /* R: (16),11:0 Pkts can't fit in mem */ -#define HP100_REG_CRC 0x12 /* R: (8) Pkts with CRC */ -#define HP100_REG_ABORT 0x13 /* R: (8) Aborted Tx pkts */ -#define 
HP100_REG_TRAIN_REQUEST 0x14 /* RW: (16) Endnode MAC register. */ -#define HP100_REG_TRAIN_ALLOW 0x16 /* R: (16) Hub allowed register */ - -/* Page 5 - MMU */ - -#define HP100_REG_RX_MEM_STOP 0x0c /* RW: (16) End of Rx ring addr */ -#define HP100_REG_TX_MEM_STOP 0x0e /* RW: (16) End of Tx ring addr */ -#define HP100_REG_PDL_MEM_STOP 0x10 /* Not used by 802.12 devices */ -#define HP100_REG_ECB_MEM_STOP 0x14 /* I've no idea what this is */ - -/* Page 6 - Card ID/Physical LAN Address */ - -#define HP100_REG_BOARD_ID 0x08 /* R: (8) EISA/ISA card ID */ -#define HP100_REG_BOARD_IO_CHCK 0x0c /* R: (8) Added to ID to get FFh */ -#define HP100_REG_SOFT_MODEL 0x0d /* R: (8) Config program defined */ -#define HP100_REG_LAN_ADDR 0x10 /* R: (8) MAC addr of card */ -#define HP100_REG_LAN_ADDR_CHCK 0x16 /* R: (8) Added to addr to get FFh */ - -/* Page 7 - MMU Current Pointers */ - -#define HP100_REG_PTR_RXSTART 0x08 /* R: (16) Current begin of Rx ring */ -#define HP100_REG_PTR_RXEND 0x0a /* R: (16) Current end of Rx ring */ -#define HP100_REG_PTR_TXSTART 0x0c /* R: (16) Current begin of Tx ring */ -#define HP100_REG_PTR_TXEND 0x0e /* R: (16) Current end of Rx ring */ -#define HP100_REG_PTR_RPDLSTART 0x10 -#define HP100_REG_PTR_RPDLEND 0x12 -#define HP100_REG_PTR_RINGPTRS 0x14 -#define HP100_REG_PTR_MEMDEBUG 0x1a -/* ------------------------------------------------------------------------ */ - - -/* - * Hardware ID Register I (Always available, HW_ID, Offset 0x00) - */ -#define HP100_HW_ID_CASCADE 0x4850 /* Identifies Cascade Chip */ - -/* - * Hardware ID Register 2 & Paging Register - * (Always available, PAGING, Offset 0x02) - * Bits 15:4 are for the Chip ID - */ -#define HP100_CHIPID_MASK 0xFFF0 -#define HP100_CHIPID_SHASTA 0x5350 /* Not 802.12 compliant */ - /* EISA BM/SL, MCA16/32 SL, ISA SL */ -#define HP100_CHIPID_RAINIER 0x5360 /* Not 802.12 compliant EISA BM, */ - /* PCI SL, MCA16/32 SL, ISA SL */ -#define HP100_CHIPID_LASSEN 0x5370 /* 802.12 compliant PCI BM, PCI SL */ - /* LRF supported */ - -/* - * Option Registers I and II - * (Always available, OPTION_LSW, Offset 0x04-0x05) - */ -#define HP100_DEBUG_EN 0x8000 /* 0:Dis., 1:Enable Debug Dump Ptr. */ -#define HP100_RX_HDR 0x4000 /* 0:Dis., 1:Enable putting pkt into */ - /* system mem. before Rx interrupt */ -#define HP100_MMAP_DIS 0x2000 /* 0:Enable, 1:Disable mem.mapping. */ - /* MMAP_DIS must be 0 and MEM_EN */ - /* must be 1 for memory-mapped */ - /* mode to be enabled */ -#define HP100_EE_EN 0x1000 /* 0:Disable,1:Enable EEPROM writing */ -#define HP100_BM_WRITE 0x0800 /* 0:Slave, 1:Bus Master for Tx data */ -#define HP100_BM_READ 0x0400 /* 0:Slave, 1:Bus Master for Rx data */ -#define HP100_TRI_INT 0x0200 /* 0:Don't, 1:Do tri-state the int */ -#define HP100_MEM_EN 0x0040 /* Config program set this to */ - /* 0:Disable, 1:Enable mem map. */ - /* See MMAP_DIS. 
*/ -#define HP100_IO_EN 0x0020 /* 1:Enable I/O transfers */ -#define HP100_BOOT_EN 0x0010 /* 1:Enable boot ROM access */ -#define HP100_FAKE_INT 0x0008 /* 1:int */ -#define HP100_INT_EN 0x0004 /* 1:Enable ints from card */ -#define HP100_HW_RST 0x0002 /* 0:Reset, 1:Out of reset */ - /* NIC reset on 0 to 1 transition */ - -/* - * Option Register III - * (Always available, OPTION_MSW, Offset 0x06) - */ -#define HP100_PRIORITY_TX 0x0080 /* 1:Do all Tx pkts as priority */ -#define HP100_EE_LOAD 0x0040 /* 1:EEPROM loading, 0 when done */ -#define HP100_ADV_NXT_PKT 0x0004 /* 1:Advance to next pkt in Rx queue */ - /* h/w will set to 0 when done */ -#define HP100_TX_CMD 0x0002 /* 1:Tell h/w download done, h/w */ - /* will set to 0 when done */ - -/* - * Interrupt Status Registers I and II - * (Page PERFORMANCE, IRQ_STATUS, Offset 0x08-0x09) - * Note: With old chips, these Registers will clear when 1 is written to them - * with new chips this depends on setting of CLR_ISMODE - */ -#define HP100_RX_EARLY_INT 0x2000 -#define HP100_RX_PDA_ZERO 0x1000 -#define HP100_RX_PDL_FILL_COMPL 0x0800 -#define HP100_RX_PACKET 0x0400 /* 0:No, 1:Yes pkt has been Rx */ -#define HP100_RX_ERROR 0x0200 /* 0:No, 1:Yes Rx pkt had error */ -#define HP100_TX_PDA_ZERO 0x0020 /* 1 when PDA count goes to zero */ -#define HP100_TX_SPACE_AVAIL 0x0010 /* 0:<8192, 1:>=8192 Tx free bytes */ -#define HP100_TX_COMPLETE 0x0008 /* 0:No, 1:Yes a Tx has completed */ -#define HP100_MISC_ERROR 0x0004 /* 0:No, 1:Lan Link down or bus error */ -#define HP100_TX_ERROR 0x0002 /* 0:No, 1:Yes Tx pkt had error */ - -/* - * Xmit Memory Free Count - * (Page PERFORMANCE, TX_MEM_FREE, Offset 0x14) (Read only, 32bit) - */ -#define HP100_AUTO_COMPARE 0x80000000 /* Tx Space avail & pkts<255 */ -#define HP100_FREE_SPACE 0x7fffffe0 /* Tx free memory */ - -/* - * IRQ Channel - * (Page HW_MAP, IRQ_CHANNEL, Offset 0x0d) - */ -#define HP100_ZERO_WAIT_EN 0x80 /* 0:No, 1:Yes asserts NOWS signal */ -#define HP100_IRQ_SCRAMBLE 0x40 -#define HP100_BOND_HP 0x20 -#define HP100_LEVEL_IRQ 0x10 /* 0:Edge, 1:Level type interrupts. */ - /* (Only valid on EISA cards) */ -#define HP100_IRQMASK 0x0F /* Isolate the IRQ bits */ - -/* - * SRAM Parameters - * (Page HW_MAP, SRAM, Offset 0x0e) - */ -#define HP100_RAM_SIZE_MASK 0xe0 /* AND to get SRAM size index */ -#define HP100_RAM_SIZE_SHIFT 0x05 /* Shift count(put index in lwr bits) */ - -/* - * Bus Master Register - * (Page HW_MAP, BM, Offset 0x0f) - */ -#define HP100_BM_BURST_RD 0x01 /* EISA only: 1=Use burst trans. fm system */ - /* memory to chip (tx) */ -#define HP100_BM_BURST_WR 0x02 /* EISA only: 1=Use burst trans. fm system */ - /* memory to chip (rx) */ -#define HP100_BM_MASTER 0x04 /* 0:Slave, 1:BM mode */ -#define HP100_BM_PAGE_CK 0x08 /* This bit should be set whenever in */ - /* an EISA system */ -#define HP100_BM_PCI_8CLK 0x40 /* ... cycles 8 clocks apart */ - - -/* - * Mode Control Register I - * (Page HW_MAP, MODECTRL1, Offset0x10) - */ -#define HP100_TX_DUALQ 0x10 - /* If set and BM -> dual tx pda queues */ -#define HP100_ISR_CLRMODE 0x02 /* If set ISR will clear all pending */ - /* interrupts on read (etr only?) */ -#define HP100_EE_NOLOAD 0x04 /* Status whether res will be loaded */ - /* from the eeprom */ -#define HP100_TX_CNT_FLG 0x08 /* Controls Early TX Reg Cnt Field */ -#define HP100_PDL_USE3 0x10 /* If set BM engine will read only */ - /* first three data elements of a PDL */ - /* on the first access. 
*/ -#define HP100_BUSTYPE_MASK 0xe0 /* Three bit bus type info */ - -/* - * Mode Control Register II - * (Page HW_MAP, MODECTRL2, Offset0x11) - */ -#define HP100_EE_MASK 0x0f /* Tell EEPROM circuit not to load */ - /* certain resources */ -#define HP100_DIS_CANCEL 0x20 /* For tx dualq mode operation */ -#define HP100_EN_PDL_WB 0x40 /* 1: Status of PDL completion may be */ - /* written back to system mem */ -#define HP100_EN_BUS_FAIL 0x80 /* Enables bus-fail portion of misc */ - /* interrupt */ - -/* - * PCI Configuration and Control Register I - * (Page HW_MAP, PCICTRL1, Offset 0x12) - */ -#define HP100_LO_MEM 0x01 /* 1: Mapped Mem requested below 1MB */ -#define HP100_NO_MEM 0x02 /* 1: Disables Req for sysmem to PCI */ - /* bios */ -#define HP100_USE_ISA 0x04 /* 1: isa type decodes will occur */ - /* simultaneously with PCI decodes */ -#define HP100_IRQ_HI_MASK 0xf0 /* pgmed by pci bios */ -#define HP100_PCI_IRQ_HI_MASK 0x78 /* Isolate 4 bits for PCI IRQ */ - -/* - * PCI Configuration and Control Register II - * (Page HW_MAP, PCICTRL2, Offset 0x13) - */ -#define HP100_RD_LINE_PDL 0x01 /* 1: PCI command Memory Read Line en */ -#define HP100_RD_TX_DATA_MASK 0x06 /* choose PCI memread cmds for TX */ -#define HP100_MWI 0x08 /* 1: en. PCI memory write invalidate */ -#define HP100_ARB_MODE 0x10 /* Select PCI arbitor type */ -#define HP100_STOP_EN 0x20 /* Enables PCI state machine to issue */ - /* pci stop if cascade not ready */ -#define HP100_IGNORE_PAR 0x40 /* 1: PCI state machine ignores parity */ -#define HP100_PCI_RESET 0x80 /* 0->1: Reset PCI block */ - -/* - * Early TX Configuration and Control Register - * (Page HW_MAP, EARLYTXCFG, Offset 0x16) - */ -#define HP100_EN_EARLY_TX 0x8000 /* 1=Enable Early TX */ -#define HP100_EN_ADAPTIVE 0x4000 /* 1=Enable adaptive mode */ -#define HP100_EN_TX_UR_IRQ 0x2000 /* reserved, must be 0 */ -#define HP100_EN_LOW_TX 0x1000 /* reserved, must be 0 */ -#define HP100_ET_CNT_MASK 0x0fff /* bits 11..0: ET counters */ - -/* - * Early RX Configuration and Control Register - * (Page HW_MAP, EARLYRXCFG, Offset 0x18) - */ -#define HP100_EN_EARLY_RX 0x80 /* 1=Enable Early RX */ -#define HP100_EN_LOW_RX 0x40 /* reserved, must be 0 */ -#define HP100_RX_TRIP_MASK 0x1f /* bits 4..0: threshold at which the - * early rx circuit will start the - * dma of received packet into system - * memory for BM */ - -/* - * Serial Devices Control Register - * (Page EEPROM_CTRL, EEPROM_CTRL, Offset 0x08) - */ -#define HP100_EEPROM_LOAD 0x0001 /* 0->1 loads EEPROM into registers. */ - /* When it goes back to 0, load is */ - /* complete. This should take ~600us. */ - -/* - * 10MB LAN Control and Configuration Register I - * (Page MAC_CTRL, 10_LAN_CFG_1, Offset 0x08) - */ -#define HP100_MAC10_SEL 0xc0 /* Get bits to indicate MAC */ -#define HP100_AUI_SEL 0x20 /* Status of AUI selection */ -#define HP100_LOW_TH 0x10 /* 0:No, 1:Yes allow better cabling */ -#define HP100_LINK_BEAT_DIS 0x08 /* 0:Enable, 1:Disable link beat */ -#define HP100_LINK_BEAT_ST 0x04 /* 0:No, 1:Yes link beat being Rx */ -#define HP100_R_ROL_ST 0x02 /* 0:No, 1:Yes Rx twisted pair has */ - /* been reversed */ -#define HP100_AUI_ST 0x01 /* 0:No, 1:Yes use AUI on TP card */ - -/* - * 10 MB LAN Control and Configuration Register II - * (Page MAC_CTRL, 10_LAN_CFG_2, Offset 0x09) - */ -#define HP100_SQU_ST 0x01 /* 0:No, 1:Yes collision signal sent */ - /* after Tx.Only used for AUI. */ -#define HP100_FULLDUP 0x02 /* 1: LXT901 XCVR fullduplx enabled */ -#define HP100_DOT3_MAC 0x04 /* 1: DOT 3 Mac sel. 
unless Autosel */ - -/* - * MAC Selection, use with MAC10_SEL bits - */ -#define HP100_AUTO_SEL_10 0x0 /* Auto select */ -#define HP100_XCVR_LXT901_10 0x1 /* LXT901 10BaseT transceiver */ -#define HP100_XCVR_7213 0x2 /* 7213 transceiver */ -#define HP100_XCVR_82503 0x3 /* 82503 transceiver */ - -/* - * 100MB LAN Training Register - * (Page MAC_CTRL, VG_LAN_CFG_2, Offset 0x0b) (old, pre 802.12) - */ -#define HP100_FRAME_FORMAT 0x08 /* 0:802.3, 1:802.5 frames */ -#define HP100_BRIDGE 0x04 /* 0:No, 1:Yes tell hub i am a bridge */ -#define HP100_PROM_MODE 0x02 /* 0:No, 1:Yes tell hub card is */ - /* promiscuous */ -#define HP100_REPEATER 0x01 /* 0:No, 1:Yes tell hub MAC wants to */ - /* be a cascaded repeater */ - -/* - * 100MB LAN Control and Configuration Register - * (Page MAC_CTRL, VG_LAN_CFG_1, Offset 0x0a) - */ -#define HP100_VG_SEL 0x80 /* 0:No, 1:Yes use 100 Mbit MAC */ -#define HP100_LINK_UP_ST 0x40 /* 0:No, 1:Yes endnode logged in */ -#define HP100_LINK_CABLE_ST 0x20 /* 0:No, 1:Yes cable can hear tones */ - /* from hub */ -#define HP100_LOAD_ADDR 0x10 /* 0->1 card addr will be sent */ - /* 100ms later the link status */ - /* bits are valid */ -#define HP100_LINK_CMD 0x08 /* 0->1 link will attempt to log in. */ - /* 100ms later the link status */ - /* bits are valid */ -#define HP100_TRN_DONE 0x04 /* NEW ETR-Chips only: Will be reset */ - /* after LinkUp Cmd is given and set */ - /* when training has completed. */ -#define HP100_LINK_GOOD_ST 0x02 /* 0:No, 1:Yes cable passed training */ -#define HP100_VG_RESET 0x01 /* 0:Yes, 1:No reset the 100VG MAC */ - - -/* - * MAC Configuration Register I - * (Page MAC_CTRL, MAC_CFG_1, Offset 0x0c) - */ -#define HP100_RX_IDLE 0x80 /* 0:Yes, 1:No currently receiving pkts */ -#define HP100_TX_IDLE 0x40 /* 0:Yes, 1:No currently Txing pkts */ -#define HP100_RX_EN 0x20 /* 1: allow receiving of pkts */ -#define HP100_TX_EN 0x10 /* 1: allow transmitting of pkts */ -#define HP100_ACC_ERRORED 0x08 /* 0:No, 1:Yes allow Rx of errored pkts */ -#define HP100_ACC_MC 0x04 /* 0:No, 1:Yes allow Rx of multicast pkts */ -#define HP100_ACC_BC 0x02 /* 0:No, 1:Yes allow Rx of broadcast pkts */ -#define HP100_ACC_PHY 0x01 /* 0:No, 1:Yes allow Rx of ALL phys. pkts */ -#define HP100_MAC1MODEMASK 0xf0 /* Hide ACC bits */ -#define HP100_MAC1MODE1 0x00 /* Receive nothing, must also disable RX */ -#define HP100_MAC1MODE2 0x00 -#define HP100_MAC1MODE3 HP100_MAC1MODE2 | HP100_ACC_BC -#define HP100_MAC1MODE4 HP100_MAC1MODE3 | HP100_ACC_MC -#define HP100_MAC1MODE5 HP100_MAC1MODE4 /* set mc hash to all ones also */ -#define HP100_MAC1MODE6 HP100_MAC1MODE5 | HP100_ACC_PHY /* Promiscuous */ -/* Note MODE6 will receive all GOOD packets on the LAN. This really needs - a mode 7 defined to be LAN Analyzer mode, which will receive errored and - runt packets, and keep the CRC bytes. */ -#define HP100_MAC1MODE7 HP100_MAC1MODE6 | HP100_ACC_ERRORED - -/* - * MAC Configuration Register II - * (Page MAC_CTRL, MAC_CFG_2, Offset 0x0d) - */ -#define HP100_TR_MODE 0x80 /* 0:No, 1:Yes support Token Ring formats */ -#define HP100_TX_SAME 0x40 /* 0:No, 1:Yes Tx same packet continuous */ -#define HP100_LBK_XCVR 0x20 /* 0:No, 1:Yes loopback through MAC & */ - /* transceiver */ -#define HP100_LBK_MAC 0x10 /* 0:No, 1:Yes loopback through MAC */ -#define HP100_CRC_I 0x08 /* 0:No, 1:Yes inhibit CRC on Tx packets */ -#define HP100_ACCNA 0x04 /* 1: For 802.5: Accept only token ring - * group addr that maches NA mask */ -#define HP100_KEEP_CRC 0x02 /* 0:No, 1:Yes keep CRC on Rx packets. 
*/ - /* The length will reflect this. */ -#define HP100_ACCFA 0x01 /* 1: For 802.5: Accept only functional - * addrs that match FA mask (page1) */ -#define HP100_MAC2MODEMASK 0x02 -#define HP100_MAC2MODE1 0x00 -#define HP100_MAC2MODE2 0x00 -#define HP100_MAC2MODE3 0x00 -#define HP100_MAC2MODE4 0x00 -#define HP100_MAC2MODE5 0x00 -#define HP100_MAC2MODE6 0x00 -#define HP100_MAC2MODE7 KEEP_CRC - -/* - * MAC Configuration Register III - * (Page MAC_CTRL, MAC_CFG_3, Offset 0x0e) - */ -#define HP100_PACKET_PACE 0x03 /* Packet Pacing: - * 00: No packet pacing - * 01: 8 to 16 uS delay - * 10: 16 to 32 uS delay - * 11: 32 to 64 uS delay - */ -#define HP100_LRF_EN 0x04 /* 1: External LAN Rcv Filter and - * TCP/IP Checksumming enabled. */ -#define HP100_AUTO_MODE 0x10 /* 1: AutoSelect between 10/100 */ - -/* - * MAC Configuration Register IV - * (Page MAC_CTRL, MAC_CFG_4, Offset 0x0f) - */ -#define HP100_MAC_SEL_ST 0x01 /* (R): Status of external VGSEL - * Signal, 1=100VG, 0=10Mbit sel. */ -#define HP100_LINK_FAIL_ST 0x02 /* (R): Status of Link Fail portion - * of the Misc. Interrupt */ - -/* - * 100 MB LAN Training Request/Allowed Registers - * (Page MAC_CTRL, TRAIN_REQUEST and TRAIN_ALLOW, Offset 0x14-0x16)(ETR parts only) - */ -#define HP100_MACRQ_REPEATER 0x0001 /* 1: MAC tells HUB it wants to be - * a cascaded repeater - * 0: ... wants to be a DTE */ -#define HP100_MACRQ_PROMSC 0x0006 /* 2 bits: Promiscious mode - * 00: Rcv only unicast packets - * specifically addr to this - * endnode - * 10: Rcv all pckts fwded by - * the local repeater */ -#define HP100_MACRQ_FRAMEFMT_EITHER 0x0018 /* 11: either format allowed */ -#define HP100_MACRQ_FRAMEFMT_802_3 0x0000 /* 00: 802.3 is requested */ -#define HP100_MACRQ_FRAMEFMT_802_5 0x0010 /* 10: 802.5 format is requested */ -#define HP100_CARD_MACVER 0xe000 /* R: 3 bit Cards 100VG MAC version */ -#define HP100_MALLOW_REPEATER 0x0001 /* If reset, requested access as an - * end node is allowed */ -#define HP100_MALLOW_PROMSC 0x0004 /* 2 bits: Promiscious mode - * 00: Rcv only unicast packets - * specifically addr to this - * endnode - * 10: Rcv all pckts fwded by - * the local repeater */ -#define HP100_MALLOW_FRAMEFMT 0x00e0 /* 2 bits: Frame Format - * 00: 802.3 format will be used - * 10: 802.5 format will be used */ -#define HP100_MALLOW_ACCDENIED 0x0400 /* N bit */ -#define HP100_MALLOW_CONFIGURE 0x0f00 /* C bit */ -#define HP100_MALLOW_DUPADDR 0x1000 /* D bit */ -#define HP100_HUB_MACVER 0xe000 /* R: 3 bit 802.12 MAC/RMAC training */ - /* protocol of repeater */ - -/* ****************************************************************************** */ - -/* - * Set/Reset bits - */ -#define HP100_SET_HB 0x0100 /* 0:Set fields to 0 whose mask is 1 */ -#define HP100_SET_LB 0x0001 /* HB sets upper byte, LB sets lower byte */ -#define HP100_RESET_HB 0x0000 /* For readability when resetting bits */ -#define HP100_RESET_LB 0x0000 /* For readability when resetting bits */ - -/* - * Misc. Constants - */ -#define HP100_LAN_100 100 /* lan_type value for VG */ -#define HP100_LAN_10 10 /* lan_type value for 10BaseT */ -#define HP100_LAN_COAX 9 /* lan_type value for Coax */ -#define HP100_LAN_ERR (-1) /* lan_type value for link down */ - -/* - * Bus Master Data Structures ---------------------------------------------- - */ - -#define MAX_RX_PDL 30 /* Card limit = 31 */ -#define MAX_RX_FRAG 2 /* Don't need more... 
*/ -#define MAX_TX_PDL 29 -#define MAX_TX_FRAG 2 /* Limit = 31 */ - -/* Define total PDL area size in bytes (should be 4096) */ -/* This is the size of kernel (dma) memory that will be allocated. */ -#define MAX_RINGSIZE ((MAX_RX_FRAG*8+4+4)*MAX_RX_PDL+(MAX_TX_FRAG*8+4+4)*MAX_TX_PDL)+16 - -/* Ethernet Packet Sizes */ -#define MIN_ETHER_SIZE 60 -#define MAX_ETHER_SIZE 1514 /* Needed for preallocation of */ - /* skb buffer when busmastering */ - -/* Tx or Rx Ring Entry */ -typedef struct hp100_ring { - u_int *pdl; /* Address of PDLs PDH, dword before - * this address is used for rx hdr */ - u_int pdl_paddr; /* Physical address of PDL */ - struct sk_buff *skb; - struct hp100_ring *next; -} hp100_ring_t; - - - -/* Mask for Header Descriptor */ -#define HP100_PKT_LEN_MASK 0x1FFF /* AND with RxLength to get length */ - - -/* Receive Packet Status. Note, the error bits are only valid if ACC_ERRORED - bit in the MAC Configuration Register 1 is set. */ -#define HP100_RX_PRI 0x8000 /* 0:No, 1:Yes packet is priority */ -#define HP100_SDF_ERR 0x4000 /* 0:No, 1:Yes start of frame error */ -#define HP100_SKEW_ERR 0x2000 /* 0:No, 1:Yes skew out of range */ -#define HP100_BAD_SYMBOL_ERR 0x1000 /* 0:No, 1:Yes invalid symbol received */ -#define HP100_RCV_IPM_ERR 0x0800 /* 0:No, 1:Yes pkt had an invalid packet */ - /* marker */ -#define HP100_SYMBOL_BAL_ERR 0x0400 /* 0:No, 1:Yes symbol balance error */ -#define HP100_VG_ALN_ERR 0x0200 /* 0:No, 1:Yes non-octet received */ -#define HP100_TRUNC_ERR 0x0100 /* 0:No, 1:Yes the packet was truncated */ -#define HP100_RUNT_ERR 0x0040 /* 0:No, 1:Yes pkt length < Min Pkt */ - /* Length Reg. */ -#define HP100_ALN_ERR 0x0010 /* 0:No, 1:Yes align error. */ -#define HP100_CRC_ERR 0x0008 /* 0:No, 1:Yes CRC occurred. */ - -/* The last three bits indicate the type of destination address */ - -#define HP100_MULTI_ADDR_HASH 0x0006 /* 110: Addr multicast, matched hash */ -#define HP100_BROADCAST_ADDR 0x0003 /* x11: Addr broadcast */ -#define HP100_MULTI_ADDR_NO_HASH 0x0002 /* 010: Addr multicast, didn't match hash */ -#define HP100_PHYS_ADDR_MATCH 0x0001 /* x01: Addr was physical and mine */ -#define HP100_PHYS_ADDR_NO_MATCH 0x0000 /* x00: Addr was physical but not mine */ - -/* - * macros - */ - -#define hp100_inb( reg ) \ - inb( ioaddr + HP100_REG_##reg ) -#define hp100_inw( reg ) \ - inw( ioaddr + HP100_REG_##reg ) -#define hp100_inl( reg ) \ - inl( ioaddr + HP100_REG_##reg ) -#define hp100_outb( data, reg ) \ - outb( data, ioaddr + HP100_REG_##reg ) -#define hp100_outw( data, reg ) \ - outw( data, ioaddr + HP100_REG_##reg ) -#define hp100_outl( data, reg ) \ - outl( data, ioaddr + HP100_REG_##reg ) -#define hp100_orb( data, reg ) \ - outb( inb( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg ) -#define hp100_orw( data, reg ) \ - outw( inw( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg ) -#define hp100_andb( data, reg ) \ - outb( inb( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg ) -#define hp100_andw( data, reg ) \ - outw( inw( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg ) - -#define hp100_page( page ) \ - outw( HP100_PAGE_##page, ioaddr + HP100_REG_PAGING ) -#define hp100_ints_off() \ - outw( HP100_INT_EN | HP100_RESET_LB, ioaddr + HP100_REG_OPTION_LSW ) -#define hp100_ints_on() \ - outw( HP100_INT_EN | HP100_SET_LB, ioaddr + HP100_REG_OPTION_LSW ) -#define hp100_mem_map_enable() \ - outw( HP100_MMAP_DIS | HP100_RESET_HB, ioaddr + HP100_REG_OPTION_LSW ) -#define hp100_mem_map_disable() \ - outw( HP100_MMAP_DIS 
| HP100_SET_HB, ioaddr + HP100_REG_OPTION_LSW ) diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-ad7192 b/drivers/staging/iio/Documentation/sysfs-bus-iio-ad7192 deleted file mode 100644 index 1c35c507cc05..000000000000 --- a/drivers/staging/iio/Documentation/sysfs-bus-iio-ad7192 +++ /dev/null @@ -1,20 +0,0 @@ -What: /sys/.../iio:deviceX/ac_excitation_en -KernelVersion: 3.1.0 -Contact: linux-iio@vger.kernel.org -Description: - This attribute, if available, is used to enable the AC - excitation mode found on some converters. In ac excitation mode, - the polarity of the excitation voltage is reversed on - alternate cycles, to eliminate DC errors. - -What: /sys/.../iio:deviceX/bridge_switch_en -KernelVersion: 3.1.0 -Contact: linux-iio@vger.kernel.org -Description: - This attribute, if available, is used to close or open the - bridge power down switch found on some converters. - In bridge applications, such as strain gauges and load cells, - the bridge itself consumes the majority of the current in the - system. To minimize the current consumption of the system, - the bridge can be disconnected (when it is not being used - using the bridge_switch_en attribute. diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO index 1b8ebf2c1b69..4d469016a13a 100644 --- a/drivers/staging/iio/TODO +++ b/drivers/staging/iio/TODO @@ -1,10 +1,4 @@ -2018-04-15 - -All affected drivers: -Convert all uses of the old GPIO API from <linux/gpio.h> to the -GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO -lines from device tree, ACPI or board files, board files should -use <linux/gpio/machine.h>. +2020-02-25 ADI Drivers: diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c index 39dfe3f7f254..fef52d9b5346 100644 --- a/drivers/staging/iio/accel/adis16203.c +++ b/drivers/staging/iio/accel/adis16203.c @@ -250,6 +250,7 @@ static const struct adis_data adis16203_data = { .diag_stat_reg = ADIS16203_DIAG_STAT, .self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN, + .self_test_reg = ADIS16203_MSC_CTRL, .self_test_no_autoclear = true, .timeouts = &adis16203_timeouts, diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c index 39eb8364aa95..8bd35c6c56a1 100644 --- a/drivers/staging/iio/accel/adis16240.c +++ b/drivers/staging/iio/accel/adis16240.c @@ -373,6 +373,7 @@ static const struct adis_data adis16240_data = { .diag_stat_reg = ADIS16240_DIAG_STAT, .self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN, + .self_test_reg = ADIS16240_MSC_CTRL, .self_test_no_autoclear = true, .timeouts = &adis16240_timeouts, diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig index 31cd9a12f40f..b25f41053fac 100644 --- a/drivers/staging/iio/adc/Kconfig +++ b/drivers/staging/iio/adc/Kconfig @@ -15,18 +15,6 @@ config AD7816 To compile this driver as a module, choose M here: the module will be called ad7816. -config AD7192 - tristate "Analog Devices AD7190 AD7192 AD7193 AD7195 ADC driver" - depends on SPI - select AD_SIGMA_DELTA - help - Say yes here to build support for Analog Devices AD7190, - AD7192, AD7193 or AD7195 SPI analog to digital converters (ADC). - If unsure, say N (but it's safe to say "Y"). - - To compile this driver as a module, choose M here: the - module will be called ad7192. 
- config AD7280 tristate "Analog Devices AD7280A Lithium Ion Battery Monitoring System" depends on SPI diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile index 4b76769b32bc..6436a62b6278 100644 --- a/drivers/staging/iio/adc/Makefile +++ b/drivers/staging/iio/adc/Makefile @@ -4,5 +4,4 @@ # obj-$(CONFIG_AD7816) += ad7816.o -obj-$(CONFIG_AD7192) += ad7192.o obj-$(CONFIG_AD7280) += ad7280a.o diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c index 19a5f244dcae..bef6bd1295ea 100644 --- a/drivers/staging/iio/adc/ad7280a.c +++ b/drivers/staging/iio/adc/ad7280a.c @@ -824,6 +824,10 @@ out: return IRQ_HANDLED; } +/* Note: No need to fix checkpatch warning that reads: + * CHECK: spaces preferred around that '-' (ctx:VxV) + * The function argument is stringified and doesn't need a fix + */ static IIO_DEVICE_ATTR_NAMED(in_thresh_low_value, in_voltage-voltage_thresh_low_value, 0644, diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c index 93cf28febdf6..7b00d7069e21 100644 --- a/drivers/staging/kpc2000/kpc2000/core.c +++ b/drivers/staging/kpc2000/kpc2000/core.c @@ -110,10 +110,10 @@ static ssize_t cpld_reconfigure(struct device *dev, const char *buf, size_t count) { struct kp2000_device *pcard = dev_get_drvdata(dev); - long wr_val; + unsigned long wr_val; int rv; - rv = kstrtol(buf, 0, &wr_val); + rv = kstrtoul(buf, 0, &wr_val); if (rv < 0) return rv; if (wr_val > 7) diff --git a/drivers/staging/kpc2000/kpc2000_spi.c b/drivers/staging/kpc2000/kpc2000_spi.c index 1c360daa703d..44017d523da5 100644 --- a/drivers/staging/kpc2000/kpc2000_spi.c +++ b/drivers/staging/kpc2000/kpc2000_spi.c @@ -386,8 +386,8 @@ kp_spi_transfer_one_message(struct spi_master *master, struct spi_message *m) } } - if (transfer->delay_usecs) - udelay(transfer->delay_usecs); + if (transfer->delay.value) + ndelay(spi_delay_to_ns(&transfer->delay, transfer)); } /* de-assert chip select to end the sequence */ diff --git a/drivers/staging/kpc2000/kpc_dma/dma.c b/drivers/staging/kpc2000/kpc_dma/dma.c index 51a4dd534a0d..452a3f7c835d 100644 --- a/drivers/staging/kpc2000/kpc_dma/dma.c +++ b/drivers/staging/kpc2000/kpc_dma/dma.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ +// SPDX-License-Identifier: GPL-2.0+ #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> @@ -97,11 +97,10 @@ int setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt) if (WARN(!(caps & ENG_CAP_PRESENT), "%s() called for DMA Engine at %p which isn't present in hardware!\n", __func__, eng)) return -ENXIO; - if (caps & ENG_CAP_DIRECTION) { + if (caps & ENG_CAP_DIRECTION) eng->dir = DMA_FROM_DEVICE; - } else { + else eng->dir = DMA_TO_DEVICE; - } eng->desc_pool_cnt = desc_cnt; eng->desc_pool = dma_pool_create("KPC DMA Descriptors", &eng->pldev->dev, sizeof(struct kpc_dma_descriptor), DMA_DESC_ALIGNMENT, 4096); @@ -236,7 +235,7 @@ int count_descriptors_available(struct kpc_dma_device *eng) struct kpc_dma_descriptor *cur = eng->desc_next; while (cur != eng->desc_completed) { - BUG_ON(cur == NULL); + BUG_ON(!cur); count++; cur = cur->Next; } diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c index 40525540dde6..7caabdd77bbf 100644 --- a/drivers/staging/kpc2000/kpc_dma/fileops.c +++ b/drivers/staging/kpc2000/kpc_dma/fileops.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ +// SPDX-License-Identifier: GPL-2.0+ #include <linux/module.h> #include <linux/init.h> #include <linux/mm.h> @@ 
-18,8 +18,8 @@ static inline unsigned int count_pages(unsigned long iov_base, size_t iov_len) { - unsigned long first = (iov_base & PAGE_MASK) >> PAGE_SHIFT; - unsigned long last = ((iov_base+iov_len-1) & PAGE_MASK) >> PAGE_SHIFT; + unsigned long first = (iov_base & PAGE_MASK) >> PAGE_SHIFT; + unsigned long last = ((iov_base + iov_len - 1) & PAGE_MASK) >> PAGE_SHIFT; return last - first + 1; } @@ -66,7 +66,8 @@ static int kpc_dma_transfer(struct dev_private_data *priv, acd->page_count = count_pages(iov_base, iov_len); // Allocate an array of page pointers - acd->user_pages = kzalloc(sizeof(struct page *) * acd->page_count, GFP_KERNEL); + acd->user_pages = kcalloc(acd->page_count, sizeof(struct page *), + GFP_KERNEL); if (!acd->user_pages) { dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for for the page pointers\n"); rv = -ENOMEM; @@ -83,7 +84,7 @@ static int kpc_dma_transfer(struct dev_private_data *priv, } // Allocate and setup the sg_table (scatterlist entries) - rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE-1), iov_len, GFP_KERNEL); + rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE - 1), iov_len, GFP_KERNEL); if (rv) { dev_err(&priv->ldev->pldev->dev, "Couldn't alloc sg_table (%ld)\n", rv); goto err_alloc_sg_table; @@ -124,19 +125,19 @@ static int kpc_dma_transfer(struct dev_private_data *priv, pcnt = count_parts_for_sge(sg); for (p = 0 ; p < pcnt ; p++) { // Fill out the descriptor - BUG_ON(desc == NULL); + BUG_ON(!desc); clear_desc(desc); - if (p != pcnt-1) { + if (p != pcnt - 1) desc->DescByteCount = 0x80000; - } else { + else desc->DescByteCount = sg_dma_len(sg) - (p * 0x80000); - } + desc->DescBufferByteCount = desc->DescByteCount; desc->DescControlFlags |= DMA_DESC_CTL_IRQONERR; if (i == 0 && p == 0) desc->DescControlFlags |= DMA_DESC_CTL_SOP; - if (i == acd->mapped_entry_count-1 && p == pcnt-1) + if (i == acd->mapped_entry_count - 1 && p == pcnt - 1) desc->DescControlFlags |= DMA_DESC_CTL_EOP | DMA_DESC_CTL_IRQONDONE; desc->DescCardAddrLS = (card_addr & 0xFFFFFFFF); @@ -148,13 +149,13 @@ static int kpc_dma_transfer(struct dev_private_data *priv, desc->DescSystemAddrMS = (dma_addr & 0xFFFFFFFF00000000UL) >> 32; user_ctl = acd->priv->user_ctl; - if (i == acd->mapped_entry_count-1 && p == pcnt-1) { + if (i == acd->mapped_entry_count - 1 && p == pcnt - 1) user_ctl = acd->priv->user_ctl_last; - } + desc->DescUserControlLS = (user_ctl & 0x00000000FFFFFFFFUL) >> 0; desc->DescUserControlMS = (user_ctl & 0xFFFFFFFF00000000UL) >> 32; - if (i == acd->mapped_entry_count-1 && p == pcnt-1) + if (i == acd->mapped_entry_count - 1 && p == pcnt - 1) desc->acd = acd; dev_dbg(&priv->ldev->pldev->dev, " Filled descriptor %p (acd = %p)\n", desc, desc->acd); @@ -188,9 +189,9 @@ static int kpc_dma_transfer(struct dev_private_data *priv, sg_free_table(&acd->sgt); err_dma_map_sg: err_alloc_sg_table: - for (i = 0 ; i < acd->page_count ; i++) { + for (i = 0 ; i < acd->page_count ; i++) put_page(acd->user_pages[i]); - } + err_get_user_pages: kfree(acd->user_pages); err_alloc_userpages: @@ -203,23 +204,21 @@ void transfer_complete_cb(struct aio_cb_data *acd, size_t xfr_count, u32 flags) { unsigned int i; - BUG_ON(acd == NULL); - BUG_ON(acd->user_pages == NULL); - BUG_ON(acd->sgt.sgl == NULL); - BUG_ON(acd->ldev == NULL); - BUG_ON(acd->ldev->pldev == NULL); + BUG_ON(!acd); + BUG_ON(!acd->user_pages); + BUG_ON(!acd->sgt.sgl); + BUG_ON(!acd->ldev); + BUG_ON(!acd->ldev->pldev); for (i = 0 ; i < 
acd->page_count ; i++) { - if (!PageReserved(acd->user_pages[i])) { + if (!PageReserved(acd->user_pages[i])) set_page_dirty(acd->user_pages[i]); - } } dma_unmap_sg(&acd->ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, acd->ldev->dir); - for (i = 0 ; i < acd->page_count ; i++) { + for (i = 0 ; i < acd->page_count ; i++) put_page(acd->user_pages[i]); - } sg_free_table(&acd->sgt); @@ -253,7 +252,7 @@ int kpc_dma_open(struct inode *inode, struct file *filp) return -EBUSY; /* already open */ } - priv = kzalloc(sizeof(struct dev_private_data), GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c index ec79a8500caf..c3b30551e0ca 100644 --- a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c +++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c @@ -1,8 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ +// SPDX-License-Identifier: GPL-2.0+ #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> -#include <asm/io.h> +#include <linux/io.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/platform_device.h> @@ -26,9 +26,8 @@ struct kpc_dma_device *kpc_dma_lookup_device(int minor) mutex_lock(&kpc_dma_mtx); list_for_each_entry(c, &kpc_dma_list, list) { - if (c->pldev->id == minor) { + if (c->pldev->id == minor) goto out; - } } c = NULL; // not-found case out: @@ -98,7 +97,7 @@ int kpc_dma_probe(struct platform_device *pldev) int rv = 0; dev_t dev; - struct kpc_dma_device *ldev = kzalloc(sizeof(struct kpc_dma_device), GFP_KERNEL); + struct kpc_dma_device *ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); if (!ldev) { dev_err(&pldev->dev, "%s: unable to kzalloc space for kpc_dma_device\n", __func__); diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h index 4c8cc866b826..8b9c978257b9 100644 --- a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h +++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h @@ -198,14 +198,14 @@ u32 GetEngineCompletePtr(struct kpc_dma_device *eng) static inline void lock_engine(struct kpc_dma_device *eng) { - BUG_ON(eng == NULL); + BUG_ON(!eng); mutex_lock(&eng->sem); } static inline void unlock_engine(struct kpc_dma_device *eng) { - BUG_ON(eng == NULL); + BUG_ON(!eng); mutex_unlock(&eng->sem); } diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c index 4b379542ecd5..6b2660c94f4e 100644 --- a/drivers/staging/ks7010/ks7010_sdio.c +++ b/drivers/staging/ks7010/ks7010_sdio.c @@ -446,7 +446,8 @@ static void ks_wlan_hw_rx(struct ks_wlan_private *priv, size_t size) DUMP_PREFIX_OFFSET, rx_buffer->data, 32); #endif - ret = ks7010_sdio_writeb(priv, READ_STATUS_REG, REG_STATUS_IDLE); + ret = ks7010_sdio_writeb(priv, READ_STATUS_REG, + REG_STATUS_IDLE); if (ret) netdev_err(priv->net_dev, "write READ_STATUS_REG\n"); diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h index ca7dc8f5166c..39138191a556 100644 --- a/drivers/staging/ks7010/ks_hostif.h +++ b/drivers/staging/ks7010/ks_hostif.h @@ -70,7 +70,7 @@ struct hostif_data_request { #define TYPE_DATA 0x0000 #define TYPE_AUTH 0x0001 __le16 reserved; - u8 data[0]; + u8 data[]; } __packed; #define TYPE_PMK1 0x0001 @@ -194,7 +194,7 @@ enum mib_data_type { struct hostif_mib_value { __le16 size; __le16 type; - u8 body[0]; + u8 body[]; } __packed; struct hostif_mib_get_confirm_t { diff --git a/drivers/staging/media/Kconfig 
b/drivers/staging/media/Kconfig index c394abffea86..e59a846bc909 100644 --- a/drivers/staging/media/Kconfig +++ b/drivers/staging/media/Kconfig @@ -42,4 +42,8 @@ source "drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig" source "drivers/staging/media/rkisp1/Kconfig" +if MEDIA_ANALOG_TV_SUPPORT +source "drivers/staging/media/usbvision/Kconfig" +endif + endif diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile index ea9fce8014bb..23c682461b62 100644 --- a/drivers/staging/media/Makefile +++ b/drivers/staging/media/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/ obj-$(CONFIG_SOC_CAMERA) += soc_camera/ obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0) += phy-rockchip-dphy-rx0/ obj-$(CONFIG_VIDEO_ROCKCHIP_ISP1) += rkisp1/ +obj-$(CONFIG_VIDEO_USBVISION) += usbvision/ diff --git a/drivers/staging/media/allegro-dvt/Makefile b/drivers/staging/media/allegro-dvt/Makefile index 80817160815c..8e306dcdc55c 100644 --- a/drivers/staging/media/allegro-dvt/Makefile +++ b/drivers/staging/media/allegro-dvt/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -allegro-objs := allegro-core.o nal-h264.o +allegro-objs := allegro-core.o nal-h264.o allegro-mail.o obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro.o diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c index 3be41698df4c..70f133a842dd 100644 --- a/drivers/staging/media/allegro-dvt/allegro-core.c +++ b/drivers/staging/media/allegro-dvt/allegro-core.c @@ -5,7 +5,9 @@ * Allegro DVT video encoder driver */ +#include <linux/bits.h> #include <linux/firmware.h> +#include <linux/gcd.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> @@ -26,6 +28,7 @@ #include <media/videobuf2-dma-contig.h> #include <media/videobuf2-v4l2.h> +#include "allegro-mail.h" #include "nal-h264.h" /* @@ -40,6 +43,8 @@ #define ALLEGRO_HEIGHT_DEFAULT 1080 #define ALLEGRO_HEIGHT_MAX 2160 +#define ALLEGRO_FRAMERATE_DEFAULT ((struct v4l2_fract) { 30, 1 }) + #define ALLEGRO_GOP_SIZE_DEFAULT 25 #define ALLEGRO_GOP_SIZE_MAX 1000 @@ -176,6 +181,7 @@ struct allegro_channel { unsigned int width; unsigned int height; unsigned int stride; + struct v4l2_fract framerate; enum v4l2_colorspace colorspace; enum v4l2_ycbcr_encoding ycbcr_enc; @@ -192,7 +198,7 @@ struct allegro_channel { unsigned int sizeimage_encoded; unsigned int csequence; - enum v4l2_mpeg_video_bitrate_mode bitrate_mode; + bool frame_rc_enable; unsigned int bitrate; unsigned int bitrate_peak; unsigned int cpb_size; @@ -200,9 +206,17 @@ struct allegro_channel { struct v4l2_ctrl *mpeg_video_h264_profile; struct v4l2_ctrl *mpeg_video_h264_level; - struct v4l2_ctrl *mpeg_video_bitrate_mode; - struct v4l2_ctrl *mpeg_video_bitrate; - struct v4l2_ctrl *mpeg_video_bitrate_peak; + struct v4l2_ctrl *mpeg_video_h264_i_frame_qp; + struct v4l2_ctrl *mpeg_video_h264_max_qp; + struct v4l2_ctrl *mpeg_video_h264_min_qp; + struct v4l2_ctrl *mpeg_video_h264_p_frame_qp; + struct v4l2_ctrl *mpeg_video_h264_b_frame_qp; + struct v4l2_ctrl *mpeg_video_frame_rc_enable; + struct { /* video bitrate mode control cluster */ + struct v4l2_ctrl *mpeg_video_bitrate_mode; + struct v4l2_ctrl *mpeg_video_bitrate; + struct v4l2_ctrl *mpeg_video_bitrate_peak; + }; struct v4l2_ctrl *mpeg_video_cpb_size; struct v4l2_ctrl *mpeg_video_gop_size; @@ -215,6 +229,11 @@ struct allegro_channel { struct list_head buffers_reference; struct list_head buffers_intermediate; + struct list_head source_shadow_list; + struct list_head stream_shadow_list; + /* protect 
shadow lists of buffers passed to firmware */ + struct mutex shadow_list_lock; + struct list_head list; struct completion completion; @@ -236,6 +255,14 @@ allegro_get_state(struct allegro_channel *channel) return channel->state; } +struct allegro_m2m_buffer { + struct v4l2_m2m_buffer buf; + struct list_head head; +}; + +#define to_allegro_m2m_buffer(__buf) \ + container_of(__buf, struct allegro_m2m_buffer, buf) + struct fw_info { unsigned int id; unsigned int id_codec; @@ -258,276 +285,33 @@ static const struct fw_info supported_firmware[] = { }, }; -enum mcu_msg_type { - MCU_MSG_TYPE_INIT = 0x0000, - MCU_MSG_TYPE_CREATE_CHANNEL = 0x0005, - MCU_MSG_TYPE_DESTROY_CHANNEL = 0x0006, - MCU_MSG_TYPE_ENCODE_FRAME = 0x0007, - MCU_MSG_TYPE_PUT_STREAM_BUFFER = 0x0012, - MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE = 0x000e, - MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE = 0x000f, -}; +static inline u32 to_mcu_addr(struct allegro_dev *dev, dma_addr_t phys) +{ + if (upper_32_bits(phys) || (lower_32_bits(phys) & MCU_CACHE_OFFSET)) + v4l2_warn(&dev->v4l2_dev, + "address %pad is outside mcu window\n", &phys); + + return lower_32_bits(phys) | MCU_CACHE_OFFSET; +} -static const char *msg_type_name(enum mcu_msg_type type) +static inline u32 to_mcu_size(struct allegro_dev *dev, size_t size) { - static char buf[9]; + return lower_32_bits(size); +} - switch (type) { - case MCU_MSG_TYPE_INIT: - return "INIT"; - case MCU_MSG_TYPE_CREATE_CHANNEL: - return "CREATE_CHANNEL"; - case MCU_MSG_TYPE_DESTROY_CHANNEL: - return "DESTROY_CHANNEL"; - case MCU_MSG_TYPE_ENCODE_FRAME: - return "ENCODE_FRAME"; - case MCU_MSG_TYPE_PUT_STREAM_BUFFER: - return "PUT_STREAM_BUFFER"; - case MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE: - return "PUSH_BUFFER_INTERMEDIATE"; - case MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE: - return "PUSH_BUFFER_REFERENCE"; - default: - snprintf(buf, sizeof(buf), "(0x%04x)", type); - return buf; - } -} - -struct mcu_msg_header { - u16 length; /* length of the body in bytes */ - u16 type; -} __attribute__ ((__packed__)); - -struct mcu_msg_init_request { - struct mcu_msg_header header; - u32 reserved0; /* maybe a unused channel id */ - u32 suballoc_dma; - u32 suballoc_size; - s32 l2_cache[3]; -} __attribute__ ((__packed__)); - -struct mcu_msg_init_response { - struct mcu_msg_header header; - u32 reserved0; -} __attribute__ ((__packed__)); - -struct mcu_msg_create_channel { - struct mcu_msg_header header; - u32 user_id; - u16 width; - u16 height; - u32 format; - u32 colorspace; - u32 src_mode; - u8 profile; - u16 constraint_set_flags; - s8 codec; - u16 level; - u16 tier; - u32 sps_param; - u32 pps_param; - - u32 enc_option; -#define AL_OPT_WPP BIT(0) -#define AL_OPT_TILE BIT(1) -#define AL_OPT_LF BIT(2) -#define AL_OPT_LF_X_SLICE BIT(3) -#define AL_OPT_LF_X_TILE BIT(4) -#define AL_OPT_SCL_LST BIT(5) -#define AL_OPT_CONST_INTRA_PRED BIT(6) -#define AL_OPT_QP_TAB_RELATIVE BIT(7) -#define AL_OPT_FIX_PREDICTOR BIT(8) -#define AL_OPT_CUSTOM_LDA BIT(9) -#define AL_OPT_ENABLE_AUTO_QP BIT(10) -#define AL_OPT_ADAPT_AUTO_QP BIT(11) -#define AL_OPT_TRANSFO_SKIP BIT(13) -#define AL_OPT_FORCE_REC BIT(15) -#define AL_OPT_FORCE_MV_OUT BIT(16) -#define AL_OPT_FORCE_MV_CLIP BIT(17) -#define AL_OPT_LOWLAT_SYNC BIT(18) -#define AL_OPT_LOWLAT_INT BIT(19) -#define AL_OPT_RDO_COST_MODE BIT(20) - - s8 beta_offset; - s8 tc_offset; - u16 reserved10; - u32 unknown11; - u32 unknown12; - u16 num_slices; - u16 prefetch_auto; - u32 prefetch_mem_offset; - u32 prefetch_mem_size; - u16 clip_hrz_range; - u16 clip_vrt_range; - u16 me_range[4]; - u8 max_cu_size; - u8 
min_cu_size; - u8 max_tu_size; - u8 min_tu_size; - u8 max_transfo_depth_inter; - u8 max_transfo_depth_intra; - u16 reserved20; - u32 entropy_mode; - u32 wp_mode; - - /* rate control param */ - u32 rate_control_mode; - u32 initial_rem_delay; - u32 cpb_size; - u16 framerate; - u16 clk_ratio; - u32 target_bitrate; - u32 max_bitrate; - u16 initial_qp; - u16 min_qp; - u16 max_qp; - s16 ip_delta; - s16 pb_delta; - u16 golden_ref; - u16 golden_delta; - u16 golden_ref_frequency; - u32 rate_control_option; - - /* gop param */ - u32 gop_ctrl_mode; - u32 freq_ird; - u32 freq_lt; - u32 gdr_mode; - u32 gop_length; - u32 unknown39; - - u32 subframe_latency; - u32 lda_control_mode; -} __attribute__ ((__packed__)); - -struct mcu_msg_create_channel_response { - struct mcu_msg_header header; - u32 channel_id; - u32 user_id; - u32 options; - u32 num_core; - u32 pps_param; - u32 int_buffers_count; - u32 int_buffers_size; - u32 rec_buffers_count; - u32 rec_buffers_size; - u32 reserved; - u32 error_code; -} __attribute__ ((__packed__)); - -struct mcu_msg_destroy_channel { - struct mcu_msg_header header; - u32 channel_id; -} __attribute__ ((__packed__)); - -struct mcu_msg_destroy_channel_response { - struct mcu_msg_header header; - u32 channel_id; -} __attribute__ ((__packed__)); - -struct mcu_msg_push_buffers_internal_buffer { - u32 dma_addr; - u32 mcu_addr; - u32 size; -} __attribute__ ((__packed__)); - -struct mcu_msg_push_buffers_internal { - struct mcu_msg_header header; - u32 channel_id; - struct mcu_msg_push_buffers_internal_buffer buffer[0]; -} __attribute__ ((__packed__)); - -struct mcu_msg_put_stream_buffer { - struct mcu_msg_header header; - u32 channel_id; - u32 dma_addr; - u32 mcu_addr; - u32 size; - u32 offset; - u64 stream_id; -} __attribute__ ((__packed__)); - -struct mcu_msg_encode_frame { - struct mcu_msg_header header; - u32 channel_id; - u32 reserved; - - u32 encoding_options; -#define AL_OPT_USE_QP_TABLE BIT(0) -#define AL_OPT_FORCE_LOAD BIT(1) -#define AL_OPT_USE_L2 BIT(2) -#define AL_OPT_DISABLE_INTRA BIT(3) -#define AL_OPT_DEPENDENT_SLICES BIT(4) - - s16 pps_qp; - u16 padding; - u64 user_param; - u64 src_handle; +static inline u32 to_codec_addr(struct allegro_dev *dev, dma_addr_t phys) +{ + if (upper_32_bits(phys)) + v4l2_warn(&dev->v4l2_dev, + "address %pad cannot be used by codec\n", &phys); - u32 request_options; -#define AL_OPT_SCENE_CHANGE BIT(0) -#define AL_OPT_RESTART_GOP BIT(1) -#define AL_OPT_USE_LONG_TERM BIT(2) -#define AL_OPT_UPDATE_PARAMS BIT(3) - - /* u32 scene_change_delay (optional) */ - /* rate control param (optional) */ - /* gop param (optional) */ - u32 src_y; - u32 src_uv; - u32 stride; - u32 ep2; - u64 ep2_v; -} __attribute__ ((__packed__)); - -struct mcu_msg_encode_frame_response { - struct mcu_msg_header header; - u32 channel_id; - u64 stream_id; /* see mcu_msg_put_stream_buffer */ - u64 user_param; /* see mcu_msg_encode_frame */ - u64 src_handle; /* see mcu_msg_encode_frame */ - u16 skip; - u16 is_ref; - u32 initial_removal_delay; - u32 dpb_output_delay; - u32 size; - u32 frame_tag_size; - s32 stuffing; - s32 filler; - u16 num_column; - u16 num_row; - u16 qp; - u8 num_ref_idx_l0; - u8 num_ref_idx_l1; - u32 partition_table_offset; - s32 partition_table_size; - u32 sum_complex; - s32 tile_width[4]; - s32 tile_height[22]; - u32 error_code; - - u32 slice_type; -#define AL_ENC_SLICE_TYPE_B 0 -#define AL_ENC_SLICE_TYPE_P 1 -#define AL_ENC_SLICE_TYPE_I 2 - - u32 pic_struct; - u8 is_idr; - u8 is_first_slice; - u8 is_last_slice; - u8 reserved; - u16 pps_qp; - u16 
reserved1; - u32 reserved2; -} __attribute__ ((__packed__)); - -union mcu_msg_response { - struct mcu_msg_header header; - struct mcu_msg_init_response init; - struct mcu_msg_create_channel_response create_channel; - struct mcu_msg_destroy_channel_response destroy_channel; - struct mcu_msg_encode_frame_response encode_frame; -}; + return lower_32_bits(phys); +} + +static inline u64 ptr_to_u64(const void *ptr) +{ + return (uintptr_t)ptr; +} /* Helper functions for channel and user operations */ @@ -572,6 +356,56 @@ static inline bool channel_exists(struct allegro_channel *channel) return channel->mcu_channel_id != -1; } +#define AL_ERROR 0x80 +#define AL_ERR_INIT_FAILED 0x81 +#define AL_ERR_NO_FRAME_DECODED 0x82 +#define AL_ERR_RESOLUTION_CHANGE 0x85 +#define AL_ERR_NO_MEMORY 0x87 +#define AL_ERR_STREAM_OVERFLOW 0x88 +#define AL_ERR_TOO_MANY_SLICES 0x89 +#define AL_ERR_BUF_NOT_READY 0x8c +#define AL_ERR_NO_CHANNEL_AVAILABLE 0x8d +#define AL_ERR_RESOURCE_UNAVAILABLE 0x8e +#define AL_ERR_NOT_ENOUGH_CORES 0x8f +#define AL_ERR_REQUEST_MALFORMED 0x90 +#define AL_ERR_CMD_NOT_ALLOWED 0x91 +#define AL_ERR_INVALID_CMD_VALUE 0x92 + +static inline const char *allegro_err_to_string(unsigned int err) +{ + switch (err) { + case AL_ERR_INIT_FAILED: + return "initialization failed"; + case AL_ERR_NO_FRAME_DECODED: + return "no frame decoded"; + case AL_ERR_RESOLUTION_CHANGE: + return "resolution change"; + case AL_ERR_NO_MEMORY: + return "out of memory"; + case AL_ERR_STREAM_OVERFLOW: + return "stream buffer overflow"; + case AL_ERR_TOO_MANY_SLICES: + return "too many slices"; + case AL_ERR_BUF_NOT_READY: + return "buffer not ready"; + case AL_ERR_NO_CHANNEL_AVAILABLE: + return "no channel available"; + case AL_ERR_RESOURCE_UNAVAILABLE: + return "resource unavailable"; + case AL_ERR_NOT_ENOUGH_CORES: + return "not enough cores"; + case AL_ERR_REQUEST_MALFORMED: + return "request malformed"; + case AL_ERR_CMD_NOT_ALLOWED: + return "command not allowed"; + case AL_ERR_INVALID_CMD_VALUE: + return "invalid command value"; + case AL_ERROR: + default: + return "unknown error"; + } +} + static unsigned int estimate_stream_size(unsigned int width, unsigned int height) { @@ -781,7 +615,7 @@ static int allegro_mbox_write(struct allegro_dev *dev, if (size > mbox->size) { v4l2_err(&dev->v4l2_dev, - "message (%zu bytes) to large for mailbox (%zu bytes)\n", + "message (%zu bytes) too large for mailbox (%zu bytes)\n", size, mbox->size); return -EINVAL; } @@ -894,8 +728,8 @@ static void allegro_mcu_send_init(struct allegro_dev *dev, msg.header.type = MCU_MSG_TYPE_INIT; msg.header.length = sizeof(msg) - sizeof(msg.header); - msg.suballoc_dma = lower_32_bits(suballoc_dma) | MCU_CACHE_OFFSET; - msg.suballoc_size = suballoc_size; + msg.suballoc_dma = to_mcu_addr(dev, suballoc_dma); + msg.suballoc_size = to_mcu_size(dev, suballoc_size); /* disable L2 cache */ msg.l2_cache[0] = -1; @@ -1001,6 +835,103 @@ v4l2_bitrate_mode_to_mcu_mode(enum v4l2_mpeg_video_bitrate_mode mode) } } +static u32 v4l2_cpb_size_to_mcu(unsigned int cpb_size, unsigned int bitrate) +{ + unsigned int cpb_size_kbit; + unsigned int bitrate_kbps; + + /* + * The mcu expects the CPB size in units of a 90 kHz clock, but the + * channel follows the V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE and stores + * the CPB size in kilobytes. 
+ */ + cpb_size_kbit = cpb_size * BITS_PER_BYTE; + bitrate_kbps = bitrate / 1000; + + return (cpb_size_kbit * 90000) / bitrate_kbps; +} + +static s16 get_qp_delta(int minuend, int subtrahend) +{ + if (minuend == subtrahend) + return -1; + else + return minuend - subtrahend; +} + +static int fill_create_channel_param(struct allegro_channel *channel, + struct create_channel_param *param) +{ + int i_frame_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_i_frame_qp); + int p_frame_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_p_frame_qp); + int b_frame_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_b_frame_qp); + int bitrate_mode = v4l2_ctrl_g_ctrl(channel->mpeg_video_bitrate_mode); + + param->width = channel->width; + param->height = channel->height; + param->format = v4l2_pixelformat_to_mcu_format(channel->pixelformat); + param->colorspace = + v4l2_colorspace_to_mcu_colorspace(channel->colorspace); + param->src_mode = 0x0; + param->profile = v4l2_profile_to_mcu_profile(channel->profile); + param->constraint_set_flags = BIT(1); + param->codec = v4l2_pixelformat_to_mcu_codec(channel->codec); + param->level = v4l2_level_to_mcu_level(channel->level); + param->tier = 0; + param->sps_param = BIT(20) | 0x4a; + param->pps_param = BIT(2); + param->enc_option = AL_OPT_RDO_COST_MODE | AL_OPT_LF_X_TILE | + AL_OPT_LF_X_SLICE | AL_OPT_LF; + param->beta_offset = -1; + param->tc_offset = -1; + param->num_slices = 1; + param->me_range[0] = 8; + param->me_range[1] = 8; + param->me_range[2] = 16; + param->me_range[3] = 16; + param->max_cu_size = ilog2(SIZE_MACROBLOCK); + param->min_cu_size = ilog2(8); + param->max_tu_size = 2; + param->min_tu_size = 2; + param->max_transfo_depth_intra = 1; + param->max_transfo_depth_inter = 1; + + param->prefetch_auto = 0; + param->prefetch_mem_offset = 0; + param->prefetch_mem_size = 0; + + param->rate_control_mode = channel->frame_rc_enable ? + v4l2_bitrate_mode_to_mcu_mode(bitrate_mode) : 0; + + param->cpb_size = v4l2_cpb_size_to_mcu(channel->cpb_size, + channel->bitrate_peak); + /* Shall be ]0;cpb_size in 90 kHz units]. Use maximum value. */ + param->initial_rem_delay = param->cpb_size; + param->framerate = DIV_ROUND_UP(channel->framerate.numerator, + channel->framerate.denominator); + param->clk_ratio = channel->framerate.denominator == 1001 ? 
1001 : 1000; + param->target_bitrate = channel->bitrate; + param->max_bitrate = channel->bitrate_peak; + param->initial_qp = i_frame_qp; + param->min_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_min_qp); + param->max_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_max_qp); + param->ip_delta = get_qp_delta(i_frame_qp, p_frame_qp); + param->pb_delta = get_qp_delta(p_frame_qp, b_frame_qp); + param->golden_ref = 0; + param->golden_delta = 2; + param->golden_ref_frequency = 10; + param->rate_control_option = 0x00000000; + + param->gop_ctrl_mode = 0x00000000; + param->freq_idr = channel->gop_size; + param->freq_lt = 0; + param->gdr_mode = 0x00000000; + param->gop_length = channel->gop_size; + param->subframe_latency = 0x00000000; + + return 0; +} + static int allegro_mcu_send_create_channel(struct allegro_dev *dev, struct allegro_channel *channel) { @@ -1012,63 +943,8 @@ static int allegro_mcu_send_create_channel(struct allegro_dev *dev, msg.header.length = sizeof(msg) - sizeof(msg.header); msg.user_id = channel->user_id; - msg.width = channel->width; - msg.height = channel->height; - msg.format = v4l2_pixelformat_to_mcu_format(channel->pixelformat); - msg.colorspace = v4l2_colorspace_to_mcu_colorspace(channel->colorspace); - msg.src_mode = 0x0; - msg.profile = v4l2_profile_to_mcu_profile(channel->profile); - msg.constraint_set_flags = BIT(1); - msg.codec = v4l2_pixelformat_to_mcu_codec(channel->codec); - msg.level = v4l2_level_to_mcu_level(channel->level); - msg.tier = 0; - msg.sps_param = BIT(20) | 0x4a; - msg.pps_param = BIT(2); - msg.enc_option = AL_OPT_RDO_COST_MODE | AL_OPT_LF_X_TILE | - AL_OPT_LF_X_SLICE | AL_OPT_LF; - msg.beta_offset = -1; - msg.tc_offset = -1; - msg.num_slices = 1; - msg.me_range[0] = 8; - msg.me_range[1] = 8; - msg.me_range[2] = 16; - msg.me_range[3] = 16; - msg.max_cu_size = ilog2(SIZE_MACROBLOCK); - msg.min_cu_size = ilog2(8); - msg.max_tu_size = 2; - msg.min_tu_size = 2; - msg.max_transfo_depth_intra = 1; - msg.max_transfo_depth_inter = 1; - - msg.rate_control_mode = - v4l2_bitrate_mode_to_mcu_mode(channel->bitrate_mode); - /* Shall be ]0;cpb_size in 90 kHz units]. Use maximum value. */ - msg.initial_rem_delay = - ((channel->cpb_size * 1000) / channel->bitrate_peak) * 90000; - /* Encoder expects cpb_size in units of a 90 kHz clock. 
*/ - msg.cpb_size = - ((channel->cpb_size * 1000) / channel->bitrate_peak) * 90000; - msg.framerate = 25; - msg.clk_ratio = 1000; - msg.target_bitrate = channel->bitrate; - msg.max_bitrate = channel->bitrate_peak; - msg.initial_qp = 25; - msg.min_qp = 10; - msg.max_qp = 51; - msg.ip_delta = -1; - msg.pb_delta = -1; - msg.golden_ref = 0; - msg.golden_delta = 2; - msg.golden_ref_frequency = 10; - msg.rate_control_option = 0x00000000; - - msg.gop_ctrl_mode = 0x00000000; - msg.freq_ird = 0x7fffffff; - msg.freq_lt = 0; - msg.gdr_mode = 0x00000000; - msg.gop_length = channel->gop_size; - msg.subframe_latency = 0x00000000; - msg.lda_control_mode = 0x700d0000; + + fill_create_channel_param(channel, &msg.param); allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg)); allegro_mcu_interrupt(dev); @@ -1097,7 +973,8 @@ static int allegro_mcu_send_destroy_channel(struct allegro_dev *dev, static int allegro_mcu_send_put_stream_buffer(struct allegro_dev *dev, struct allegro_channel *channel, dma_addr_t paddr, - unsigned long size) + unsigned long size, + u64 stream_id) { struct mcu_msg_put_stream_buffer msg; @@ -1107,11 +984,12 @@ static int allegro_mcu_send_put_stream_buffer(struct allegro_dev *dev, msg.header.length = sizeof(msg) - sizeof(msg.header); msg.channel_id = channel->mcu_channel_id; - msg.dma_addr = paddr; - msg.mcu_addr = paddr | MCU_CACHE_OFFSET; + msg.dma_addr = to_codec_addr(dev, paddr); + msg.mcu_addr = to_mcu_addr(dev, paddr); msg.size = size; msg.offset = ENCODER_STREAM_OFFSET; - msg.stream_id = 0; /* copied to mcu_msg_encode_frame_response */ + /* copied to mcu_msg_encode_frame_response */ + msg.stream_id = stream_id; allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg)); allegro_mcu_interrupt(dev); @@ -1121,7 +999,8 @@ static int allegro_mcu_send_put_stream_buffer(struct allegro_dev *dev, static int allegro_mcu_send_encode_frame(struct allegro_dev *dev, struct allegro_channel *channel, - dma_addr_t src_y, dma_addr_t src_uv) + dma_addr_t src_y, dma_addr_t src_uv, + u64 src_handle) { struct mcu_msg_encode_frame msg; @@ -1134,12 +1013,13 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev, msg.encoding_options = AL_OPT_FORCE_LOAD; msg.pps_qp = 26; /* qp are relative to 26 */ msg.user_param = 0; /* copied to mcu_msg_encode_frame_response */ - msg.src_handle = 0; /* copied to mcu_msg_encode_frame_response */ - msg.src_y = src_y; - msg.src_uv = src_uv; + /* src_handle is copied to mcu_msg_encode_frame_response */ + msg.src_handle = src_handle; + msg.src_y = to_codec_addr(dev, src_y); + msg.src_uv = to_codec_addr(dev, src_uv); msg.stride = channel->stride; msg.ep2 = 0x0; - msg.ep2_v = msg.ep2 | MCU_CACHE_OFFSET; + msg.ep2_v = to_mcu_addr(dev, msg.ep2); allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg)); allegro_mcu_interrupt(dev); @@ -1198,10 +1078,9 @@ static int allegro_mcu_push_buffer_internal(struct allegro_channel *channel, buffer = msg->buffer; list_for_each_entry(al_buffer, list, head) { - buffer->dma_addr = lower_32_bits(al_buffer->paddr); - buffer->mcu_addr = - lower_32_bits(al_buffer->paddr) | MCU_CACHE_OFFSET; - buffer->size = al_buffer->size; + buffer->dma_addr = to_codec_addr(dev, al_buffer->paddr); + buffer->mcu_addr = to_mcu_addr(dev, al_buffer->paddr); + buffer->size = to_mcu_size(dev, al_buffer->size); buffer++; } @@ -1360,9 +1239,11 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel, sps->vui.chroma_loc_info_present_flag = 1; sps->vui.chroma_sample_loc_type_top_field = 0; 
sps->vui.chroma_sample_loc_type_bottom_field = 0; + sps->vui.timing_info_present_flag = 1; - sps->vui.num_units_in_tick = 1; - sps->vui.time_scale = 50; + sps->vui.num_units_in_tick = channel->framerate.denominator; + sps->vui.time_scale = 2 * channel->framerate.numerator; + sps->vui.fixed_frame_rate_flag = 1; sps->vui.nal_hrd_parameters_present_flag = 0; sps->vui.vcl_hrd_parameters_present_flag = 1; @@ -1375,7 +1256,8 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel, /* See Rec. ITU-T H.264 (04/2017) p. 410 E-54 */ sps->vui.vcl_hrd_parameters.cpb_size_value_minus1[0] = (channel->cpb_size * 1000) / (1 << (4 + sps->vui.vcl_hrd_parameters.cpb_size_scale)) - 1; - sps->vui.vcl_hrd_parameters.cbr_flag[0] = 1; + sps->vui.vcl_hrd_parameters.cbr_flag[0] = + !v4l2_ctrl_g_ctrl(channel->mpeg_video_frame_rc_enable); sps->vui.vcl_hrd_parameters.initial_cpb_removal_delay_length_minus1 = 31; sps->vui.vcl_hrd_parameters.cpb_removal_delay_length_minus1 = 31; sps->vui.vcl_hrd_parameters.dpb_output_delay_length_minus1 = 31; @@ -1438,8 +1320,11 @@ static bool allegro_channel_is_at_eos(struct allegro_channel *channel) break; case ALLEGRO_STATE_DRAIN: case ALLEGRO_STATE_WAIT_FOR_BUFFER: - if (v4l2_m2m_num_src_bufs_ready(channel->fh.m2m_ctx) == 0) + mutex_lock(&channel->shadow_list_lock); + if (v4l2_m2m_num_src_bufs_ready(channel->fh.m2m_ctx) == 0 && + list_empty(&channel->source_shadow_list)) is_at_eos = true; + mutex_unlock(&channel->shadow_list_lock); break; default: break; @@ -1466,6 +1351,41 @@ static void allegro_channel_buf_done(struct allegro_channel *channel, v4l2_m2m_buf_done(buf, state); } +static u64 allegro_put_buffer(struct allegro_channel *channel, + struct list_head *list, + struct vb2_v4l2_buffer *buffer) +{ + struct v4l2_m2m_buffer *b = container_of(buffer, + struct v4l2_m2m_buffer, vb); + struct allegro_m2m_buffer *shadow = to_allegro_m2m_buffer(b); + + mutex_lock(&channel->shadow_list_lock); + list_add_tail(&shadow->head, list); + mutex_unlock(&channel->shadow_list_lock); + + return ptr_to_u64(buffer); +} + +static struct vb2_v4l2_buffer * +allegro_get_buffer(struct allegro_channel *channel, + struct list_head *list, u64 handle) +{ + struct allegro_m2m_buffer *shadow, *tmp; + struct vb2_v4l2_buffer *buffer = NULL; + + mutex_lock(&channel->shadow_list_lock); + list_for_each_entry_safe(shadow, tmp, list, head) { + if (handle == ptr_to_u64(&shadow->buf.vb)) { + buffer = &shadow->buf.vb; + list_del_init(&shadow->head); + break; + } + } + mutex_unlock(&channel->shadow_list_lock); + + return buffer; +} + static void allegro_channel_finish_frame(struct allegro_channel *channel, struct mcu_msg_encode_frame_response *msg) { @@ -1481,15 +1401,31 @@ static void allegro_channel_finish_frame(struct allegro_channel *channel, ssize_t len; ssize_t free; - src_buf = v4l2_m2m_src_buf_remove(channel->fh.m2m_ctx); + src_buf = allegro_get_buffer(channel, &channel->source_shadow_list, + msg->src_handle); + if (!src_buf) + v4l2_warn(&dev->v4l2_dev, + "channel %d: invalid source buffer\n", + channel->mcu_channel_id); + + dst_buf = allegro_get_buffer(channel, &channel->stream_shadow_list, + msg->stream_id); + if (!dst_buf) + v4l2_warn(&dev->v4l2_dev, + "channel %d: invalid stream buffer\n", + channel->mcu_channel_id); + + if (!src_buf || !dst_buf) + goto err; - dst_buf = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx); dst_buf->sequence = channel->csequence++; - if (msg->error_code) { + if (msg->error_code & AL_ERROR) { v4l2_err(&dev->v4l2_dev, - "channel %d: error while encoding frame: %x\n", - 
channel->mcu_channel_id, msg->error_code); + "channel %d: failed to encode frame: %s (%x)\n", + channel->mcu_channel_id, + allegro_err_to_string(msg->error_code), + msg->error_code); goto err; } @@ -1562,17 +1498,22 @@ static void allegro_channel_finish_frame(struct allegro_channel *channel, channel->mcu_channel_id, len); } - len = nal_h264_write_filler(&dev->plat_dev->dev, curr, free); - if (len < 0) { - v4l2_err(&dev->v4l2_dev, - "failed to write %zd filler data\n", free); - goto err; + if (msg->slice_type != AL_ENC_SLICE_TYPE_I && !msg->is_idr) { + dst_buf->vb2_buf.planes[0].data_offset = free; + free = 0; + } else { + len = nal_h264_write_filler(&dev->plat_dev->dev, curr, free); + if (len < 0) { + v4l2_err(&dev->v4l2_dev, + "failed to write %zd filler data\n", free); + goto err; + } + curr += len; + free -= len; + v4l2_dbg(2, debug, &dev->v4l2_dev, + "channel %d: wrote %zd bytes filler nal unit\n", + channel->mcu_channel_id, len); } - curr += len; - free -= len; - v4l2_dbg(2, debug, &dev->v4l2_dev, - "channel %d: wrote %zd bytes filler nal unit\n", - channel->mcu_channel_id, len); if (free != 0) { v4l2_err(&dev->v4l2_dev, @@ -1590,20 +1531,20 @@ static void allegro_channel_finish_frame(struct allegro_channel *channel, dst_buf->flags |= V4L2_BUF_FLAG_PFRAME; v4l2_dbg(1, debug, &dev->v4l2_dev, - "channel %d: encoded frame #%03d (%s%s, %d bytes)\n", + "channel %d: encoded frame #%03d (%s%s, QP %d, %d bytes)\n", channel->mcu_channel_id, dst_buf->sequence, msg->is_idr ? "IDR, " : "", msg->slice_type == AL_ENC_SLICE_TYPE_I ? "I slice" : msg->slice_type == AL_ENC_SLICE_TYPE_P ? "P slice" : "unknown", - partition->size); + msg->qp, partition->size); err: - v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE); - - allegro_channel_buf_done(channel, dst_buf, state); + if (src_buf) + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE); - v4l2_m2m_job_finish(dev->m2m_dev, channel->fh.m2m_ctx); + if (dst_buf) + allegro_channel_buf_done(channel, dst_buf, state); } static int allegro_handle_init(struct allegro_dev *dev, @@ -1621,6 +1562,12 @@ allegro_handle_create_channel(struct allegro_dev *dev, struct allegro_channel *channel; int err = 0; + if (msg->header.length != sizeof(*msg) - sizeof(msg->header)) + v4l2_warn(&dev->v4l2_dev, + "received message has %d bytes, but expected %zu\n", + msg->header.length, + sizeof(*msg) - sizeof(msg->header)); + channel = allegro_find_channel_by_user_id(dev, msg->user_id); if (IS_ERR(channel)) { v4l2_warn(&dev->v4l2_dev, @@ -1632,8 +1579,10 @@ allegro_handle_create_channel(struct allegro_dev *dev, if (msg->error_code) { v4l2_err(&dev->v4l2_dev, - "user %d: mcu failed to create channel: error %x\n", - channel->user_id, msg->error_code); + "user %d: mcu failed to create channel: %s (%x)\n", + channel->user_id, + allegro_err_to_string(msg->error_code), + msg->error_code); err = -EIO; goto out; } @@ -1712,6 +1661,12 @@ allegro_handle_encode_frame(struct allegro_dev *dev, { struct allegro_channel *channel; + if (msg->header.length != sizeof(*msg) - sizeof(msg->header)) + v4l2_warn(&dev->v4l2_dev, + "received message has %d bytes, but expected %zu\n", + msg->header.length, + sizeof(*msg) - sizeof(msg->header)); + channel = allegro_find_channel_by_channel_id(dev, msg->channel_id); if (IS_ERR(channel)) { v4l2_err(&dev->v4l2_dev, @@ -1920,6 +1875,14 @@ static int allegro_mcu_reset(struct allegro_dev *dev) { int err; + /* + * Ensure that the AL5_MCU_WAKEUP bit is set to 0 otherwise the mcu + * does not go to sleep after the reset. 
+ */ + err = regmap_write(dev->regmap, AL5_MCU_WAKEUP, 0); + if (err) + return err; + err = regmap_write(dev->regmap, AL5_MCU_RESET_MODE, AL5_MCU_RESET_MODE_SLEEP); if (err < 0) @@ -1955,6 +1918,12 @@ static void allegro_destroy_channel(struct allegro_channel *channel) v4l2_ctrl_grab(channel->mpeg_video_h264_profile, false); v4l2_ctrl_grab(channel->mpeg_video_h264_level, false); + v4l2_ctrl_grab(channel->mpeg_video_h264_i_frame_qp, false); + v4l2_ctrl_grab(channel->mpeg_video_h264_max_qp, false); + v4l2_ctrl_grab(channel->mpeg_video_h264_min_qp, false); + v4l2_ctrl_grab(channel->mpeg_video_h264_p_frame_qp, false); + v4l2_ctrl_grab(channel->mpeg_video_h264_b_frame_qp, false); + v4l2_ctrl_grab(channel->mpeg_video_frame_rc_enable, false); v4l2_ctrl_grab(channel->mpeg_video_bitrate_mode, false); v4l2_ctrl_grab(channel->mpeg_video_bitrate, false); v4l2_ctrl_grab(channel->mpeg_video_bitrate_peak, false); @@ -2000,7 +1969,9 @@ static int allegro_create_channel(struct allegro_channel *channel) v4l2_dbg(1, debug, &dev->v4l2_dev, "user %d: creating channel (%4.4s, %dx%d@%d)\n", channel->user_id, - (char *)&channel->codec, channel->width, channel->height, 25); + (char *)&channel->codec, channel->width, channel->height, + DIV_ROUND_UP(channel->framerate.numerator, + channel->framerate.denominator)); min_level = select_minimum_h264_level(channel->width, channel->height); if (channel->level < min_level) { @@ -2014,6 +1985,12 @@ static int allegro_create_channel(struct allegro_channel *channel) v4l2_ctrl_grab(channel->mpeg_video_h264_profile, true); v4l2_ctrl_grab(channel->mpeg_video_h264_level, true); + v4l2_ctrl_grab(channel->mpeg_video_h264_i_frame_qp, true); + v4l2_ctrl_grab(channel->mpeg_video_h264_max_qp, true); + v4l2_ctrl_grab(channel->mpeg_video_h264_min_qp, true); + v4l2_ctrl_grab(channel->mpeg_video_h264_p_frame_qp, true); + v4l2_ctrl_grab(channel->mpeg_video_h264_b_frame_qp, true); + v4l2_ctrl_grab(channel->mpeg_video_frame_rc_enable, true); v4l2_ctrl_grab(channel->mpeg_video_bitrate_mode, true); v4l2_ctrl_grab(channel->mpeg_video_bitrate, true); v4l2_ctrl_grab(channel->mpeg_video_bitrate_peak, true); @@ -2046,6 +2023,7 @@ static void allegro_set_default_params(struct allegro_channel *channel) channel->width = ALLEGRO_WIDTH_DEFAULT; channel->height = ALLEGRO_HEIGHT_DEFAULT; channel->stride = round_up(channel->width, 32); + channel->framerate = ALLEGRO_FRAMERATE_DEFAULT; channel->colorspace = V4L2_COLORSPACE_REC709; channel->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; @@ -2062,7 +2040,6 @@ static void allegro_set_default_params(struct allegro_channel *channel) channel->sizeimage_encoded = estimate_stream_size(channel->width, channel->height); - channel->bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR; channel->bitrate = maximum_bitrate(channel->level); channel->bitrate_peak = maximum_bitrate(channel->level); channel->cpb_size = maximum_cpb_size(channel->level); @@ -2163,16 +2140,33 @@ static void allegro_stop_streaming(struct vb2_queue *q) struct allegro_channel *channel = vb2_get_drv_priv(q); struct allegro_dev *dev = channel->dev; struct vb2_v4l2_buffer *buffer; + struct allegro_m2m_buffer *shadow, *tmp; v4l2_dbg(2, debug, &dev->v4l2_dev, "%s: stop streaming\n", V4L2_TYPE_IS_OUTPUT(q->type) ? 
"output" : "capture"); if (V4L2_TYPE_IS_OUTPUT(q->type)) { + mutex_lock(&channel->shadow_list_lock); + list_for_each_entry_safe(shadow, tmp, + &channel->source_shadow_list, head) { + list_del(&shadow->head); + v4l2_m2m_buf_done(&shadow->buf.vb, VB2_BUF_STATE_ERROR); + } + mutex_unlock(&channel->shadow_list_lock); + allegro_set_state(channel, ALLEGRO_STATE_STOPPED); while ((buffer = v4l2_m2m_src_buf_remove(channel->fh.m2m_ctx))) v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_ERROR); } else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { + mutex_lock(&channel->shadow_list_lock); + list_for_each_entry_safe(shadow, tmp, + &channel->stream_shadow_list, head) { + list_del(&shadow->head); + v4l2_m2m_buf_done(&shadow->buf.vb, VB2_BUF_STATE_ERROR); + } + mutex_unlock(&channel->shadow_list_lock); + allegro_destroy_channel(channel); while ((buffer = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx))) v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_ERROR); @@ -2203,7 +2197,7 @@ static int allegro_queue_init(void *priv, src_vq->drv_priv = channel; src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; src_vq->ops = &allegro_queue_ops; - src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); + src_vq->buf_struct_size = sizeof(struct allegro_m2m_buffer); src_vq->lock = &channel->dev->lock; err = vb2_queue_init(src_vq); if (err) @@ -2216,7 +2210,7 @@ static int allegro_queue_init(void *priv, dst_vq->drv_priv = channel; dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; dst_vq->ops = &allegro_queue_ops; - dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); + dst_vq->buf_struct_size = sizeof(struct allegro_m2m_buffer); dst_vq->lock = &channel->dev->lock; err = vb2_queue_init(dst_vq); if (err) @@ -2225,6 +2219,52 @@ static int allegro_queue_init(void *priv, return 0; } +static int allegro_clamp_qp(struct allegro_channel *channel, + struct v4l2_ctrl *ctrl) +{ + struct v4l2_ctrl *next_ctrl; + + if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP) + next_ctrl = channel->mpeg_video_h264_p_frame_qp; + else if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP) + next_ctrl = channel->mpeg_video_h264_b_frame_qp; + else + return 0; + + /* Modify range automatically updates the value */ + __v4l2_ctrl_modify_range(next_ctrl, ctrl->val, 51, 1, ctrl->val); + + return allegro_clamp_qp(channel, next_ctrl); +} + +static int allegro_clamp_bitrate(struct allegro_channel *channel, + struct v4l2_ctrl *ctrl) +{ + struct v4l2_ctrl *ctrl_bitrate = channel->mpeg_video_bitrate; + struct v4l2_ctrl *ctrl_bitrate_peak = channel->mpeg_video_bitrate_peak; + + if (ctrl->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR && + ctrl_bitrate_peak->val < ctrl_bitrate->val) + ctrl_bitrate_peak->val = ctrl_bitrate->val; + + return 0; +} + +static int allegro_try_ctrl(struct v4l2_ctrl *ctrl) +{ + struct allegro_channel *channel = container_of(ctrl->handler, + struct allegro_channel, + ctrl_handler); + + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + allegro_clamp_bitrate(channel, ctrl); + break; + } + + return 0; +} + static int allegro_s_ctrl(struct v4l2_ctrl *ctrl) { struct allegro_channel *channel = container_of(ctrl->handler, @@ -2239,14 +2279,14 @@ static int allegro_s_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_MPEG_VIDEO_H264_LEVEL: channel->level = ctrl->val; break; - case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: - channel->bitrate_mode = ctrl->val; + case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: + channel->frame_rc_enable = ctrl->val; break; - case V4L2_CID_MPEG_VIDEO_BITRATE: - channel->bitrate = ctrl->val; - break; - case 
V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: - channel->bitrate_peak = ctrl->val; + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + channel->bitrate = channel->mpeg_video_bitrate->val; + channel->bitrate_peak = channel->mpeg_video_bitrate_peak->val; + v4l2_ctrl_activate(channel->mpeg_video_bitrate_peak, + ctrl->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR); break; case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: channel->cpb_size = ctrl->val; @@ -2254,12 +2294,18 @@ static int allegro_s_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_MPEG_VIDEO_GOP_SIZE: channel->gop_size = ctrl->val; break; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: + allegro_clamp_qp(channel, ctrl); + break; } return 0; } static const struct v4l2_ctrl_ops allegro_ctrl_ops = { + .try_ctrl = allegro_try_ctrl, .s_ctrl = allegro_s_ctrl, }; @@ -2270,16 +2316,18 @@ static int allegro_open(struct file *file) struct allegro_channel *channel = NULL; struct v4l2_ctrl_handler *handler; u64 mask; + int ret; channel = kzalloc(sizeof(*channel), GFP_KERNEL); if (!channel) return -ENOMEM; v4l2_fh_init(&channel->fh, vdev); - file->private_data = &channel->fh; - v4l2_fh_add(&channel->fh); init_completion(&channel->completion); + INIT_LIST_HEAD(&channel->source_shadow_list); + INIT_LIST_HEAD(&channel->stream_shadow_list); + mutex_init(&channel->shadow_list_lock); channel->dev = dev; @@ -2298,11 +2346,42 @@ static int allegro_open(struct file *file) V4L2_CID_MPEG_VIDEO_H264_LEVEL, V4L2_MPEG_VIDEO_H264_LEVEL_5_1, mask, V4L2_MPEG_VIDEO_H264_LEVEL_5_1); + channel->mpeg_video_h264_i_frame_qp = + v4l2_ctrl_new_std(handler, + &allegro_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, + 0, 51, 1, 30); + channel->mpeg_video_h264_max_qp = + v4l2_ctrl_new_std(handler, + &allegro_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_MAX_QP, + 0, 51, 1, 51); + channel->mpeg_video_h264_min_qp = + v4l2_ctrl_new_std(handler, + &allegro_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_MIN_QP, + 0, 51, 1, 0); + channel->mpeg_video_h264_p_frame_qp = + v4l2_ctrl_new_std(handler, + &allegro_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, + 0, 51, 1, 30); + channel->mpeg_video_h264_b_frame_qp = + v4l2_ctrl_new_std(handler, + &allegro_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP, + 0, 51, 1, 30); + channel->mpeg_video_frame_rc_enable = + v4l2_ctrl_new_std(handler, + &allegro_ctrl_ops, + V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, + false, 0x1, + true, false); channel->mpeg_video_bitrate_mode = v4l2_ctrl_new_std_menu(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE_MODE, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0, - channel->bitrate_mode); + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR); channel->mpeg_video_bitrate = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE, @@ -2324,12 +2403,19 @@ static int allegro_open(struct file *file) 0, ALLEGRO_GOP_SIZE_MAX, 1, channel->gop_size); v4l2_ctrl_new_std(handler, - &allegro_ctrl_ops, - V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, - 1, 32, - 1, 1); + &allegro_ctrl_ops, + V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, + 1, 32, + 1, 1); + if (handler->error != 0) { + ret = handler->error; + goto error; + } + channel->fh.ctrl_handler = handler; + v4l2_ctrl_cluster(3, &channel->mpeg_video_bitrate_mode); + channel->mcu_channel_id = -1; channel->user_id = -1; @@ -2341,7 +2427,20 @@ static int allegro_open(struct file *file) channel->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, channel, allegro_queue_init); + if (IS_ERR(channel->fh.m2m_ctx)) { + ret = PTR_ERR(channel->fh.m2m_ctx); + goto error; + } + + file->private_data = 
&channel->fh; + v4l2_fh_add(&channel->fh); + return 0; + +error: + v4l2_ctrl_handler_free(handler); + kfree(channel); + return ret; } static int allegro_release(struct file *file) @@ -2636,6 +2735,46 @@ static int allegro_ioctl_streamon(struct file *file, void *priv, return v4l2_m2m_streamon(file, fh->m2m_ctx, type); } +static int allegro_g_parm(struct file *file, void *fh, + struct v4l2_streamparm *a) +{ + struct allegro_channel *channel = fh_to_channel(fh); + struct v4l2_fract *timeperframe; + + if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + + a->parm.output.capability = V4L2_CAP_TIMEPERFRAME; + timeperframe = &a->parm.output.timeperframe; + timeperframe->numerator = channel->framerate.denominator; + timeperframe->denominator = channel->framerate.numerator; + + return 0; +} + +static int allegro_s_parm(struct file *file, void *fh, + struct v4l2_streamparm *a) +{ + struct allegro_channel *channel = fh_to_channel(fh); + struct v4l2_fract *timeperframe; + int div; + + if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + + a->parm.output.capability = V4L2_CAP_TIMEPERFRAME; + timeperframe = &a->parm.output.timeperframe; + + if (timeperframe->numerator == 0 || timeperframe->denominator == 0) + return allegro_g_parm(file, fh, a); + + div = gcd(timeperframe->denominator, timeperframe->numerator); + channel->framerate.numerator = timeperframe->denominator / div; + channel->framerate.denominator = timeperframe->numerator / div; + + return 0; +} + static int allegro_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub) { @@ -2674,6 +2813,9 @@ static const struct v4l2_ioctl_ops allegro_ioctl_ops = { .vidioc_encoder_cmd = allegro_encoder_cmd, .vidioc_enum_framesizes = allegro_enum_framesizes, + .vidioc_g_parm = allegro_g_parm, + .vidioc_s_parm = allegro_s_parm, + .vidioc_subscribe_event = allegro_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; @@ -2701,7 +2843,7 @@ static int allegro_register_device(struct allegro_dev *dev) video_dev->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; video_set_drvdata(video_dev, dev); - return video_register_device(video_dev, VFL_TYPE_GRABBER, 0); + return video_register_device(video_dev, VFL_TYPE_VIDEO, 0); } static void allegro_device_run(void *priv) @@ -2714,18 +2856,26 @@ static void allegro_device_run(void *priv) dma_addr_t src_uv; dma_addr_t dst_addr; unsigned long dst_size; + u64 src_handle; + u64 dst_handle; - dst_buf = v4l2_m2m_next_dst_buf(channel->fh.m2m_ctx); + dst_buf = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx); dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); dst_size = vb2_plane_size(&dst_buf->vb2_buf, 0); - allegro_mcu_send_put_stream_buffer(dev, channel, dst_addr, dst_size); + dst_handle = allegro_put_buffer(channel, &channel->stream_shadow_list, + dst_buf); + allegro_mcu_send_put_stream_buffer(dev, channel, dst_addr, dst_size, + dst_handle); - src_buf = v4l2_m2m_next_src_buf(channel->fh.m2m_ctx); + src_buf = v4l2_m2m_src_buf_remove(channel->fh.m2m_ctx); src_buf->sequence = channel->osequence++; - src_y = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0); src_uv = src_y + (channel->stride * channel->height); - allegro_mcu_send_encode_frame(dev, channel, src_y, src_uv); + src_handle = allegro_put_buffer(channel, &channel->source_shadow_list, + src_buf); + allegro_mcu_send_encode_frame(dev, channel, src_y, src_uv, src_handle); + + v4l2_m2m_job_finish(dev->m2m_dev, channel->fh.m2m_ctx); } static const struct v4l2_m2m_ops allegro_m2m_ops = { @@ -2933,8 
+3083,8 @@ static int allegro_probe(struct platform_device *pdev) return -EINVAL; } sram_regs = devm_ioremap(&pdev->dev, - sram_res->start, - resource_size(sram_res)); + sram_res->start, + resource_size(sram_res)); if (IS_ERR(sram_regs)) { dev_err(&pdev->dev, "failed to map sram\n"); return PTR_ERR(sram_regs); diff --git a/drivers/staging/media/allegro-dvt/allegro-mail.c b/drivers/staging/media/allegro-dvt/allegro-mail.c new file mode 100644 index 000000000000..df0d8d26a6fb --- /dev/null +++ b/drivers/staging/media/allegro-dvt/allegro-mail.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Pengutronix, Michael Tretter <kernel@pengutronix.de> + * + * Helper functions for handling messages that are send via mailbox to the + * Allegro VCU firmware. + */ + +#include <linux/export.h> + +#include "allegro-mail.h" + +const char *msg_type_name(enum mcu_msg_type type) +{ + static char buf[9]; + + switch (type) { + case MCU_MSG_TYPE_INIT: + return "INIT"; + case MCU_MSG_TYPE_CREATE_CHANNEL: + return "CREATE_CHANNEL"; + case MCU_MSG_TYPE_DESTROY_CHANNEL: + return "DESTROY_CHANNEL"; + case MCU_MSG_TYPE_ENCODE_FRAME: + return "ENCODE_FRAME"; + case MCU_MSG_TYPE_PUT_STREAM_BUFFER: + return "PUT_STREAM_BUFFER"; + case MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE: + return "PUSH_BUFFER_INTERMEDIATE"; + case MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE: + return "PUSH_BUFFER_REFERENCE"; + default: + snprintf(buf, sizeof(buf), "(0x%04x)", type); + return buf; + } +} +EXPORT_SYMBOL(msg_type_name); diff --git a/drivers/staging/media/allegro-dvt/allegro-mail.h b/drivers/staging/media/allegro-dvt/allegro-mail.h new file mode 100644 index 000000000000..17db665f8e1e --- /dev/null +++ b/drivers/staging/media/allegro-dvt/allegro-mail.h @@ -0,0 +1,267 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Pengutronix, Michael Tretter <kernel@pengutronix.de> + * + * Allegro VCU firmware mailbox mail definitions + */ + +#ifndef ALLEGRO_MAIL_H +#define ALLEGRO_MAIL_H + +#include <linux/kernel.h> + +enum mcu_msg_type { + MCU_MSG_TYPE_INIT = 0x0000, + MCU_MSG_TYPE_CREATE_CHANNEL = 0x0005, + MCU_MSG_TYPE_DESTROY_CHANNEL = 0x0006, + MCU_MSG_TYPE_ENCODE_FRAME = 0x0007, + MCU_MSG_TYPE_PUT_STREAM_BUFFER = 0x0012, + MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE = 0x000e, + MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE = 0x000f, +}; + +const char *msg_type_name(enum mcu_msg_type type); + +struct mcu_msg_header { + u16 length; /* length of the body in bytes */ + u16 type; +} __attribute__ ((__packed__)); + +struct mcu_msg_init_request { + struct mcu_msg_header header; + u32 reserved0; /* maybe a unused channel id */ + u32 suballoc_dma; + u32 suballoc_size; + s32 l2_cache[3]; +} __attribute__ ((__packed__)); + +struct mcu_msg_init_response { + struct mcu_msg_header header; + u32 reserved0; +} __attribute__ ((__packed__)); + +struct create_channel_param { + u16 width; + u16 height; + u32 format; + u32 colorspace; + u32 src_mode; + u8 profile; + u16 constraint_set_flags; + s8 codec; + u16 level; + u16 tier; + u32 sps_param; + u32 pps_param; + + u32 enc_option; +#define AL_OPT_WPP BIT(0) +#define AL_OPT_TILE BIT(1) +#define AL_OPT_LF BIT(2) +#define AL_OPT_LF_X_SLICE BIT(3) +#define AL_OPT_LF_X_TILE BIT(4) +#define AL_OPT_SCL_LST BIT(5) +#define AL_OPT_CONST_INTRA_PRED BIT(6) +#define AL_OPT_QP_TAB_RELATIVE BIT(7) +#define AL_OPT_FIX_PREDICTOR BIT(8) +#define AL_OPT_CUSTOM_LDA BIT(9) +#define AL_OPT_ENABLE_AUTO_QP BIT(10) +#define AL_OPT_ADAPT_AUTO_QP BIT(11) +#define AL_OPT_TRANSFO_SKIP BIT(13) +#define AL_OPT_FORCE_REC 
BIT(15) +#define AL_OPT_FORCE_MV_OUT BIT(16) +#define AL_OPT_FORCE_MV_CLIP BIT(17) +#define AL_OPT_LOWLAT_SYNC BIT(18) +#define AL_OPT_LOWLAT_INT BIT(19) +#define AL_OPT_RDO_COST_MODE BIT(20) + + s8 beta_offset; + s8 tc_offset; + u16 reserved10; + u32 unknown11; + u32 unknown12; + u16 num_slices; + u16 prefetch_auto; + u32 prefetch_mem_offset; + u32 prefetch_mem_size; + u16 clip_hrz_range; + u16 clip_vrt_range; + u16 me_range[4]; + u8 max_cu_size; + u8 min_cu_size; + u8 max_tu_size; + u8 min_tu_size; + u8 max_transfo_depth_inter; + u8 max_transfo_depth_intra; + u16 reserved20; + u32 entropy_mode; + u32 wp_mode; + + /* rate control param */ + u32 rate_control_mode; + u32 initial_rem_delay; + u32 cpb_size; + u16 framerate; + u16 clk_ratio; + u32 target_bitrate; + u32 max_bitrate; + u16 initial_qp; + u16 min_qp; + u16 max_qp; + s16 ip_delta; + s16 pb_delta; + u16 golden_ref; + u16 golden_delta; + u16 golden_ref_frequency; + u32 rate_control_option; + + /* gop param */ + u32 gop_ctrl_mode; + u32 freq_idr; + u32 freq_lt; + u32 gdr_mode; + u16 gop_length; + u8 num_b; + u8 freq_golden_ref; + + u32 subframe_latency; + u32 lda_control_mode; + u32 unknown41; +} __attribute__ ((__packed__)); + +struct mcu_msg_create_channel { + struct mcu_msg_header header; + u32 user_id; + struct create_channel_param param; +} __attribute__ ((__packed__)); + +struct mcu_msg_create_channel_response { + struct mcu_msg_header header; + u32 channel_id; + u32 user_id; + u32 options; + u32 num_core; + u32 pps_param; + u32 int_buffers_count; + u32 int_buffers_size; + u32 rec_buffers_count; + u32 rec_buffers_size; + u32 reserved; + u32 error_code; +} __attribute__ ((__packed__)); + +struct mcu_msg_destroy_channel { + struct mcu_msg_header header; + u32 channel_id; +} __attribute__ ((__packed__)); + +struct mcu_msg_destroy_channel_response { + struct mcu_msg_header header; + u32 channel_id; +} __attribute__ ((__packed__)); + +struct mcu_msg_push_buffers_internal_buffer { + u32 dma_addr; + u32 mcu_addr; + u32 size; +} __attribute__ ((__packed__)); + +struct mcu_msg_push_buffers_internal { + struct mcu_msg_header header; + u32 channel_id; + struct mcu_msg_push_buffers_internal_buffer buffer[]; +} __attribute__ ((__packed__)); + +struct mcu_msg_put_stream_buffer { + struct mcu_msg_header header; + u32 channel_id; + u32 dma_addr; + u32 mcu_addr; + u32 size; + u32 offset; + u64 stream_id; +} __attribute__ ((__packed__)); + +struct mcu_msg_encode_frame { + struct mcu_msg_header header; + u32 channel_id; + u32 reserved; + + u32 encoding_options; +#define AL_OPT_USE_QP_TABLE BIT(0) +#define AL_OPT_FORCE_LOAD BIT(1) +#define AL_OPT_USE_L2 BIT(2) +#define AL_OPT_DISABLE_INTRA BIT(3) +#define AL_OPT_DEPENDENT_SLICES BIT(4) + + s16 pps_qp; + u16 padding; + u64 user_param; + u64 src_handle; + + u32 request_options; +#define AL_OPT_SCENE_CHANGE BIT(0) +#define AL_OPT_RESTART_GOP BIT(1) +#define AL_OPT_USE_LONG_TERM BIT(2) +#define AL_OPT_UPDATE_PARAMS BIT(3) + + /* u32 scene_change_delay (optional) */ + /* rate control param (optional) */ + /* gop param (optional) */ + u32 src_y; + u32 src_uv; + u32 stride; + u32 ep2; + u64 ep2_v; +} __attribute__ ((__packed__)); + +struct mcu_msg_encode_frame_response { + struct mcu_msg_header header; + u32 channel_id; + u64 stream_id; /* see mcu_msg_put_stream_buffer */ + u64 user_param; /* see mcu_msg_encode_frame */ + u64 src_handle; /* see mcu_msg_encode_frame */ + u16 skip; + u16 is_ref; + u32 initial_removal_delay; + u32 dpb_output_delay; + u32 size; + u32 frame_tag_size; + s32 stuffing; + s32 
filler; + u16 num_column; + u16 num_row; + u16 qp; + u8 num_ref_idx_l0; + u8 num_ref_idx_l1; + u32 partition_table_offset; + s32 partition_table_size; + u32 sum_complex; + s32 tile_width[4]; + s32 tile_height[22]; + u32 error_code; + + u32 slice_type; +#define AL_ENC_SLICE_TYPE_B 0 +#define AL_ENC_SLICE_TYPE_P 1 +#define AL_ENC_SLICE_TYPE_I 2 + + u32 pic_struct; + u8 is_idr; + u8 is_first_slice; + u8 is_last_slice; + u8 reserved; + u16 pps_qp; + u16 reserved1; + u32 reserved2; +} __attribute__ ((__packed__)); + +union mcu_msg_response { + struct mcu_msg_header header; + struct mcu_msg_init_response init; + struct mcu_msg_create_channel_response create_channel; + struct mcu_msg_destroy_channel_response destroy_channel; + struct mcu_msg_encode_frame_response encode_frame; +}; + +#endif diff --git a/drivers/staging/media/hantro/Kconfig b/drivers/staging/media/hantro/Kconfig index de77fe6554e7..99aed9a5b0b9 100644 --- a/drivers/staging/media/hantro/Kconfig +++ b/drivers/staging/media/hantro/Kconfig @@ -1,19 +1,27 @@ # SPDX-License-Identifier: GPL-2.0 config VIDEO_HANTRO tristate "Hantro VPU driver" - depends on ARCH_ROCKCHIP || COMPILE_TEST + depends on ARCH_MXC || ARCH_ROCKCHIP || COMPILE_TEST depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER depends on MEDIA_CONTROLLER_REQUEST_API select VIDEOBUF2_DMA_CONTIG select VIDEOBUF2_VMALLOC select V4L2_MEM2MEM_DEV help - Support for the Hantro IP based Video Processing Unit present on - Rockchip SoC, which accelerates video and image encoding and - decoding. + Support for the Hantro IP based Video Processing Units present on + Rockchip and NXP i.MX8M SoCs, which accelerate video and image + encoding and decoding. To compile this driver as a module, choose M here: the module will be called hantro-vpu. +config VIDEO_HANTRO_IMX8M + bool "Hantro VPU i.MX8M support" + depends on VIDEO_HANTRO + depends on ARCH_MXC || COMPILE_TEST + default y + help + Enable support for i.MX8M SoCs. 
+ config VIDEO_HANTRO_ROCKCHIP bool "Hantro VPU Rockchip support" depends on VIDEO_HANTRO diff --git a/drivers/staging/media/hantro/Makefile b/drivers/staging/media/hantro/Makefile index 496b30c3c396..68c29a9c4946 100644 --- a/drivers/staging/media/hantro/Makefile +++ b/drivers/staging/media/hantro/Makefile @@ -16,6 +16,9 @@ hantro-vpu-y += \ hantro_mpeg2.o \ hantro_vp8.o +hantro-vpu-$(CONFIG_VIDEO_HANTRO_IMX8M) += \ + imx8m_vpu_hw.o + hantro-vpu-$(CONFIG_VIDEO_HANTRO_ROCKCHIP) += \ rk3288_vpu_hw.o \ rk3399_vpu_hw.o diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h index b0faa43b3f79..327ddef45345 100644 --- a/drivers/staging/media/hantro/hantro.h +++ b/drivers/staging/media/hantro/hantro.h @@ -423,7 +423,7 @@ hantro_get_dst_buf(struct hantro_ctx *ctx) static inline bool hantro_needs_postproc(struct hantro_ctx *ctx, const struct hantro_fmt *fmt) { - return fmt->fourcc != V4L2_PIX_FMT_NV12; + return !hantro_is_encoder_ctx(ctx) && fmt->fourcc != V4L2_PIX_FMT_NV12; } static inline dma_addr_t diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c index 97c615a2f057..ace13973e2d0 100644 --- a/drivers/staging/media/hantro/hantro_drv.c +++ b/drivers/staging/media/hantro/hantro_drv.c @@ -362,6 +362,16 @@ static const struct hantro_ctrl controls[] = { .max = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B, }, }, { + .codec = HANTRO_H264_DECODER, + .cfg = { + .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE, + .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, + .max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, + .menu_skip_mask = + BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED), + .def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN, + } + }, { }, }; @@ -489,6 +499,9 @@ static const struct of_device_id of_hantro_match[] = { { .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, }, { .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, }, #endif +#ifdef CONFIG_VIDEO_HANTRO_IMX8M + { .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, }, +#endif { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_hantro_match); @@ -558,13 +571,13 @@ static int hantro_attach_func(struct hantro_dev *vpu, goto err_rel_entity1; /* Connect the three entities */ - ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 1, + ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) goto err_rel_entity2; - ret = media_create_pad_link(&func->proc, 0, &func->sink, 0, + ret = media_create_pad_link(&func->proc, 1, &func->sink, 0, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) @@ -664,7 +677,7 @@ static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid) video_set_drvdata(vfd, vpu); - ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1); if (ret) { v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n"); return ret; diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c index 0d8afc3e5d71..b22418436823 100644 --- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c +++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c @@ -67,12 +67,23 @@ hantro_h1_jpeg_enc_set_qtable(struct hantro_dev *vpu, unsigned char *chroma_qtable) { u32 reg, i; + __be32 *luma_qtable_p; + __be32 *chroma_qtable_p; + luma_qtable_p = (__be32 *)luma_qtable; + chroma_qtable_p = (__be32 *)chroma_qtable; + + /* + * Quantization table registers must be written in contiguous 
blocks. + * DO NOT collapse the below two "for" loops into one. + */ for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) { - reg = get_unaligned_be32(&luma_qtable[i]); + reg = get_unaligned_be32(&luma_qtable_p[i]); vepu_write_relaxed(vpu, reg, H1_REG_JPEG_LUMA_QUAT(i)); + } - reg = get_unaligned_be32(&chroma_qtable[i]); + for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) { + reg = get_unaligned_be32(&chroma_qtable_p[i]); vepu_write_relaxed(vpu, reg, H1_REG_JPEG_CHROMA_QUAT(i)); } } @@ -103,8 +114,8 @@ void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx) hantro_h1_set_src_img_ctrl(vpu, ctx); hantro_h1_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf); hantro_h1_jpeg_enc_set_qtable(vpu, - hantro_jpeg_get_qtable(&jpeg_ctx, 0), - hantro_jpeg_get_qtable(&jpeg_ctx, 1)); + hantro_jpeg_get_qtable(0), + hantro_jpeg_get_qtable(1)); reg = H1_REG_AXI_CTRL_OUTPUT_SWAP16 | H1_REG_AXI_CTRL_INPUT_SWAP16 diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h index 2398d4c1f207..7dfc9bad7297 100644 --- a/drivers/staging/media/hantro/hantro_hw.h +++ b/drivers/staging/media/hantro/hantro_hw.h @@ -151,6 +151,7 @@ enum hantro_enc_fmt { extern const struct hantro_variant rk3399_vpu_variant; extern const struct hantro_variant rk3328_vpu_variant; extern const struct hantro_variant rk3288_vpu_variant; +extern const struct hantro_variant imx8mq_vpu_variant; extern const struct hantro_postproc_regs hantro_g1_postproc_regs; diff --git a/drivers/staging/media/hantro/hantro_jpeg.c b/drivers/staging/media/hantro/hantro_jpeg.c index 125eb41f2ede..36c140fc6a36 100644 --- a/drivers/staging/media/hantro/hantro_jpeg.c +++ b/drivers/staging/media/hantro/hantro_jpeg.c @@ -23,19 +23,21 @@ #define HUFF_CHROMA_AC_OFF 409 /* Default tables from JPEG ITU-T.81 - * (ISO/IEC 10918-1) Annex K.3, I + * (ISO/IEC 10918-1) Annex K, tables K.1 and K.2 */ static const unsigned char luma_q_table[] = { - 0x10, 0x0b, 0x0a, 0x10, 0x7c, 0x8c, 0x97, 0xa1, - 0x0c, 0x0c, 0x0e, 0x13, 0x7e, 0x9e, 0xa0, 0x9b, - 0x0e, 0x0d, 0x10, 0x18, 0x8c, 0x9d, 0xa9, 0x9c, - 0x0e, 0x11, 0x16, 0x1d, 0x97, 0xbb, 0xb4, 0xa2, - 0x12, 0x16, 0x25, 0x38, 0xa8, 0x6d, 0x67, 0xb1, - 0x18, 0x23, 0x37, 0x40, 0xb5, 0x68, 0x71, 0xc0, + 0x10, 0x0b, 0x0a, 0x10, 0x18, 0x28, 0x33, 0x3d, + 0x0c, 0x0c, 0x0e, 0x13, 0x1a, 0x3a, 0x3c, 0x37, + 0x0e, 0x0d, 0x10, 0x18, 0x28, 0x39, 0x45, 0x38, + 0x0e, 0x11, 0x16, 0x1d, 0x33, 0x57, 0x50, 0x3e, + 0x12, 0x16, 0x25, 0x38, 0x44, 0x6d, 0x67, 0x4d, + 0x18, 0x23, 0x37, 0x40, 0x51, 0x68, 0x71, 0x5c, 0x31, 0x40, 0x4e, 0x57, 0x67, 0x79, 0x78, 0x65, - 0x48, 0x5c, 0x5f, 0x62, 0x70, 0x64, 0x67, 0xc7, + 0x48, 0x5c, 0x5f, 0x62, 0x70, 0x64, 0x67, 0x63 }; +static unsigned char luma_q_table_reordered[ARRAY_SIZE(luma_q_table)]; + static const unsigned char chroma_q_table[] = { 0x11, 0x12, 0x18, 0x2f, 0x63, 0x63, 0x63, 0x63, 0x12, 0x15, 0x1a, 0x42, 0x63, 0x63, 0x63, 0x63, @@ -47,6 +49,30 @@ static const unsigned char chroma_q_table[] = { 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63 }; +static unsigned char chroma_q_table_reordered[ARRAY_SIZE(chroma_q_table)]; + +static const unsigned char zigzag[64] = { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63 +}; + +static const u32 hw_reorder[64] = { + 0, 8, 16, 24, 1, 9, 17, 25, + 32, 40, 48, 56, 33, 41, 49, 57, + 2, 10, 18, 26, 3, 11, 19, 27, + 34, 42, 50, 58, 35, 43, 51, 59, 
+ 4, 12, 20, 28, 5, 13, 21, 29, + 36, 44, 52, 60, 37, 45, 53, 61, + 6, 14, 22, 30, 7, 15, 23, 31, + 38, 46, 54, 62, 39, 47, 55, 63 +}; + /* Huffman tables are shared with CODA */ static const unsigned char luma_dc_table[] = { 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, @@ -225,20 +251,29 @@ static const unsigned char hantro_jpeg_header[JPEG_HEADER_SIZE] = { 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00, }; +static unsigned char jpeg_scale_qp(const unsigned char qp, int scale) +{ + unsigned int temp; + + temp = DIV_ROUND_CLOSEST((unsigned int)qp * scale, 100); + if (temp <= 0) + temp = 1; + if (temp > 255) + temp = 255; + + return (unsigned char)temp; +} + static void -jpeg_scale_quant_table(unsigned char *q_tab, +jpeg_scale_quant_table(unsigned char *file_q_tab, + unsigned char *reordered_q_tab, const unsigned char *tab, int scale) { - unsigned int temp; int i; for (i = 0; i < 64; i++) { - temp = DIV_ROUND_CLOSEST((unsigned int)tab[i] * scale, 100); - if (temp <= 0) - temp = 1; - if (temp > 255) - temp = 255; - q_tab[i] = (unsigned char)temp; + file_q_tab[i] = jpeg_scale_qp(tab[zigzag[i]], scale); + reordered_q_tab[i] = jpeg_scale_qp(tab[hw_reorder[i]], scale); } } @@ -256,17 +291,18 @@ static void jpeg_set_quality(unsigned char *buffer, int quality) scale = 200 - 2 * quality; jpeg_scale_quant_table(buffer + LUMA_QUANT_OFF, + luma_q_table_reordered, luma_q_table, scale); jpeg_scale_quant_table(buffer + CHROMA_QUANT_OFF, + chroma_q_table_reordered, chroma_q_table, scale); } -unsigned char * -hantro_jpeg_get_qtable(struct hantro_jpeg_ctx *ctx, int index) +unsigned char *hantro_jpeg_get_qtable(int index) { if (index == 0) - return ctx->buffer + LUMA_QUANT_OFF; - return ctx->buffer + CHROMA_QUANT_OFF; + return luma_q_table_reordered; + return chroma_q_table_reordered; } void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx) diff --git a/drivers/staging/media/hantro/hantro_jpeg.h b/drivers/staging/media/hantro/hantro_jpeg.h index 9e8397c71388..9474a00277f8 100644 --- a/drivers/staging/media/hantro/hantro_jpeg.h +++ b/drivers/staging/media/hantro/hantro_jpeg.h @@ -9,5 +9,5 @@ struct hantro_jpeg_ctx { unsigned char *buffer; }; -unsigned char *hantro_jpeg_get_qtable(struct hantro_jpeg_ctx *ctx, int index); +unsigned char *hantro_jpeg_get_qtable(int index); void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx); diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c index 28a85d301d7f..44062ffceaea 100644 --- a/drivers/staging/media/hantro/hantro_postproc.c +++ b/drivers/staging/media/hantro/hantro_postproc.c @@ -14,16 +14,16 @@ #define HANTRO_PP_REG_WRITE(vpu, reg_name, val) \ { \ - hantro_reg_write((vpu), \ - &((vpu)->variant->postproc_regs->reg_name), \ - (val)); \ + hantro_reg_write(vpu, \ + &(vpu)->variant->postproc_regs->reg_name, \ + val); \ } #define HANTRO_PP_REG_WRITE_S(vpu, reg_name, val) \ { \ - hantro_reg_write_s((vpu), \ - &((vpu)->variant->postproc_regs->reg_name), \ - (val)); \ + hantro_reg_write_s(vpu, \ + &(vpu)->variant->postproc_regs->reg_name, \ + val); \ } #define VPU_PP_IN_YUYV 0x0 diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c index 0198bcda26b7..f4ae2cee0f18 100644 --- a/drivers/staging/media/hantro/hantro_v4l2.c +++ b/drivers/staging/media/hantro/hantro_v4l2.c @@ -295,7 +295,7 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f, * +---------------------------+ */ if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE && - 
!hantro_needs_postproc(ctx, ctx->vpu_dst_fmt)) + !hantro_needs_postproc(ctx, fmt)) pix_mp->plane_fmt[0].sizeimage += 64 * MB_WIDTH(pix_mp->width) * MB_WIDTH(pix_mp->height) + 32; diff --git a/drivers/staging/media/hantro/imx8m_vpu_hw.c b/drivers/staging/media/hantro/imx8m_vpu_hw.c new file mode 100644 index 000000000000..cb2420c5526e --- /dev/null +++ b/drivers/staging/media/hantro/imx8m_vpu_hw.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Hantro VPU codec driver + * + * Copyright (C) 2019 Pengutronix, Philipp Zabel <kernel@pengutronix.de> + */ + +#include <linux/clk.h> +#include <linux/delay.h> + +#include "hantro.h" +#include "hantro_jpeg.h" +#include "hantro_g1_regs.h" + +#define CTRL_SOFT_RESET 0x00 +#define RESET_G1 BIT(1) +#define RESET_G2 BIT(0) + +#define CTRL_CLOCK_ENABLE 0x04 +#define CLOCK_G1 BIT(1) +#define CLOCK_G2 BIT(0) + +#define CTRL_G1_DEC_FUSE 0x08 +#define CTRL_G1_PP_FUSE 0x0c +#define CTRL_G2_DEC_FUSE 0x10 + +static void imx8m_soft_reset(struct hantro_dev *vpu, u32 reset_bits) +{ + u32 val; + + /* Assert */ + val = readl(vpu->ctrl_base + CTRL_SOFT_RESET); + val &= ~reset_bits; + writel(val, vpu->ctrl_base + CTRL_SOFT_RESET); + + udelay(2); + + /* Release */ + val = readl(vpu->ctrl_base + CTRL_SOFT_RESET); + val |= reset_bits; + writel(val, vpu->ctrl_base + CTRL_SOFT_RESET); +} + +static void imx8m_clk_enable(struct hantro_dev *vpu, u32 clock_bits) +{ + u32 val; + + val = readl(vpu->ctrl_base + CTRL_CLOCK_ENABLE); + val |= clock_bits; + writel(val, vpu->ctrl_base + CTRL_CLOCK_ENABLE); +} + +static int imx8mq_runtime_resume(struct hantro_dev *vpu) +{ + int ret; + + ret = clk_bulk_prepare_enable(vpu->variant->num_clocks, vpu->clocks); + if (ret) { + dev_err(vpu->dev, "Failed to enable clocks\n"); + return ret; + } + + imx8m_soft_reset(vpu, RESET_G1 | RESET_G2); + imx8m_clk_enable(vpu, CLOCK_G1 | CLOCK_G2); + + /* Set values of the fuse registers */ + writel(0xffffffff, vpu->ctrl_base + CTRL_G1_DEC_FUSE); + writel(0xffffffff, vpu->ctrl_base + CTRL_G1_PP_FUSE); + writel(0xffffffff, vpu->ctrl_base + CTRL_G2_DEC_FUSE); + + clk_bulk_disable_unprepare(vpu->variant->num_clocks, vpu->clocks); + + return 0; +} + +/* + * Supported formats. + */ + +static const struct hantro_fmt imx8m_vpu_postproc_fmts[] = { + { + .fourcc = V4L2_PIX_FMT_YUYV, + .codec_mode = HANTRO_MODE_NONE, + }, +}; + +static const struct hantro_fmt imx8m_vpu_dec_fmts[] = { + { + .fourcc = V4L2_PIX_FMT_NV12, + .codec_mode = HANTRO_MODE_NONE, + }, + { + .fourcc = V4L2_PIX_FMT_MPEG2_SLICE, + .codec_mode = HANTRO_MODE_MPEG2_DEC, + .max_depth = 2, + .frmsize = { + .min_width = 48, + .max_width = 1920, + .step_width = MB_DIM, + .min_height = 48, + .max_height = 1088, + .step_height = MB_DIM, + }, + }, + { + .fourcc = V4L2_PIX_FMT_VP8_FRAME, + .codec_mode = HANTRO_MODE_VP8_DEC, + .max_depth = 2, + .frmsize = { + .min_width = 48, + .max_width = 3840, + .step_width = 16, + .min_height = 48, + .max_height = 2160, + .step_height = 16, + }, + }, + { + .fourcc = V4L2_PIX_FMT_H264_SLICE, + .codec_mode = HANTRO_MODE_H264_DEC, + .max_depth = 2, + .frmsize = { + .min_width = 48, + .max_width = 3840, + .step_width = MB_DIM, + .min_height = 48, + .max_height = 2160, + .step_height = MB_DIM, + }, + }, +}; + +static irqreturn_t imx8m_vpu_g1_irq(int irq, void *dev_id) +{ + struct hantro_dev *vpu = dev_id; + enum vb2_buffer_state state; + u32 status; + + status = vdpu_read(vpu, G1_REG_INTERRUPT); + state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ? 
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR; + + vdpu_write(vpu, 0, G1_REG_INTERRUPT); + vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG); + + hantro_irq_done(vpu, 0, state); + + return IRQ_HANDLED; +} + +static int imx8mq_vpu_hw_init(struct hantro_dev *vpu) +{ + vpu->dec_base = vpu->reg_bases[0]; + vpu->ctrl_base = vpu->reg_bases[vpu->variant->num_regs - 1]; + + return 0; +} + +static void imx8m_vpu_g1_reset(struct hantro_ctx *ctx) +{ + struct hantro_dev *vpu = ctx->dev; + + imx8m_soft_reset(vpu, RESET_G1); +} + +/* + * Supported codec ops. + */ + +static const struct hantro_codec_ops imx8mq_vpu_codec_ops[] = { + [HANTRO_MODE_MPEG2_DEC] = { + .run = hantro_g1_mpeg2_dec_run, + .reset = imx8m_vpu_g1_reset, + .init = hantro_mpeg2_dec_init, + .exit = hantro_mpeg2_dec_exit, + }, + [HANTRO_MODE_VP8_DEC] = { + .run = hantro_g1_vp8_dec_run, + .reset = imx8m_vpu_g1_reset, + .init = hantro_vp8_dec_init, + .exit = hantro_vp8_dec_exit, + }, + [HANTRO_MODE_H264_DEC] = { + .run = hantro_g1_h264_dec_run, + .reset = imx8m_vpu_g1_reset, + .init = hantro_h264_dec_init, + .exit = hantro_h264_dec_exit, + }, +}; + +/* + * VPU variants. + */ + +static const struct hantro_irq imx8mq_irqs[] = { + { "g1", imx8m_vpu_g1_irq }, + { "g2", NULL /* TODO: imx8m_vpu_g2_irq */ }, +}; + +static const char * const imx8mq_clk_names[] = { "g1", "g2", "bus" }; +static const char * const imx8mq_reg_names[] = { "g1", "g2", "ctrl" }; + +const struct hantro_variant imx8mq_vpu_variant = { + .dec_fmts = imx8m_vpu_dec_fmts, + .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts), + .postproc_fmts = imx8m_vpu_postproc_fmts, + .num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_postproc_fmts), + .postproc_regs = &hantro_g1_postproc_regs, + .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER | + HANTRO_H264_DECODER, + .codec_ops = imx8mq_vpu_codec_ops, + .init = imx8mq_vpu_hw_init, + .runtime_resume = imx8mq_runtime_resume, + .irqs = imx8mq_irqs, + .num_irqs = ARRAY_SIZE(imx8mq_irqs), + .clk_names = imx8mq_clk_names, + .num_clocks = ARRAY_SIZE(imx8mq_clk_names), + .reg_names = imx8mq_reg_names, + .num_regs = ARRAY_SIZE(imx8mq_reg_names) +}; diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c index 4c2d43fb6fd1..3498e6124acd 100644 --- a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c +++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c @@ -18,9 +18,8 @@ * * Quantization luma table values are written to registers * VEPU_swreg_0-VEPU_swreg_15, and chroma table values to - * VEPU_swreg_16-VEPU_swreg_31. - * - * JPEG zigzag order is expected on the quantization tables. + * VEPU_swreg_16-VEPU_swreg_31. A special order is needed, neither + * zigzag, nor linear. */ #include <asm/unaligned.h> @@ -98,12 +97,23 @@ rk3399_vpu_jpeg_enc_set_qtable(struct hantro_dev *vpu, unsigned char *chroma_qtable) { u32 reg, i; + __be32 *luma_qtable_p; + __be32 *chroma_qtable_p; + + luma_qtable_p = (__be32 *)luma_qtable; + chroma_qtable_p = (__be32 *)chroma_qtable; + /* + * Quantization table registers must be written in contiguous blocks. + * DO NOT collapse the below two "for" loops into one. 
+ */ for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) { - reg = get_unaligned_be32(&luma_qtable[i]); + reg = get_unaligned_be32(&luma_qtable_p[i]); vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_LUMA_QUAT(i)); + } - reg = get_unaligned_be32(&chroma_qtable[i]); + for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) { + reg = get_unaligned_be32(&chroma_qtable_p[i]); vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_CHROMA_QUAT(i)); } } @@ -134,8 +144,8 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx) rk3399_vpu_set_src_img_ctrl(vpu, ctx); rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf); rk3399_vpu_jpeg_enc_set_qtable(vpu, - hantro_jpeg_get_qtable(&jpeg_ctx, 0), - hantro_jpeg_get_qtable(&jpeg_ctx, 1)); + hantro_jpeg_get_qtable(0), + hantro_jpeg_get_qtable(1)); reg = VEPU_REG_OUTPUT_SWAP32 | VEPU_REG_OUTPUT_SWAP16 diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c index 7712e7be8625..d37b776ff86d 100644 --- a/drivers/staging/media/imx/imx-media-capture.c +++ b/drivers/staging/media/imx/imx-media-capture.c @@ -643,7 +643,7 @@ static int capture_open(struct file *file) if (ret) v4l2_err(priv->src_sd, "v4l2_fh_open failed\n"); - ret = v4l2_pipeline_pm_use(&vfd->entity, 1); + ret = v4l2_pipeline_pm_get(&vfd->entity); if (ret) v4l2_fh_release(file); @@ -664,7 +664,7 @@ static int capture_release(struct file *file) vq->owner = NULL; } - v4l2_pipeline_pm_use(&vfd->entity, 0); + v4l2_pipeline_pm_put(&vfd->entity); v4l2_fh_release(file); mutex_unlock(&priv->mutex); @@ -742,7 +742,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev) vfd->v4l2_dev = v4l2_dev; - ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1); if (ret) { v4l2_err(sd, "Failed to register video device\n"); return ret; @@ -778,7 +778,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev) /* setup default format */ fmt_src.pad = priv->src_sd_pad; fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE; - v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src); + ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src); if (ret) { v4l2_err(sd, "failed to get src_sd format\n"); goto unreg; diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c index 2b635ebf62d6..2cc77f6e84b6 100644 --- a/drivers/staging/media/imx/imx-media-csc-scaler.c +++ b/drivers/staging/media/imx/imx-media-csc-scaler.c @@ -849,7 +849,7 @@ int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev) vfd->v4l2_dev = &priv->md->v4l2_dev; - ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1); if (ret) { v4l2_err(vfd->v4l2_dev, "Failed to register video device\n"); return ret; diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c index b60ed4f22f6d..e76a6a85baa3 100644 --- a/drivers/staging/media/imx/imx-media-csi.c +++ b/drivers/staging/media/imx/imx-media-csi.c @@ -457,7 +457,8 @@ static int csi_idmac_setup_channel(struct csi_priv *priv) case V4L2_PIX_FMT_SGBRG16: case V4L2_PIX_FMT_SGRBG16: case V4L2_PIX_FMT_SRGGB16: - case V4L2_PIX_FMT_Y16: + case V4L2_PIX_FMT_Y10: + case V4L2_PIX_FMT_Y12: burst_size = 8; passthrough_bits = 16; break; @@ -1459,6 +1460,8 @@ static void csi_try_fmt(struct csi_priv *priv, /* propagate colorimetry from sink */ sdformat->format.colorspace = infmt->colorspace; sdformat->format.xfer_func = infmt->xfer_func; + 
sdformat->format.quantization = infmt->quantization; + sdformat->format.ycbcr_enc = infmt->ycbcr_enc; break; case CSI_SINK_PAD: diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c index 0788a1874557..fae981698c49 100644 --- a/drivers/staging/media/imx/imx-media-utils.c +++ b/drivers/staging/media/imx/imx-media-utils.c @@ -161,17 +161,24 @@ static const struct imx_media_pixfmt rgb_formats[] = { .bayer = true, }, { .fourcc = V4L2_PIX_FMT_GREY, - .codes = {MEDIA_BUS_FMT_Y8_1X8}, - .cs = IPUV3_COLORSPACE_RGB, - .bpp = 8, - .bayer = true, - }, { - .fourcc = V4L2_PIX_FMT_Y16, .codes = { + MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y12_1X12, }, .cs = IPUV3_COLORSPACE_RGB, + .bpp = 8, + .bayer = true, + }, { + .fourcc = V4L2_PIX_FMT_Y10, + .codes = {MEDIA_BUS_FMT_Y10_1X10}, + .cs = IPUV3_COLORSPACE_RGB, + .bpp = 16, + .bayer = true, + }, { + .fourcc = V4L2_PIX_FMT_Y12, + .codes = {MEDIA_BUS_FMT_Y12_1X12}, + .cs = IPUV3_COLORSPACE_RGB, .bpp = 16, .bayer = true, }, diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c index cd3dd6e33ef0..8ab823042c09 100644 --- a/drivers/staging/media/imx/imx6-mipi-csi2.c +++ b/drivers/staging/media/imx/imx6-mipi-csi2.c @@ -592,22 +592,19 @@ static int csi2_probe(struct platform_device *pdev) csi2->pllref_clk = devm_clk_get(&pdev->dev, "ref"); if (IS_ERR(csi2->pllref_clk)) { v4l2_err(&csi2->sd, "failed to get pll reference clock\n"); - ret = PTR_ERR(csi2->pllref_clk); - return ret; + return PTR_ERR(csi2->pllref_clk); } csi2->dphy_clk = devm_clk_get(&pdev->dev, "dphy"); if (IS_ERR(csi2->dphy_clk)) { v4l2_err(&csi2->sd, "failed to get dphy clock\n"); - ret = PTR_ERR(csi2->dphy_clk); - return ret; + return PTR_ERR(csi2->dphy_clk); } csi2->pix_clk = devm_clk_get(&pdev->dev, "pix"); if (IS_ERR(csi2->pix_clk)) { v4l2_err(&csi2->sd, "failed to get pixel clock\n"); - ret = PTR_ERR(csi2->pix_clk); - return ret; + return PTR_ERR(csi2->pix_clk); } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c index db30e2c70f2f..acbdffb77668 100644 --- a/drivers/staging/media/imx/imx7-media-csi.c +++ b/drivers/staging/media/imx/imx7-media-csi.c @@ -292,7 +292,7 @@ static void imx7_csi_hw_disable(struct imx7_csi *csi) static void imx7_csi_dma_reflash(struct imx7_csi *csi) { - u32 cr3 = imx7_csi_reg_read(csi, CSI_CSICR18); + u32 cr3; cr3 = imx7_csi_reg_read(csi, CSI_CSICR3); cr3 |= BIT_DMA_REFLASH_RFF; @@ -804,6 +804,22 @@ static int imx7_csi_configure(struct imx7_csi *csi) case V4L2_PIX_FMT_YUYV: cr18 |= BIT_MIPI_DATA_FORMAT_YUV422_8B; break; + case V4L2_PIX_FMT_GREY: + if (in_code == MEDIA_BUS_FMT_Y8_1X8) + cr18 |= BIT_MIPI_DATA_FORMAT_RAW8; + else if (in_code == MEDIA_BUS_FMT_Y10_1X10) + cr18 |= BIT_MIPI_DATA_FORMAT_RAW10; + else + cr18 |= BIT_MIPI_DATA_FORMAT_RAW12; + break; + case V4L2_PIX_FMT_Y10: + cr18 |= BIT_MIPI_DATA_FORMAT_RAW10; + cr1 |= BIT_PIXEL_BIT; + break; + case V4L2_PIX_FMT_Y12: + cr18 |= BIT_MIPI_DATA_FORMAT_RAW12; + cr1 |= BIT_PIXEL_BIT; + break; case V4L2_PIX_FMT_SBGGR8: cr18 |= BIT_MIPI_DATA_FORMAT_RAW8; break; @@ -1009,10 +1025,13 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi, sdformat->format.width = in_fmt->width; sdformat->format.height = in_fmt->height; sdformat->format.code = in_fmt->code; + sdformat->format.field = in_fmt->field; *cc = in_cc; sdformat->format.colorspace = in_fmt->colorspace; sdformat->format.xfer_func = in_fmt->xfer_func; + 
sdformat->format.quantization = in_fmt->quantization; + sdformat->format.ycbcr_enc = in_fmt->ycbcr_enc; break; case IMX7_CSI_PAD_SINK: *cc = imx_media_find_mbus_format(sdformat->format.code, @@ -1023,6 +1042,9 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi, false); sdformat->format.code = (*cc)->codes[0]; } + + if (sdformat->format.field != V4L2_FIELD_INTERLACED) + sdformat->format.field = V4L2_FIELD_NONE; break; default: return -EINVAL; diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c index 383abecb3bec..fbc1a924652a 100644 --- a/drivers/staging/media/imx/imx7-mipi-csis.c +++ b/drivers/staging/media/imx/imx7-mipi-csis.c @@ -280,6 +280,18 @@ static const struct csis_pix_format mipi_csis_formats[] = { .code = MEDIA_BUS_FMT_YUYV8_2X8, .fmt_reg = MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT, .data_alignment = 16, + }, { + .code = MEDIA_BUS_FMT_Y8_1X8, + .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8, + .data_alignment = 8, + }, { + .code = MEDIA_BUS_FMT_Y10_1X10, + .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10, + .data_alignment = 16, + }, { + .code = MEDIA_BUS_FMT_Y12_1X12, + .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12, + .data_alignment = 16, } }; @@ -301,6 +313,7 @@ static int mipi_csis_dump_regs(struct csi_state *state) { 0x20, "DPHYSTS" }, { 0x10, "INTMSK" }, { 0x40, "CONFIG_CH0" }, + { 0x44, "RESOL_CH0" }, { 0xC0, "DBG_CONFIG" }, { 0x38, "DPHYSLAVE_L" }, { 0x3C, "DPHYSLAVE_H" }, @@ -404,7 +417,7 @@ static void mipi_csis_set_hsync_settle(struct csi_state *state, int hs_settle) { u32 val = mipi_csis_read(state, MIPI_CSIS_DPHYCTRL); - val = ((val & ~MIPI_CSIS_DPHYCTRL_HSS_MASK) | (hs_settle << 24)); + val = (val & ~MIPI_CSIS_DPHYCTRL_HSS_MASK) | (hs_settle << 24); mipi_csis_write(state, MIPI_CSIS_DPHYCTRL, val); } @@ -417,6 +430,7 @@ static void mipi_csis_set_params(struct csi_state *state) val = mipi_csis_read(state, MIPI_CSIS_CMN_CTRL); val &= ~MIPI_CSIS_CMN_CTRL_LANE_NR_MASK; val |= (lanes - 1) << MIPI_CSIS_CMN_CTRL_LANE_NR_OFFSET; + val |= MIPI_CSIS_CMN_CTRL_INTER_MODE; mipi_csis_write(state, MIPI_CSIS_CMN_CTRL, val); __mipi_csis_set_format(state); @@ -577,7 +591,7 @@ static int mipi_csis_s_stream(struct v4l2_subdev *mipi_sd, int enable) state->flags |= ST_STREAMING; } else { v4l2_subdev_call(state->src_sd, video, s_stream, 0); - ret = v4l2_subdev_call(state->src_sd, core, s_power, 1); + ret = v4l2_subdev_call(state->src_sd, core, s_power, 0); mipi_csis_stop_stream(state); state->flags &= ~ST_STREAMING; if (state->debug) diff --git a/drivers/staging/media/ipu3/TODO b/drivers/staging/media/ipu3/TODO index dc356d6f03c4..52063b651810 100644 --- a/drivers/staging/media/ipu3/TODO +++ b/drivers/staging/media/ipu3/TODO @@ -13,8 +13,6 @@ staging directory. - Elaborate the functionality of different selection rectangles in driver documentation. This may require driver changes as well. -- More detailed documentation on calculating BDS, GCD etc. sizes needed. - - Document different operation modes, and which buffer queues are relevant in each mode. To process an image, which queues require a buffer an in which ones is it optional? 
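The greyscale plumbing in the imx hunks above follows one mapping end to end: imx-media-utils pairs each monochrome media-bus code with a V4L2 pixel format, imx7-media-csi programs the matching MIPI CSI-2 RAW data type (setting BIT_PIXEL_BIT to select a 16-bit container for the deeper samples), and imx7-mipi-csis gains the corresponding csis_pix_format entries. A condensed sketch of that mapping, pieced together from the hunks rather than copied from any one source file:

	MEDIA_BUS_FMT_Y8_1X8    ->  V4L2_PIX_FMT_GREY (8 bpp)   ->  RAW8
	MEDIA_BUS_FMT_Y10_1X10  ->  V4L2_PIX_FMT_Y10 (16 bpp)   ->  RAW10 + BIT_PIXEL_BIT
	MEDIA_BUS_FMT_Y12_1X12  ->  V4L2_PIX_FMT_Y12 (16 bpp)   ->  RAW12 + BIT_PIXEL_BIT

V4L2_PIX_FMT_GREY also lists the 10- and 12-bit codes, so an 8 bpp greyscale capture can be requested from those sources as well (imx7_csi_configure then selects RAW10 or RAW12 from the sink code, without BIT_PIXEL_BIT).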
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c index f36de501edc6..4f04fe838b0c 100644 --- a/drivers/staging/media/ipu3/ipu3-css.c +++ b/drivers/staging/media/ipu3/ipu3-css.c @@ -210,12 +210,12 @@ static int imgu_hw_wait(void __iomem *base, int reg, u32 mask, u32 cmp) /* Initialize the IPU3 CSS hardware and associated h/w blocks */ -int imgu_css_set_powerup(struct device *dev, void __iomem *base) +int imgu_css_set_powerup(struct device *dev, void __iomem *base, + unsigned int freq) { - static const unsigned int freq = 450; u32 pm_ctrl, state, val; - dev_dbg(dev, "%s\n", __func__); + dev_dbg(dev, "%s with freq %u\n", __func__, freq); /* Clear the CSS busy signal */ readl(base + IMGU_REG_GP_BUSY); writel(0, base + IMGU_REG_GP_BUSY); diff --git a/drivers/staging/media/ipu3/ipu3-css.h b/drivers/staging/media/ipu3/ipu3-css.h index 6b8bab27ab1f..6108a068b228 100644 --- a/drivers/staging/media/ipu3/ipu3-css.h +++ b/drivers/staging/media/ipu3/ipu3-css.h @@ -187,7 +187,8 @@ bool imgu_css_is_streaming(struct imgu_css *css); bool imgu_css_pipe_queue_empty(struct imgu_css *css, unsigned int pipe); /******************* css hw *******************/ -int imgu_css_set_powerup(struct device *dev, void __iomem *base); +int imgu_css_set_powerup(struct device *dev, void __iomem *base, + unsigned int freq); void imgu_css_set_powerdown(struct device *dev, void __iomem *base); int imgu_css_irq_ack(struct imgu_css *css); diff --git a/drivers/staging/media/ipu3/ipu3-mmu.c b/drivers/staging/media/ipu3/ipu3-mmu.c index 3d969b0522ab..5f3ff964f3e7 100644 --- a/drivers/staging/media/ipu3/ipu3-mmu.c +++ b/drivers/staging/media/ipu3/ipu3-mmu.c @@ -130,7 +130,7 @@ static u32 *imgu_mmu_alloc_page_table(u32 pteval) for (pte = 0; pte < IPU3_PT_PTES; pte++) pt[pte] = pteval; - set_memory_uc((unsigned long int)pt, IPU3_PT_ORDER); + set_memory_uc((unsigned long)pt, IPU3_PT_ORDER); return pt; } @@ -141,7 +141,7 @@ static u32 *imgu_mmu_alloc_page_table(u32 pteval) */ static void imgu_mmu_free_page_table(u32 *pt) { - set_memory_wb((unsigned long int)pt, IPU3_PT_ORDER); + set_memory_wb((unsigned long)pt, IPU3_PT_ORDER); free_page((unsigned long)pt); } diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c index 569e27b824c8..09c8ede1457c 100644 --- a/drivers/staging/media/ipu3/ipu3-v4l2.c +++ b/drivers/staging/media/ipu3/ipu3-v4l2.c @@ -1245,7 +1245,7 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe, vdev->queue = &node->vbq; vdev->vfl_dir = node->output ? 
VFL_DIR_TX : VFL_DIR_RX; video_set_drvdata(vdev, imgu); - r = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + r = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (r) { dev_err(dev, "failed to register video device (%d)", r); media_entity_cleanup(&vdev->entity); diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c index 06a61f31ca50..4d53aad31483 100644 --- a/drivers/staging/media/ipu3/ipu3.c +++ b/drivers/staging/media/ipu3/ipu3.c @@ -345,8 +345,20 @@ failed: static int imgu_powerup(struct imgu_device *imgu) { int r; + unsigned int pipe; + unsigned int freq = 200; + struct v4l2_mbus_framefmt *fmt; + + /* input larger than 2048*1152, ask imgu to work on high freq */ + for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) { + fmt = &imgu->imgu_pipe[pipe].nodes[IMGU_NODE_IN].pad_fmt; + dev_dbg(&imgu->pci_dev->dev, "pipe %u input format = %ux%u", + pipe, fmt->width, fmt->height); + if ((fmt->width * fmt->height) >= (2048 * 1152)) + freq = 450; + } - r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base); + r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base, freq); if (r) return r; @@ -666,7 +678,7 @@ static int imgu_pci_probe(struct pci_dev *pci_dev, atomic_set(&imgu->qbuf_barrier, 0); init_waitqueue_head(&imgu->buf_drain_wq); - r = imgu_css_set_powerup(&pci_dev->dev, imgu->base); + r = imgu_css_set_powerup(&pci_dev->dev, imgu->base, 200); if (r) { dev_err(&pci_dev->dev, "failed to power up CSS (%d)\n", r); diff --git a/drivers/staging/media/meson/vdec/Makefile b/drivers/staging/media/meson/vdec/Makefile index 6bea129084b7..6e726af84ac9 100644 --- a/drivers/staging/media/meson/vdec/Makefile +++ b/drivers/staging/media/meson/vdec/Makefile @@ -2,7 +2,7 @@ # Makefile for Amlogic meson video decoder driver meson-vdec-objs = esparser.o vdec.o vdec_helpers.o vdec_platform.o -meson-vdec-objs += vdec_1.o -meson-vdec-objs += codec_mpeg12.o +meson-vdec-objs += vdec_1.o vdec_hevc.o +meson-vdec-objs += codec_mpeg12.o codec_h264.o codec_hevc_common.o codec_vp9.o obj-$(CONFIG_VIDEO_MESON_VDEC) += meson-vdec.o diff --git a/drivers/staging/media/meson/vdec/codec_h264.c b/drivers/staging/media/meson/vdec/codec_h264.c new file mode 100644 index 000000000000..c61128fc4bb9 --- /dev/null +++ b/drivers/staging/media/meson/vdec/codec_h264.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 BayLibre, SAS + * Author: Maxime Jourdan <mjourdan@baylibre.com> + */ + +#include <media/v4l2-mem2mem.h> +#include <media/videobuf2-dma-contig.h> + +#include "vdec_helpers.h" +#include "dos_regs.h" +#include "codec_h264.h" + +#define SIZE_EXT_FW (20 * SZ_1K) +#define SIZE_WORKSPACE 0x1ee000 +#define SIZE_SEI (8 * SZ_1K) + +/* + * Offset added by the firmware which must be substracted + * from the workspace phyaddr + */ +#define WORKSPACE_BUF_OFFSET 0x1000000 + +/* ISR status */ +#define CMD_MASK GENMASK(7, 0) +#define CMD_SRC_CHANGE 1 +#define CMD_FRAMES_READY 2 +#define CMD_FATAL_ERROR 6 +#define CMD_BAD_WIDTH 7 +#define CMD_BAD_HEIGHT 8 + +#define SEI_DATA_READY BIT(15) + +/* Picture type */ +#define PIC_TOP_BOT 5 +#define PIC_BOT_TOP 6 + +/* Size of Motion Vector per macroblock */ +#define MB_MV_SIZE 96 + +/* Frame status data */ +#define PIC_STRUCT_BIT 5 +#define PIC_STRUCT_MASK GENMASK(2, 0) +#define BUF_IDX_MASK GENMASK(4, 0) +#define ERROR_FLAG BIT(9) +#define OFFSET_BIT 16 +#define OFFSET_MASK GENMASK(15, 0) + +/* Bitstream parsed data */ +#define MB_TOTAL_BIT 8 +#define MB_TOTAL_MASK GENMASK(15, 0) +#define MB_WIDTH_MASK 
GENMASK(7, 0) +#define MAX_REF_BIT 24 +#define MAX_REF_MASK GENMASK(6, 0) +#define AR_IDC_BIT 16 +#define AR_IDC_MASK GENMASK(7, 0) +#define AR_PRESENT_FLAG BIT(0) +#define AR_EXTEND 0xff + +/* + * Buffer to send to the ESPARSER to signal End Of Stream for H.264. + * This is a 16x16 encoded picture that will trigger drain firmware-side. + * There is no known alternative. + */ +static const u8 eos_sequence[SZ_4K] = { + 0x00, 0x00, 0x00, 0x01, 0x06, 0x05, 0xff, 0xe4, 0xdc, 0x45, 0xe9, 0xbd, + 0xe6, 0xd9, 0x48, 0xb7, 0x96, 0x2c, 0xd8, 0x20, 0xd9, 0x23, 0xee, 0xef, + 0x78, 0x32, 0x36, 0x34, 0x20, 0x2d, 0x20, 0x63, 0x6f, 0x72, 0x65, 0x20, + 0x36, 0x37, 0x20, 0x72, 0x31, 0x31, 0x33, 0x30, 0x20, 0x38, 0x34, 0x37, + 0x35, 0x39, 0x37, 0x37, 0x20, 0x2d, 0x20, 0x48, 0x2e, 0x32, 0x36, 0x34, + 0x2f, 0x4d, 0x50, 0x45, 0x47, 0x2d, 0x34, 0x20, 0x41, 0x56, 0x43, 0x20, + 0x63, 0x6f, 0x64, 0x65, 0x63, 0x20, 0x2d, 0x20, 0x43, 0x6f, 0x70, 0x79, + 0x6c, 0x65, 0x66, 0x74, 0x20, 0x32, 0x30, 0x30, 0x33, 0x2d, 0x32, 0x30, + 0x30, 0x39, 0x20, 0x2d, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x6c, 0x61, 0x6e, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x78, 0x32, 0x36, 0x34, 0x2e, 0x68, 0x74, + 0x6d, 0x6c, 0x20, 0x2d, 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x3a, 0x20, 0x63, 0x61, 0x62, 0x61, 0x63, 0x3d, 0x31, 0x20, 0x72, 0x65, + 0x66, 0x3d, 0x31, 0x20, 0x64, 0x65, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x3d, + 0x31, 0x3a, 0x30, 0x3a, 0x30, 0x20, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x73, + 0x65, 0x3d, 0x30, 0x78, 0x31, 0x3a, 0x30, 0x78, 0x31, 0x31, 0x31, 0x20, + 0x6d, 0x65, 0x3d, 0x68, 0x65, 0x78, 0x20, 0x73, 0x75, 0x62, 0x6d, 0x65, + 0x3d, 0x36, 0x20, 0x70, 0x73, 0x79, 0x5f, 0x72, 0x64, 0x3d, 0x31, 0x2e, + 0x30, 0x3a, 0x30, 0x2e, 0x30, 0x20, 0x6d, 0x69, 0x78, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x66, 0x3d, 0x30, 0x20, 0x6d, 0x65, 0x5f, 0x72, 0x61, 0x6e, + 0x67, 0x65, 0x3d, 0x31, 0x36, 0x20, 0x63, 0x68, 0x72, 0x6f, 0x6d, 0x61, + 0x5f, 0x6d, 0x65, 0x3d, 0x31, 0x20, 0x74, 0x72, 0x65, 0x6c, 0x6c, 0x69, + 0x73, 0x3d, 0x30, 0x20, 0x38, 0x78, 0x38, 0x64, 0x63, 0x74, 0x3d, 0x30, + 0x20, 0x63, 0x71, 0x6d, 0x3d, 0x30, 0x20, 0x64, 0x65, 0x61, 0x64, 0x7a, + 0x6f, 0x6e, 0x65, 0x3d, 0x32, 0x31, 0x2c, 0x31, 0x31, 0x20, 0x63, 0x68, + 0x72, 0x6f, 0x6d, 0x61, 0x5f, 0x71, 0x70, 0x5f, 0x6f, 0x66, 0x66, 0x73, + 0x65, 0x74, 0x3d, 0x2d, 0x32, 0x20, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, + 0x73, 0x3d, 0x31, 0x20, 0x6e, 0x72, 0x3d, 0x30, 0x20, 0x64, 0x65, 0x63, + 0x69, 0x6d, 0x61, 0x74, 0x65, 0x3d, 0x31, 0x20, 0x6d, 0x62, 0x61, 0x66, + 0x66, 0x3d, 0x30, 0x20, 0x62, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x3d, + 0x30, 0x20, 0x6b, 0x65, 0x79, 0x69, 0x6e, 0x74, 0x3d, 0x32, 0x35, 0x30, + 0x20, 0x6b, 0x65, 0x79, 0x69, 0x6e, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x3d, + 0x32, 0x35, 0x20, 0x73, 0x63, 0x65, 0x6e, 0x65, 0x63, 0x75, 0x74, 0x3d, + 0x34, 0x30, 0x20, 0x72, 0x63, 0x3d, 0x61, 0x62, 0x72, 0x20, 0x62, 0x69, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x3d, 0x31, 0x30, 0x20, 0x72, 0x61, 0x74, + 0x65, 0x74, 0x6f, 0x6c, 0x3d, 0x31, 0x2e, 0x30, 0x20, 0x71, 0x63, 0x6f, + 0x6d, 0x70, 0x3d, 0x30, 0x2e, 0x36, 0x30, 0x20, 0x71, 0x70, 0x6d, 0x69, + 0x6e, 0x3d, 0x31, 0x30, 0x20, 0x71, 0x70, 0x6d, 0x61, 0x78, 0x3d, 0x35, + 0x31, 0x20, 0x71, 0x70, 0x73, 0x74, 0x65, 0x70, 0x3d, 0x34, 0x20, 0x69, + 0x70, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x3d, 0x31, 0x2e, 0x34, 0x30, + 0x20, 0x61, 0x71, 0x3d, 0x31, 0x3a, 0x31, 0x2e, 0x30, 0x30, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x01, 0x67, 0x4d, 0x40, 0x0a, 0x9a, 0x74, 0xf4, 0x20, + 0x00, 0x00, 0x03, 0x00, 
0x20, 0x00, 0x00, 0x06, 0x51, 0xe2, 0x44, 0xd4, + 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x32, 0xc8, 0x00, 0x00, 0x00, 0x01, + 0x65, 0x88, 0x80, 0x20, 0x00, 0x08, 0x7f, 0xea, 0x6a, 0xe2, 0x99, 0xb6, + 0x57, 0xae, 0x49, 0x30, 0xf5, 0xfe, 0x5e, 0x46, 0x0b, 0x72, 0x44, 0xc4, + 0xe1, 0xfc, 0x62, 0xda, 0xf1, 0xfb, 0xa2, 0xdb, 0xd6, 0xbe, 0x5c, 0xd7, + 0x24, 0xa3, 0xf5, 0xb9, 0x2f, 0x57, 0x16, 0x49, 0x75, 0x47, 0x77, 0x09, + 0x5c, 0xa1, 0xb4, 0xc3, 0x4f, 0x60, 0x2b, 0xb0, 0x0c, 0xc8, 0xd6, 0x66, + 0xba, 0x9b, 0x82, 0x29, 0x33, 0x92, 0x26, 0x99, 0x31, 0x1c, 0x7f, 0x9b, + 0x00, 0x00, 0x01, 0x0ff, +}; + +static const u8 *codec_h264_eos_sequence(u32 *len) +{ + *len = ARRAY_SIZE(eos_sequence); + return eos_sequence; +} + +struct codec_h264 { + /* H.264 decoder requires an extended firmware */ + void *ext_fw_vaddr; + dma_addr_t ext_fw_paddr; + + /* Buffer for the H.264 Workspace */ + void *workspace_vaddr; + dma_addr_t workspace_paddr; + + /* Buffer for the H.264 references MV */ + void *ref_vaddr; + dma_addr_t ref_paddr; + u32 ref_size; + + /* Buffer for parsed SEI data */ + void *sei_vaddr; + dma_addr_t sei_paddr; + + u32 mb_width; + u32 mb_height; + u32 max_refs; +}; + +static int codec_h264_can_recycle(struct amvdec_core *core) +{ + return !amvdec_read_dos(core, AV_SCRATCH_7) || + !amvdec_read_dos(core, AV_SCRATCH_8); +} + +static void codec_h264_recycle(struct amvdec_core *core, u32 buf_idx) +{ + /* + * Tell the firmware it can recycle this buffer. + * AV_SCRATCH_8 serves the same purpose. + */ + if (!amvdec_read_dos(core, AV_SCRATCH_7)) + amvdec_write_dos(core, AV_SCRATCH_7, buf_idx + 1); + else + amvdec_write_dos(core, AV_SCRATCH_8, buf_idx + 1); +} + +static int codec_h264_start(struct amvdec_session *sess) +{ + u32 workspace_offset; + struct amvdec_core *core = sess->core; + struct codec_h264 *h264 = sess->priv; + + /* Allocate some memory for the H.264 decoder's state */ + h264->workspace_vaddr = + dma_alloc_coherent(core->dev, SIZE_WORKSPACE, + &h264->workspace_paddr, GFP_KERNEL); + if (!h264->workspace_vaddr) + return -ENOMEM; + + /* Allocate some memory for the H.264 SEI dump */ + h264->sei_vaddr = dma_alloc_coherent(core->dev, SIZE_SEI, + &h264->sei_paddr, GFP_KERNEL); + if (!h264->sei_vaddr) + return -ENOMEM; + + amvdec_write_dos_bits(core, POWER_CTL_VLD, BIT(9) | BIT(6)); + + workspace_offset = h264->workspace_paddr - WORKSPACE_BUF_OFFSET; + amvdec_write_dos(core, AV_SCRATCH_1, workspace_offset); + amvdec_write_dos(core, AV_SCRATCH_G, h264->ext_fw_paddr); + amvdec_write_dos(core, AV_SCRATCH_I, h264->sei_paddr - + workspace_offset); + + /* Enable "error correction" */ + amvdec_write_dos(core, AV_SCRATCH_F, + (amvdec_read_dos(core, AV_SCRATCH_F) & 0xffffffc3) | + BIT(4) | BIT(7)); + + amvdec_write_dos(core, MDEC_PIC_DC_THRESH, 0x404038aa); + + return 0; +} + +static int codec_h264_stop(struct amvdec_session *sess) +{ + struct codec_h264 *h264 = sess->priv; + struct amvdec_core *core = sess->core; + + if (h264->ext_fw_vaddr) + dma_free_coherent(core->dev, SIZE_EXT_FW, + h264->ext_fw_vaddr, h264->ext_fw_paddr); + + if (h264->workspace_vaddr) + dma_free_coherent(core->dev, SIZE_WORKSPACE, + h264->workspace_vaddr, h264->workspace_paddr); + + if (h264->ref_vaddr) + dma_free_coherent(core->dev, h264->ref_size, + h264->ref_vaddr, h264->ref_paddr); + + if (h264->sei_vaddr) + dma_free_coherent(core->dev, SIZE_SEI, + h264->sei_vaddr, h264->sei_paddr); + + return 0; +} + +static int codec_h264_load_extended_firmware(struct amvdec_session *sess, + const u8 *data, u32 len) +{ + struct codec_h264 *h264; 
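+	/*
+	 * Keep a DMA-coherent copy of the first SIZE_EXT_FW (20 KiB) bytes of
+	 * the firmware data passed in; codec_h264_start() hands its physical
+	 * address to the decoder through AV_SCRATCH_G. The per-session state
+	 * (sess->priv) is allocated here as well and reused by the other ops.
+	 */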
+ struct amvdec_core *core = sess->core; + + if (len < SIZE_EXT_FW) + return -EINVAL; + + h264 = kzalloc(sizeof(*h264), GFP_KERNEL); + if (!h264) + return -ENOMEM; + + h264->ext_fw_vaddr = dma_alloc_coherent(core->dev, SIZE_EXT_FW, + &h264->ext_fw_paddr, + GFP_KERNEL); + if (!h264->ext_fw_vaddr) { + kfree(h264); + return -ENOMEM; + } + + memcpy(h264->ext_fw_vaddr, data, SIZE_EXT_FW); + sess->priv = h264; + + return 0; +} + +static const struct v4l2_fract par_table[] = { + { 1, 1 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, + { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 }, + { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 }, + { 64, 33 }, { 160, 99 }, { 4, 3 }, { 3, 2 }, + { 2, 1 } +}; + +static void codec_h264_set_par(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + u32 seq_info = amvdec_read_dos(core, AV_SCRATCH_2); + u32 ar_idc = (seq_info >> AR_IDC_BIT) & AR_IDC_MASK; + + if (!(seq_info & AR_PRESENT_FLAG)) + return; + + if (ar_idc == AR_EXTEND) { + u32 ar_info = amvdec_read_dos(core, AV_SCRATCH_3); + + sess->pixelaspect.numerator = ar_info & 0xffff; + sess->pixelaspect.denominator = (ar_info >> 16) & 0xffff; + return; + } + + if (ar_idc >= ARRAY_SIZE(par_table)) + return; + + sess->pixelaspect = par_table[ar_idc]; +} + +static void codec_h264_resume(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + struct codec_h264 *h264 = sess->priv; + u32 mb_width, mb_height, mb_total; + + amvdec_set_canvases(sess, + (u32[]){ ANC0_CANVAS_ADDR, 0 }, + (u32[]){ 24, 0 }); + + dev_dbg(core->dev, "max_refs = %u; actual_dpb_size = %u\n", + h264->max_refs, sess->num_dst_bufs); + + /* Align to a multiple of 4 macroblocks */ + mb_width = ALIGN(h264->mb_width, 4); + mb_height = ALIGN(h264->mb_height, 4); + mb_total = mb_width * mb_height; + + h264->ref_size = mb_total * MB_MV_SIZE * h264->max_refs; + h264->ref_vaddr = dma_alloc_coherent(core->dev, h264->ref_size, + &h264->ref_paddr, GFP_KERNEL); + if (!h264->ref_vaddr) { + amvdec_abort(sess); + return; + } + + /* Address to store the references' MVs */ + amvdec_write_dos(core, AV_SCRATCH_1, h264->ref_paddr); + /* End of ref MV */ + amvdec_write_dos(core, AV_SCRATCH_4, h264->ref_paddr + h264->ref_size); + + amvdec_write_dos(core, AV_SCRATCH_0, (h264->max_refs << 24) | + (sess->num_dst_bufs << 16) | + ((h264->max_refs - 1) << 8)); +} + +/* + * Configure the H.264 decoder when the parser detected a parameter set change + */ +static void codec_h264_src_change(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + struct codec_h264 *h264 = sess->priv; + u32 parsed_info, mb_total; + u32 crop_infor, crop_bottom, crop_right; + u32 frame_width, frame_height; + + sess->keyframe_found = 1; + + parsed_info = amvdec_read_dos(core, AV_SCRATCH_1); + + /* Total number of 16x16 macroblocks */ + mb_total = (parsed_info >> MB_TOTAL_BIT) & MB_TOTAL_MASK; + /* Number of macroblocks per line */ + h264->mb_width = parsed_info & MB_WIDTH_MASK; + /* Number of macroblock lines */ + h264->mb_height = mb_total / h264->mb_width; + + h264->max_refs = ((parsed_info >> MAX_REF_BIT) & MAX_REF_MASK) + 1; + + crop_infor = amvdec_read_dos(core, AV_SCRATCH_6); + crop_bottom = (crop_infor & 0xff); + crop_right = (crop_infor >> 16) & 0xff; + + frame_width = h264->mb_width * 16 - crop_right; + frame_height = h264->mb_height * 16 - crop_bottom; + + dev_dbg(core->dev, "frame: %ux%u; crop: %u %u\n", + frame_width, frame_height, crop_right, crop_bottom); + + codec_h264_set_par(sess); + amvdec_src_change(sess, frame_width, frame_height, 
h264->max_refs + 5); +} + +/* + * The bitstream offset is split in half in 2 different registers. + * Fetch its MSB here, which location depends on the frame number. + */ +static u32 get_offset_msb(struct amvdec_core *core, int frame_num) +{ + int take_msb = frame_num % 2; + int reg_offset = (frame_num / 2) * 4; + u32 offset_msb = amvdec_read_dos(core, AV_SCRATCH_A + reg_offset); + + if (take_msb) + return offset_msb & 0xffff0000; + + return (offset_msb & 0x0000ffff) << 16; +} + +static void codec_h264_frames_ready(struct amvdec_session *sess, u32 status) +{ + struct amvdec_core *core = sess->core; + int error_count; + int num_frames; + int i; + + error_count = amvdec_read_dos(core, AV_SCRATCH_D); + num_frames = (status >> 8) & 0xff; + if (error_count) { + dev_warn(core->dev, + "decoder error(s) happened, count %d\n", error_count); + amvdec_write_dos(core, AV_SCRATCH_D, 0); + } + + for (i = 0; i < num_frames; i++) { + u32 frame_status = amvdec_read_dos(core, AV_SCRATCH_1 + i * 4); + u32 buffer_index = frame_status & BUF_IDX_MASK; + u32 pic_struct = (frame_status >> PIC_STRUCT_BIT) & + PIC_STRUCT_MASK; + u32 offset = (frame_status >> OFFSET_BIT) & OFFSET_MASK; + u32 field = V4L2_FIELD_NONE; + + /* + * A buffer decode error means it was decoded, + * but part of the picture will have artifacts. + * Typical reason is a temporarily corrupted bitstream + */ + if (frame_status & ERROR_FLAG) + dev_dbg(core->dev, "Buffer %d decode error\n", + buffer_index); + + if (pic_struct == PIC_TOP_BOT) + field = V4L2_FIELD_INTERLACED_TB; + else if (pic_struct == PIC_BOT_TOP) + field = V4L2_FIELD_INTERLACED_BT; + + offset |= get_offset_msb(core, i); + amvdec_dst_buf_done_idx(sess, buffer_index, offset, field); + } +} + +static irqreturn_t codec_h264_threaded_isr(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + u32 status; + u32 size; + u8 cmd; + + status = amvdec_read_dos(core, AV_SCRATCH_0); + cmd = status & CMD_MASK; + + switch (cmd) { + case CMD_SRC_CHANGE: + codec_h264_src_change(sess); + break; + case CMD_FRAMES_READY: + codec_h264_frames_ready(sess, status); + break; + case CMD_FATAL_ERROR: + dev_err(core->dev, "H.264 decoder fatal error\n"); + goto abort; + case CMD_BAD_WIDTH: + size = (amvdec_read_dos(core, AV_SCRATCH_1) + 1) * 16; + dev_err(core->dev, "Unsupported video width: %u\n", size); + goto abort; + case CMD_BAD_HEIGHT: + size = (amvdec_read_dos(core, AV_SCRATCH_1) + 1) * 16; + dev_err(core->dev, "Unsupported video height: %u\n", size); + goto abort; + case 0: /* Unused but not worth printing for */ + case 9: + break; + default: + dev_info(core->dev, "Unexpected H264 ISR: %08X\n", cmd); + break; + } + + if (cmd && cmd != CMD_SRC_CHANGE) + amvdec_write_dos(core, AV_SCRATCH_0, 0); + + /* Decoder has some SEI data for us ; ignore */ + if (amvdec_read_dos(core, AV_SCRATCH_J) & SEI_DATA_READY) + amvdec_write_dos(core, AV_SCRATCH_J, 0); + + return IRQ_HANDLED; +abort: + amvdec_abort(sess); + return IRQ_HANDLED; +} + +static irqreturn_t codec_h264_isr(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + + amvdec_write_dos(core, ASSIST_MBOX1_CLR_REG, 1); + + return IRQ_WAKE_THREAD; +} + +struct amvdec_codec_ops codec_h264_ops = { + .start = codec_h264_start, + .stop = codec_h264_stop, + .load_extended_firmware = codec_h264_load_extended_firmware, + .isr = codec_h264_isr, + .threaded_isr = codec_h264_threaded_isr, + .can_recycle = codec_h264_can_recycle, + .recycle = codec_h264_recycle, + .eos_sequence = codec_h264_eos_sequence, + .resume = 
codec_h264_resume, +}; diff --git a/drivers/staging/media/meson/vdec/codec_h264.h b/drivers/staging/media/meson/vdec/codec_h264.h new file mode 100644 index 000000000000..7cb4fb86ff36 --- /dev/null +++ b/drivers/staging/media/meson/vdec/codec_h264.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2019 BayLibre, SAS + * Author: Maxime Jourdan <mjourdan@baylibre.com> + */ + +#ifndef __MESON_VDEC_CODEC_H264_H_ +#define __MESON_VDEC_CODEC_H264_H_ + +#include "vdec.h" + +extern struct amvdec_codec_ops codec_h264_ops; + +#endif diff --git a/drivers/staging/media/meson/vdec/codec_hevc_common.c b/drivers/staging/media/meson/vdec/codec_hevc_common.c new file mode 100644 index 000000000000..0315cc0911cd --- /dev/null +++ b/drivers/staging/media/meson/vdec/codec_hevc_common.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018 Maxime Jourdan <mjourdan@baylibre.com> + */ + +#include <media/v4l2-mem2mem.h> +#include <media/videobuf2-dma-contig.h> + +#include "codec_hevc_common.h" +#include "vdec_helpers.h" +#include "hevc_regs.h" + +#define MMU_COMPRESS_HEADER_SIZE 0x48000 +#define MMU_MAP_SIZE 0x4800 + +const u16 vdec_hevc_parser_cmd[] = { + 0x0401, 0x8401, 0x0800, 0x0402, + 0x9002, 0x1423, 0x8CC3, 0x1423, + 0x8804, 0x9825, 0x0800, 0x04FE, + 0x8406, 0x8411, 0x1800, 0x8408, + 0x8409, 0x8C2A, 0x9C2B, 0x1C00, + 0x840F, 0x8407, 0x8000, 0x8408, + 0x2000, 0xA800, 0x8410, 0x04DE, + 0x840C, 0x840D, 0xAC00, 0xA000, + 0x08C0, 0x08E0, 0xA40E, 0xFC00, + 0x7C00 +}; + +/* Configure decode head read mode */ +void codec_hevc_setup_decode_head(struct amvdec_session *sess, int is_10bit) +{ + struct amvdec_core *core = sess->core; + u32 body_size = amvdec_am21c_body_size(sess->width, sess->height); + u32 head_size = amvdec_am21c_head_size(sess->width, sess->height); + + if (!codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit)) { + /* Enable 2-plane reference read mode */ + amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, BIT(31)); + return; + } + + if (codec_hevc_use_mmu(core->platform->revision, + sess->pixfmt_cap, is_10bit)) + amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, BIT(4)); + else + amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, 0); + + if (core->platform->revision < VDEC_REVISION_SM1) + amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL2, body_size / 32); + amvdec_write_dos(core, HEVC_CM_BODY_LENGTH, body_size); + amvdec_write_dos(core, HEVC_CM_HEADER_OFFSET, body_size); + amvdec_write_dos(core, HEVC_CM_HEADER_LENGTH, head_size); +} +EXPORT_SYMBOL_GPL(codec_hevc_setup_decode_head); + +static void codec_hevc_setup_buffers_gxbb(struct amvdec_session *sess, + struct codec_hevc_common *comm, + int is_10bit) +{ + struct amvdec_core *core = sess->core; + struct v4l2_m2m_buffer *buf; + u32 buf_num = v4l2_m2m_num_dst_bufs_ready(sess->m2m_ctx); + dma_addr_t buf_y_paddr = 0; + dma_addr_t buf_uv_paddr = 0; + u32 idx = 0; + u32 val; + int i; + + amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0); + + v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) { + struct vb2_buffer *vb = &buf->vb.vb2_buf; + + idx = vb->index; + + if (codec_hevc_use_downsample(sess->pixfmt_cap, is_10bit)) + buf_y_paddr = comm->fbc_buffer_paddr[idx]; + else + buf_y_paddr = vb2_dma_contig_plane_dma_addr(vb, 0); + + if (codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit)) { + val = buf_y_paddr | (idx << 8) | 1; + amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + val); + } else { + buf_uv_paddr = vb2_dma_contig_plane_dma_addr(vb, 1); + val = buf_y_paddr | ((idx * 2) << 8) | 1; + amvdec_write_dos(core, 
HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + val); + val = buf_uv_paddr | ((idx * 2 + 1) << 8) | 1; + amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + val); + } + } + + if (codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit)) + val = buf_y_paddr | (idx << 8) | 1; + else + val = buf_y_paddr | ((idx * 2) << 8) | 1; + + /* Fill the remaining unused slots with the last buffer's Y addr */ + for (i = buf_num; i < MAX_REF_PIC_NUM; ++i) + amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, val); + + amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1); + amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, 1); + for (i = 0; i < 32; ++i) + amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); +} + +static void codec_hevc_setup_buffers_gxl(struct amvdec_session *sess, + struct codec_hevc_common *comm, + int is_10bit) +{ + struct amvdec_core *core = sess->core; + struct v4l2_m2m_buffer *buf; + u32 revision = core->platform->revision; + u32 pixfmt_cap = sess->pixfmt_cap; + int i; + + amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + BIT(2) | BIT(1)); + + v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) { + struct vb2_buffer *vb = &buf->vb.vb2_buf; + dma_addr_t buf_y_paddr = 0; + dma_addr_t buf_uv_paddr = 0; + u32 idx = vb->index; + + if (codec_hevc_use_mmu(revision, pixfmt_cap, is_10bit)) + buf_y_paddr = comm->mmu_header_paddr[idx]; + else if (codec_hevc_use_downsample(pixfmt_cap, is_10bit)) + buf_y_paddr = comm->fbc_buffer_paddr[idx]; + else + buf_y_paddr = vb2_dma_contig_plane_dma_addr(vb, 0); + + amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_DATA, + buf_y_paddr >> 5); + + if (!codec_hevc_use_fbc(pixfmt_cap, is_10bit)) { + buf_uv_paddr = vb2_dma_contig_plane_dma_addr(vb, 1); + amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_DATA, + buf_uv_paddr >> 5); + } + } + + amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1); + amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, 1); + for (i = 0; i < 32; ++i) + amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); +} + +void codec_hevc_free_fbc_buffers(struct amvdec_session *sess, + struct codec_hevc_common *comm) +{ + struct device *dev = sess->core->dev; + u32 am21_size = amvdec_am21c_size(sess->width, sess->height); + int i; + + for (i = 0; i < MAX_REF_PIC_NUM; ++i) { + if (comm->fbc_buffer_vaddr[i]) { + dma_free_coherent(dev, am21_size, + comm->fbc_buffer_vaddr[i], + comm->fbc_buffer_paddr[i]); + comm->fbc_buffer_vaddr[i] = NULL; + } + } +} +EXPORT_SYMBOL_GPL(codec_hevc_free_fbc_buffers); + +static int codec_hevc_alloc_fbc_buffers(struct amvdec_session *sess, + struct codec_hevc_common *comm) +{ + struct device *dev = sess->core->dev; + struct v4l2_m2m_buffer *buf; + u32 am21_size = amvdec_am21c_size(sess->width, sess->height); + + v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) { + u32 idx = buf->vb.vb2_buf.index; + dma_addr_t paddr; + void *vaddr = dma_alloc_coherent(dev, am21_size, &paddr, + GFP_KERNEL); + if (!vaddr) { + codec_hevc_free_fbc_buffers(sess, comm); + return -ENOMEM; + } + + comm->fbc_buffer_vaddr[idx] = vaddr; + comm->fbc_buffer_paddr[idx] = paddr; + } + + return 0; +} + +void codec_hevc_free_mmu_headers(struct amvdec_session *sess, + struct codec_hevc_common *comm) +{ + struct device *dev = sess->core->dev; + int i; + + for (i = 0; i < MAX_REF_PIC_NUM; ++i) { + if (comm->mmu_header_vaddr[i]) { + dma_free_coherent(dev, MMU_COMPRESS_HEADER_SIZE, + comm->mmu_header_vaddr[i], + comm->mmu_header_paddr[i]); + comm->mmu_header_vaddr[i] = NULL; + } + } + + if (comm->mmu_map_vaddr) { + dma_free_coherent(dev, 
MMU_MAP_SIZE, + comm->mmu_map_vaddr, + comm->mmu_map_paddr); + comm->mmu_map_vaddr = NULL; + } +} +EXPORT_SYMBOL_GPL(codec_hevc_free_mmu_headers); + +static int codec_hevc_alloc_mmu_headers(struct amvdec_session *sess, + struct codec_hevc_common *comm) +{ + struct device *dev = sess->core->dev; + struct v4l2_m2m_buffer *buf; + + comm->mmu_map_vaddr = dma_alloc_coherent(dev, MMU_MAP_SIZE, + &comm->mmu_map_paddr, + GFP_KERNEL); + if (!comm->mmu_map_vaddr) + return -ENOMEM; + + v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) { + u32 idx = buf->vb.vb2_buf.index; + dma_addr_t paddr; + void *vaddr = dma_alloc_coherent(dev, MMU_COMPRESS_HEADER_SIZE, + &paddr, GFP_KERNEL); + if (!vaddr) { + codec_hevc_free_mmu_headers(sess, comm); + return -ENOMEM; + } + + comm->mmu_header_vaddr[idx] = vaddr; + comm->mmu_header_paddr[idx] = paddr; + } + + return 0; +} + +int codec_hevc_setup_buffers(struct amvdec_session *sess, + struct codec_hevc_common *comm, + int is_10bit) +{ + struct amvdec_core *core = sess->core; + int ret; + + if (codec_hevc_use_downsample(sess->pixfmt_cap, is_10bit)) { + ret = codec_hevc_alloc_fbc_buffers(sess, comm); + if (ret) + return ret; + } + + if (codec_hevc_use_mmu(core->platform->revision, + sess->pixfmt_cap, is_10bit)) { + ret = codec_hevc_alloc_mmu_headers(sess, comm); + if (ret) { + codec_hevc_free_fbc_buffers(sess, comm); + return ret; + } + } + + if (core->platform->revision == VDEC_REVISION_GXBB) + codec_hevc_setup_buffers_gxbb(sess, comm, is_10bit); + else + codec_hevc_setup_buffers_gxl(sess, comm, is_10bit); + + return 0; +} +EXPORT_SYMBOL_GPL(codec_hevc_setup_buffers); + +void codec_hevc_fill_mmu_map(struct amvdec_session *sess, + struct codec_hevc_common *comm, + struct vb2_buffer *vb) +{ + u32 size = amvdec_am21c_size(sess->width, sess->height); + u32 nb_pages = size / PAGE_SIZE; + u32 *mmu_map = comm->mmu_map_vaddr; + u32 first_page; + u32 i; + + if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M) + first_page = comm->fbc_buffer_paddr[vb->index] >> PAGE_SHIFT; + else + first_page = vb2_dma_contig_plane_dma_addr(vb, 0) >> PAGE_SHIFT; + + for (i = 0; i < nb_pages; ++i) + mmu_map[i] = first_page + i; +} +EXPORT_SYMBOL_GPL(codec_hevc_fill_mmu_map); diff --git a/drivers/staging/media/meson/vdec/codec_hevc_common.h b/drivers/staging/media/meson/vdec/codec_hevc_common.h new file mode 100644 index 000000000000..88e4379ba1ee --- /dev/null +++ b/drivers/staging/media/meson/vdec/codec_hevc_common.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2018 BayLibre, SAS + * Author: Maxime Jourdan <mjourdan@baylibre.com> + */ + +#ifndef __MESON_VDEC_HEVC_COMMON_H_ +#define __MESON_VDEC_HEVC_COMMON_H_ + +#include "vdec.h" + +#define PARSER_CMD_SKIP_CFG_0 0x0000090b +#define PARSER_CMD_SKIP_CFG_1 0x1b14140f +#define PARSER_CMD_SKIP_CFG_2 0x001b1910 + +#define VDEC_HEVC_PARSER_CMD_LEN 37 +extern const u16 vdec_hevc_parser_cmd[VDEC_HEVC_PARSER_CMD_LEN]; + +#define MAX_REF_PIC_NUM 24 + +struct codec_hevc_common { + void *fbc_buffer_vaddr[MAX_REF_PIC_NUM]; + dma_addr_t fbc_buffer_paddr[MAX_REF_PIC_NUM]; + + void *mmu_header_vaddr[MAX_REF_PIC_NUM]; + dma_addr_t mmu_header_paddr[MAX_REF_PIC_NUM]; + + void *mmu_map_vaddr; + dma_addr_t mmu_map_paddr; +}; + +/* Returns 1 if we must use framebuffer compression */ +static inline int codec_hevc_use_fbc(u32 pixfmt, int is_10bit) +{ + /* TOFIX: Handle Amlogic Compressed buffer for 8bit also */ + return is_10bit; +} + +/* Returns 1 if we are decoding 10-bit but outputting 8-bit NV12 */ +static inline int 
codec_hevc_use_downsample(u32 pixfmt, int is_10bit) +{ + return is_10bit; +} + +/* Returns 1 if we are decoding using the IOMMU */ +static inline int codec_hevc_use_mmu(u32 revision, u32 pixfmt, int is_10bit) +{ + return revision >= VDEC_REVISION_G12A && + codec_hevc_use_fbc(pixfmt, is_10bit); +} + +/** + * Configure decode head read mode + */ +void codec_hevc_setup_decode_head(struct amvdec_session *sess, int is_10bit); + +void codec_hevc_free_fbc_buffers(struct amvdec_session *sess, + struct codec_hevc_common *comm); + +void codec_hevc_free_mmu_headers(struct amvdec_session *sess, + struct codec_hevc_common *comm); + +int codec_hevc_setup_buffers(struct amvdec_session *sess, + struct codec_hevc_common *comm, + int is_10bit); + +void codec_hevc_fill_mmu_map(struct amvdec_session *sess, + struct codec_hevc_common *comm, + struct vb2_buffer *vb); + +#endif diff --git a/drivers/staging/media/meson/vdec/codec_vp9.c b/drivers/staging/media/meson/vdec/codec_vp9.c new file mode 100644 index 000000000000..60e4fc0052b3 --- /dev/null +++ b/drivers/staging/media/meson/vdec/codec_vp9.c @@ -0,0 +1,2141 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018 Maxime Jourdan <mjourdan@baylibre.com> + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + */ + +#include <media/v4l2-mem2mem.h> +#include <media/videobuf2-dma-contig.h> + +#include "dos_regs.h" +#include "hevc_regs.h" +#include "codec_vp9.h" +#include "vdec_helpers.h" +#include "codec_hevc_common.h" + +/* HEVC reg mapping */ +#define VP9_DEC_STATUS_REG HEVC_ASSIST_SCRATCH_0 + #define VP9_10B_DECODE_SLICE 5 + #define VP9_HEAD_PARSER_DONE 0xf0 +#define VP9_RPM_BUFFER HEVC_ASSIST_SCRATCH_1 +#define VP9_SHORT_TERM_RPS HEVC_ASSIST_SCRATCH_2 +#define VP9_ADAPT_PROB_REG HEVC_ASSIST_SCRATCH_3 +#define VP9_MMU_MAP_BUFFER HEVC_ASSIST_SCRATCH_4 +#define VP9_PPS_BUFFER HEVC_ASSIST_SCRATCH_5 +#define VP9_SAO_UP HEVC_ASSIST_SCRATCH_6 +#define VP9_STREAM_SWAP_BUFFER HEVC_ASSIST_SCRATCH_7 +#define VP9_STREAM_SWAP_BUFFER2 HEVC_ASSIST_SCRATCH_8 +#define VP9_PROB_SWAP_BUFFER HEVC_ASSIST_SCRATCH_9 +#define VP9_COUNT_SWAP_BUFFER HEVC_ASSIST_SCRATCH_A +#define VP9_SEG_MAP_BUFFER HEVC_ASSIST_SCRATCH_B +#define VP9_SCALELUT HEVC_ASSIST_SCRATCH_D +#define VP9_WAIT_FLAG HEVC_ASSIST_SCRATCH_E +#define LMEM_DUMP_ADR HEVC_ASSIST_SCRATCH_F +#define NAL_SEARCH_CTL HEVC_ASSIST_SCRATCH_I +#define VP9_DECODE_MODE HEVC_ASSIST_SCRATCH_J + #define DECODE_MODE_SINGLE 0 +#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K +#define HEVC_DECODE_COUNT HEVC_ASSIST_SCRATCH_M +#define HEVC_DECODE_SIZE HEVC_ASSIST_SCRATCH_N + +/* VP9 Constants */ +#define LCU_SIZE 64 +#define MAX_REF_PIC_NUM 24 +#define REFS_PER_FRAME 3 +#define REF_FRAMES 8 +#define MV_MEM_UNIT 0x240 +#define ADAPT_PROB_SIZE 0xf80 + +enum FRAME_TYPE { + KEY_FRAME = 0, + INTER_FRAME = 1, + FRAME_TYPES, +}; + +/* VP9 Workspace layout */ +#define MPRED_MV_BUF_SIZE 0x120000 + +#define IPP_SIZE 0x4000 +#define SAO_ABV_SIZE 0x30000 +#define SAO_VB_SIZE 0x30000 +#define SH_TM_RPS_SIZE 0x800 +#define VPS_SIZE 0x800 +#define SPS_SIZE 0x800 +#define PPS_SIZE 0x2000 +#define SAO_UP_SIZE 0x2800 +#define SWAP_BUF_SIZE 0x800 +#define SWAP_BUF2_SIZE 0x800 +#define SCALELUT_SIZE 0x8000 +#define DBLK_PARA_SIZE 0x80000 +#define DBLK_DATA_SIZE 0x80000 +#define SEG_MAP_SIZE 0xd800 +#define PROB_SIZE 0x5000 +#define COUNT_SIZE 0x3000 +#define MMU_VBH_SIZE 0x5000 +#define MPRED_ABV_SIZE 0x10000 +#define MPRED_MV_SIZE (MPRED_MV_BUF_SIZE * MAX_REF_PIC_NUM) +#define RPM_BUF_SIZE 0x100 +#define LMEM_SIZE 0x800 + +#define IPP_OFFSET 0x00 
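+/*
+ * The whole workspace is one dma_alloc_coherent() allocation of
+ * SIZE_WORKSPACE bytes, carved into consecutive regions: each *_OFFSET is
+ * the previous region's offset plus its size, e.g.
+ * SAO_ABV_OFFSET = 0x0 + 0x4000 = 0x4000 and
+ * SAO_VB_OFFSET = 0x4000 + 0x30000 = 0x34000.
+ * codec_vp9_setup_workspace() then programs each region's DMA address
+ * (workspace_paddr + *_OFFSET) into the corresponding decoder registers.
+ */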
+#define SAO_ABV_OFFSET (IPP_OFFSET + IPP_SIZE) +#define SAO_VB_OFFSET (SAO_ABV_OFFSET + SAO_ABV_SIZE) +#define SH_TM_RPS_OFFSET (SAO_VB_OFFSET + SAO_VB_SIZE) +#define VPS_OFFSET (SH_TM_RPS_OFFSET + SH_TM_RPS_SIZE) +#define SPS_OFFSET (VPS_OFFSET + VPS_SIZE) +#define PPS_OFFSET (SPS_OFFSET + SPS_SIZE) +#define SAO_UP_OFFSET (PPS_OFFSET + PPS_SIZE) +#define SWAP_BUF_OFFSET (SAO_UP_OFFSET + SAO_UP_SIZE) +#define SWAP_BUF2_OFFSET (SWAP_BUF_OFFSET + SWAP_BUF_SIZE) +#define SCALELUT_OFFSET (SWAP_BUF2_OFFSET + SWAP_BUF2_SIZE) +#define DBLK_PARA_OFFSET (SCALELUT_OFFSET + SCALELUT_SIZE) +#define DBLK_DATA_OFFSET (DBLK_PARA_OFFSET + DBLK_PARA_SIZE) +#define SEG_MAP_OFFSET (DBLK_DATA_OFFSET + DBLK_DATA_SIZE) +#define PROB_OFFSET (SEG_MAP_OFFSET + SEG_MAP_SIZE) +#define COUNT_OFFSET (PROB_OFFSET + PROB_SIZE) +#define MMU_VBH_OFFSET (COUNT_OFFSET + COUNT_SIZE) +#define MPRED_ABV_OFFSET (MMU_VBH_OFFSET + MMU_VBH_SIZE) +#define MPRED_MV_OFFSET (MPRED_ABV_OFFSET + MPRED_ABV_SIZE) +#define RPM_OFFSET (MPRED_MV_OFFSET + MPRED_MV_SIZE) +#define LMEM_OFFSET (RPM_OFFSET + RPM_BUF_SIZE) + +#define SIZE_WORKSPACE ALIGN(LMEM_OFFSET + LMEM_SIZE, 64 * SZ_1K) + +#define NONE -1 +#define INTRA_FRAME 0 +#define LAST_FRAME 1 +#define GOLDEN_FRAME 2 +#define ALTREF_FRAME 3 +#define MAX_REF_FRAMES 4 + +/* + * Defines, declarations, sub-functions for vp9 de-block loop + filter Thr/Lvl table update + * - struct segmentation is for loop filter only (removed something) + * - function "vp9_loop_filter_init" and "vp9_loop_filter_frame_init" will + be instantiated in C_Entry + * - vp9_loop_filter_init run once before decoding start + * - vp9_loop_filter_frame_init run before every frame decoding start + * - set video format to VP9 is in vp9_loop_filter_init + */ +#define MAX_LOOP_FILTER 63 +#define MAX_REF_LF_DELTAS 4 +#define MAX_MODE_LF_DELTAS 2 +#define SEGMENT_DELTADATA 0 +#define SEGMENT_ABSDATA 1 +#define MAX_SEGMENTS 8 + +/* VP9 PROB processing defines */ +#define VP9_PARTITION_START 0 +#define VP9_PARTITION_SIZE_STEP (3 * 4) +#define VP9_PARTITION_ONE_SIZE (4 * VP9_PARTITION_SIZE_STEP) +#define VP9_PARTITION_KEY_START 0 +#define VP9_PARTITION_P_START VP9_PARTITION_ONE_SIZE +#define VP9_PARTITION_SIZE (2 * VP9_PARTITION_ONE_SIZE) +#define VP9_SKIP_START (VP9_PARTITION_START + VP9_PARTITION_SIZE) +#define VP9_SKIP_SIZE 4 /* only use 3*/ +#define VP9_TX_MODE_START (VP9_SKIP_START + VP9_SKIP_SIZE) +#define VP9_TX_MODE_8_0_OFFSET 0 +#define VP9_TX_MODE_8_1_OFFSET 1 +#define VP9_TX_MODE_16_0_OFFSET 2 +#define VP9_TX_MODE_16_1_OFFSET 4 +#define VP9_TX_MODE_32_0_OFFSET 6 +#define VP9_TX_MODE_32_1_OFFSET 9 +#define VP9_TX_MODE_SIZE 12 +#define VP9_COEF_START (VP9_TX_MODE_START + VP9_TX_MODE_SIZE) +#define VP9_COEF_BAND_0_OFFSET 0 +#define VP9_COEF_BAND_1_OFFSET (VP9_COEF_BAND_0_OFFSET + 3 * 3 + 1) +#define VP9_COEF_BAND_2_OFFSET (VP9_COEF_BAND_1_OFFSET + 6 * 3) +#define VP9_COEF_BAND_3_OFFSET (VP9_COEF_BAND_2_OFFSET + 6 * 3) +#define VP9_COEF_BAND_4_OFFSET (VP9_COEF_BAND_3_OFFSET + 6 * 3) +#define VP9_COEF_BAND_5_OFFSET (VP9_COEF_BAND_4_OFFSET + 6 * 3) +#define VP9_COEF_SIZE_ONE_SET 100 /* ((3 + 5 * 6) * 3 + 1 padding)*/ +#define VP9_COEF_4X4_START (VP9_COEF_START + 0 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_8X8_START (VP9_COEF_START + 4 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_16X16_START (VP9_COEF_START + 8 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_32X32_START (VP9_COEF_START + 12 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_SIZE_PLANE (2 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_SIZE (4 * 2 * 2 * VP9_COEF_SIZE_ONE_SET) 
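+/*
+ * These offsets chain the same way as the workspace layout above, e.g.
+ * VP9_COEF_START = VP9_TX_MODE_START + VP9_TX_MODE_SIZE = 100 + 12 = 112;
+ * each transform size then spans 4 * VP9_COEF_SIZE_ONE_SET entries (hence
+ * the 0/4/8/12 strides of the 4X4/8X8/16X16/32X32 starts), giving
+ * VP9_INTER_MODE_START = VP9_COEF_START + VP9_COEF_SIZE = 112 + 1600 = 1712.
+ */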
+#define VP9_INTER_MODE_START (VP9_COEF_START + VP9_COEF_SIZE) +#define VP9_INTER_MODE_SIZE 24 /* only use 21 (# * 7)*/ +#define VP9_INTERP_START (VP9_INTER_MODE_START + VP9_INTER_MODE_SIZE) +#define VP9_INTERP_SIZE 8 +#define VP9_INTRA_INTER_START (VP9_INTERP_START + VP9_INTERP_SIZE) +#define VP9_INTRA_INTER_SIZE 4 +#define VP9_INTERP_INTRA_INTER_START VP9_INTERP_START +#define VP9_INTERP_INTRA_INTER_SIZE (VP9_INTERP_SIZE + VP9_INTRA_INTER_SIZE) +#define VP9_COMP_INTER_START \ + (VP9_INTERP_INTRA_INTER_START + VP9_INTERP_INTRA_INTER_SIZE) +#define VP9_COMP_INTER_SIZE 5 +#define VP9_COMP_REF_START (VP9_COMP_INTER_START + VP9_COMP_INTER_SIZE) +#define VP9_COMP_REF_SIZE 5 +#define VP9_SINGLE_REF_START (VP9_COMP_REF_START + VP9_COMP_REF_SIZE) +#define VP9_SINGLE_REF_SIZE 10 +#define VP9_REF_MODE_START VP9_COMP_INTER_START +#define VP9_REF_MODE_SIZE \ + (VP9_COMP_INTER_SIZE + VP9_COMP_REF_SIZE + VP9_SINGLE_REF_SIZE) +#define VP9_IF_Y_MODE_START (VP9_REF_MODE_START + VP9_REF_MODE_SIZE) +#define VP9_IF_Y_MODE_SIZE 36 +#define VP9_IF_UV_MODE_START (VP9_IF_Y_MODE_START + VP9_IF_Y_MODE_SIZE) +#define VP9_IF_UV_MODE_SIZE 92 /* only use 90*/ +#define VP9_MV_JOINTS_START (VP9_IF_UV_MODE_START + VP9_IF_UV_MODE_SIZE) +#define VP9_MV_JOINTS_SIZE 3 +#define VP9_MV_SIGN_0_START (VP9_MV_JOINTS_START + VP9_MV_JOINTS_SIZE) +#define VP9_MV_SIGN_0_SIZE 1 +#define VP9_MV_CLASSES_0_START (VP9_MV_SIGN_0_START + VP9_MV_SIGN_0_SIZE) +#define VP9_MV_CLASSES_0_SIZE 10 +#define VP9_MV_CLASS0_0_START \ + (VP9_MV_CLASSES_0_START + VP9_MV_CLASSES_0_SIZE) +#define VP9_MV_CLASS0_0_SIZE 1 +#define VP9_MV_BITS_0_START (VP9_MV_CLASS0_0_START + VP9_MV_CLASS0_0_SIZE) +#define VP9_MV_BITS_0_SIZE 10 +#define VP9_MV_SIGN_1_START (VP9_MV_BITS_0_START + VP9_MV_BITS_0_SIZE) +#define VP9_MV_SIGN_1_SIZE 1 +#define VP9_MV_CLASSES_1_START \ + (VP9_MV_SIGN_1_START + VP9_MV_SIGN_1_SIZE) +#define VP9_MV_CLASSES_1_SIZE 10 +#define VP9_MV_CLASS0_1_START \ + (VP9_MV_CLASSES_1_START + VP9_MV_CLASSES_1_SIZE) +#define VP9_MV_CLASS0_1_SIZE 1 +#define VP9_MV_BITS_1_START \ + (VP9_MV_CLASS0_1_START + VP9_MV_CLASS0_1_SIZE) +#define VP9_MV_BITS_1_SIZE 10 +#define VP9_MV_CLASS0_FP_0_START \ + (VP9_MV_BITS_1_START + VP9_MV_BITS_1_SIZE) +#define VP9_MV_CLASS0_FP_0_SIZE 9 +#define VP9_MV_CLASS0_FP_1_START \ + (VP9_MV_CLASS0_FP_0_START + VP9_MV_CLASS0_FP_0_SIZE) +#define VP9_MV_CLASS0_FP_1_SIZE 9 +#define VP9_MV_CLASS0_HP_0_START \ + (VP9_MV_CLASS0_FP_1_START + VP9_MV_CLASS0_FP_1_SIZE) +#define VP9_MV_CLASS0_HP_0_SIZE 2 +#define VP9_MV_CLASS0_HP_1_START \ + (VP9_MV_CLASS0_HP_0_START + VP9_MV_CLASS0_HP_0_SIZE) +#define VP9_MV_CLASS0_HP_1_SIZE 2 +#define VP9_MV_START VP9_MV_JOINTS_START +#define VP9_MV_SIZE 72 /*only use 69*/ + +#define VP9_TOTAL_SIZE (VP9_MV_START + VP9_MV_SIZE) + +/* VP9 COUNT mem processing defines */ +#define VP9_COEF_COUNT_START 0 +#define VP9_COEF_COUNT_BAND_0_OFFSET 0 +#define VP9_COEF_COUNT_BAND_1_OFFSET \ + (VP9_COEF_COUNT_BAND_0_OFFSET + 3 * 5) +#define VP9_COEF_COUNT_BAND_2_OFFSET \ + (VP9_COEF_COUNT_BAND_1_OFFSET + 6 * 5) +#define VP9_COEF_COUNT_BAND_3_OFFSET \ + (VP9_COEF_COUNT_BAND_2_OFFSET + 6 * 5) +#define VP9_COEF_COUNT_BAND_4_OFFSET \ + (VP9_COEF_COUNT_BAND_3_OFFSET + 6 * 5) +#define VP9_COEF_COUNT_BAND_5_OFFSET \ + (VP9_COEF_COUNT_BAND_4_OFFSET + 6 * 5) +#define VP9_COEF_COUNT_SIZE_ONE_SET 165 /* ((3 + 5 * 6) * 5 */ +#define VP9_COEF_COUNT_4X4_START \ + (VP9_COEF_COUNT_START + 0 * VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_8X8_START \ + (VP9_COEF_COUNT_START + 4 * VP9_COEF_COUNT_SIZE_ONE_SET) +#define 
VP9_COEF_COUNT_16X16_START \ + (VP9_COEF_COUNT_START + 8 * VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_32X32_START \ + (VP9_COEF_COUNT_START + 12 * VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_SIZE_PLANE (2 * VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_SIZE (4 * 2 * 2 * VP9_COEF_COUNT_SIZE_ONE_SET) + +#define VP9_INTRA_INTER_COUNT_START \ + (VP9_COEF_COUNT_START + VP9_COEF_COUNT_SIZE) +#define VP9_INTRA_INTER_COUNT_SIZE (4 * 2) +#define VP9_COMP_INTER_COUNT_START \ + (VP9_INTRA_INTER_COUNT_START + VP9_INTRA_INTER_COUNT_SIZE) +#define VP9_COMP_INTER_COUNT_SIZE (5 * 2) +#define VP9_COMP_REF_COUNT_START \ + (VP9_COMP_INTER_COUNT_START + VP9_COMP_INTER_COUNT_SIZE) +#define VP9_COMP_REF_COUNT_SIZE (5 * 2) +#define VP9_SINGLE_REF_COUNT_START \ + (VP9_COMP_REF_COUNT_START + VP9_COMP_REF_COUNT_SIZE) +#define VP9_SINGLE_REF_COUNT_SIZE (10 * 2) +#define VP9_TX_MODE_COUNT_START \ + (VP9_SINGLE_REF_COUNT_START + VP9_SINGLE_REF_COUNT_SIZE) +#define VP9_TX_MODE_COUNT_SIZE (12 * 2) +#define VP9_SKIP_COUNT_START \ + (VP9_TX_MODE_COUNT_START + VP9_TX_MODE_COUNT_SIZE) +#define VP9_SKIP_COUNT_SIZE (3 * 2) +#define VP9_MV_SIGN_0_COUNT_START \ + (VP9_SKIP_COUNT_START + VP9_SKIP_COUNT_SIZE) +#define VP9_MV_SIGN_0_COUNT_SIZE (1 * 2) +#define VP9_MV_SIGN_1_COUNT_START \ + (VP9_MV_SIGN_0_COUNT_START + VP9_MV_SIGN_0_COUNT_SIZE) +#define VP9_MV_SIGN_1_COUNT_SIZE (1 * 2) +#define VP9_MV_BITS_0_COUNT_START \ + (VP9_MV_SIGN_1_COUNT_START + VP9_MV_SIGN_1_COUNT_SIZE) +#define VP9_MV_BITS_0_COUNT_SIZE (10 * 2) +#define VP9_MV_BITS_1_COUNT_START \ + (VP9_MV_BITS_0_COUNT_START + VP9_MV_BITS_0_COUNT_SIZE) +#define VP9_MV_BITS_1_COUNT_SIZE (10 * 2) +#define VP9_MV_CLASS0_HP_0_COUNT_START \ + (VP9_MV_BITS_1_COUNT_START + VP9_MV_BITS_1_COUNT_SIZE) +#define VP9_MV_CLASS0_HP_0_COUNT_SIZE (2 * 2) +#define VP9_MV_CLASS0_HP_1_COUNT_START \ + (VP9_MV_CLASS0_HP_0_COUNT_START + VP9_MV_CLASS0_HP_0_COUNT_SIZE) +#define VP9_MV_CLASS0_HP_1_COUNT_SIZE (2 * 2) + +/* Start merge_tree */ +#define VP9_INTER_MODE_COUNT_START \ + (VP9_MV_CLASS0_HP_1_COUNT_START + VP9_MV_CLASS0_HP_1_COUNT_SIZE) +#define VP9_INTER_MODE_COUNT_SIZE (7 * 4) +#define VP9_IF_Y_MODE_COUNT_START \ + (VP9_INTER_MODE_COUNT_START + VP9_INTER_MODE_COUNT_SIZE) +#define VP9_IF_Y_MODE_COUNT_SIZE (10 * 4) +#define VP9_IF_UV_MODE_COUNT_START \ + (VP9_IF_Y_MODE_COUNT_START + VP9_IF_Y_MODE_COUNT_SIZE) +#define VP9_IF_UV_MODE_COUNT_SIZE (10 * 10) +#define VP9_PARTITION_P_COUNT_START \ + (VP9_IF_UV_MODE_COUNT_START + VP9_IF_UV_MODE_COUNT_SIZE) +#define VP9_PARTITION_P_COUNT_SIZE (4 * 4 * 4) +#define VP9_INTERP_COUNT_START \ + (VP9_PARTITION_P_COUNT_START + VP9_PARTITION_P_COUNT_SIZE) +#define VP9_INTERP_COUNT_SIZE (4 * 3) +#define VP9_MV_JOINTS_COUNT_START \ + (VP9_INTERP_COUNT_START + VP9_INTERP_COUNT_SIZE) +#define VP9_MV_JOINTS_COUNT_SIZE (1 * 4) +#define VP9_MV_CLASSES_0_COUNT_START \ + (VP9_MV_JOINTS_COUNT_START + VP9_MV_JOINTS_COUNT_SIZE) +#define VP9_MV_CLASSES_0_COUNT_SIZE (1 * 11) +#define VP9_MV_CLASS0_0_COUNT_START \ + (VP9_MV_CLASSES_0_COUNT_START + VP9_MV_CLASSES_0_COUNT_SIZE) +#define VP9_MV_CLASS0_0_COUNT_SIZE (1 * 2) +#define VP9_MV_CLASSES_1_COUNT_START \ + (VP9_MV_CLASS0_0_COUNT_START + VP9_MV_CLASS0_0_COUNT_SIZE) +#define VP9_MV_CLASSES_1_COUNT_SIZE (1 * 11) +#define VP9_MV_CLASS0_1_COUNT_START \ + (VP9_MV_CLASSES_1_COUNT_START + VP9_MV_CLASSES_1_COUNT_SIZE) +#define VP9_MV_CLASS0_1_COUNT_SIZE (1 * 2) +#define VP9_MV_CLASS0_FP_0_COUNT_START \ + (VP9_MV_CLASS0_1_COUNT_START + VP9_MV_CLASS0_1_COUNT_SIZE) +#define VP9_MV_CLASS0_FP_0_COUNT_SIZE 
(3 * 4) +#define VP9_MV_CLASS0_FP_1_COUNT_START \ + (VP9_MV_CLASS0_FP_0_COUNT_START + VP9_MV_CLASS0_FP_0_COUNT_SIZE) +#define VP9_MV_CLASS0_FP_1_COUNT_SIZE (3 * 4) + +#define DC_PRED 0 /* Average of above and left pixels */ +#define V_PRED 1 /* Vertical */ +#define H_PRED 2 /* Horizontal */ +#define D45_PRED 3 /* Directional 45 deg = round(arctan(1/1) * 180/pi) */ +#define D135_PRED 4 /* Directional 135 deg = 180 - 45 */ +#define D117_PRED 5 /* Directional 117 deg = 180 - 63 */ +#define D153_PRED 6 /* Directional 153 deg = 180 - 27 */ +#define D207_PRED 7 /* Directional 207 deg = 180 + 27 */ +#define D63_PRED 8 /* Directional 63 deg = round(arctan(2/1) * 180/pi) */ +#define TM_PRED 9 /* True-motion */ + +/* Use a static inline to avoid possible side effect from num being reused */ +static inline int round_power_of_two(int value, int num) +{ + return (value + (1 << (num - 1))) >> num; +} + +#define MODE_MV_COUNT_SAT 20 +static const int count_to_update_factor[MODE_MV_COUNT_SAT + 1] = { + 0, 6, 12, 19, 25, 32, 38, 44, 51, 57, 64, + 70, 76, 83, 89, 96, 102, 108, 115, 121, 128 +}; + +union rpm_param { + struct { + u16 data[RPM_BUF_SIZE]; + } l; + struct { + u16 profile; + u16 show_existing_frame; + u16 frame_to_show_idx; + u16 frame_type; /*1 bit*/ + u16 show_frame; /*1 bit*/ + u16 error_resilient_mode; /*1 bit*/ + u16 intra_only; /*1 bit*/ + u16 display_size_present; /*1 bit*/ + u16 reset_frame_context; + u16 refresh_frame_flags; + u16 width; + u16 height; + u16 display_width; + u16 display_height; + u16 ref_info; + u16 same_frame_size; + u16 mode_ref_delta_enabled; + u16 ref_deltas[4]; + u16 mode_deltas[2]; + u16 filter_level; + u16 sharpness_level; + u16 bit_depth; + u16 seg_quant_info[8]; + u16 seg_enabled; + u16 seg_abs_delta; + /* bit 15: feature enabled; bit 8, sign; bit[5:0], data */ + u16 seg_lf_info[8]; + } p; +}; + +enum SEG_LVL_FEATURES { + SEG_LVL_ALT_Q = 0, /* Use alternate Quantizer */ + SEG_LVL_ALT_LF = 1, /* Use alternate loop filter value */ + SEG_LVL_REF_FRAME = 2, /* Optional Segment reference frame */ + SEG_LVL_SKIP = 3, /* Optional Segment (0,0) + skip mode */ + SEG_LVL_MAX = 4 /* Number of features supported */ +}; + +struct segmentation { + u8 enabled; + u8 update_map; + u8 update_data; + u8 abs_delta; + u8 temporal_update; + s16 feature_data[MAX_SEGMENTS][SEG_LVL_MAX]; + unsigned int feature_mask[MAX_SEGMENTS]; +}; + +struct loop_filter_thresh { + u8 mblim; + u8 lim; + u8 hev_thr; +}; + +struct loop_filter_info_n { + struct loop_filter_thresh lfthr[MAX_LOOP_FILTER + 1]; + u8 lvl[MAX_SEGMENTS][MAX_REF_FRAMES][MAX_MODE_LF_DELTAS]; +}; + +struct loopfilter { + int filter_level; + + int sharpness_level; + int last_sharpness_level; + + u8 mode_ref_delta_enabled; + u8 mode_ref_delta_update; + + /*0 = Intra, Last, GF, ARF*/ + signed char ref_deltas[MAX_REF_LF_DELTAS]; + signed char last_ref_deltas[MAX_REF_LF_DELTAS]; + + /*0 = ZERO_MV, MV*/ + signed char mode_deltas[MAX_MODE_LF_DELTAS]; + signed char last_mode_deltas[MAX_MODE_LF_DELTAS]; +}; + +struct vp9_frame { + struct list_head list; + struct vb2_v4l2_buffer *vbuf; + int index; + int intra_only; + int show; + int type; + int done; + unsigned int width; + unsigned int height; +}; + +struct codec_vp9 { + /* VP9 context lock */ + struct mutex lock; + + /* Common part with the HEVC decoder */ + struct codec_hevc_common common; + + /* Buffer for the VP9 Workspace */ + void *workspace_vaddr; + dma_addr_t workspace_paddr; + + /* Contains many information parsed from the bitstream */ + union rpm_param rpm_param; + + /* Whether we 
detected the bitstream as 10-bit */ + int is_10bit; + + /* Coded resolution reported by the hardware */ + u32 width, height; + + /* All ref frames used by the HW at a given time */ + struct list_head ref_frames_list; + u32 frames_num; + + /* In case of downsampling (decoding with FBC but outputting in NV12M), + * we need to allocate additional buffers for FBC. + */ + void *fbc_buffer_vaddr[MAX_REF_PIC_NUM]; + dma_addr_t fbc_buffer_paddr[MAX_REF_PIC_NUM]; + + int ref_frame_map[REF_FRAMES]; + int next_ref_frame_map[REF_FRAMES]; + struct vp9_frame *frame_refs[REFS_PER_FRAME]; + + u32 lcu_total; + + /* loop filter */ + int default_filt_lvl; + struct loop_filter_info_n lfi; + struct loopfilter lf; + struct segmentation seg_4lf; + + struct vp9_frame *cur_frame; + struct vp9_frame *prev_frame; +}; + +static int div_r32(s64 m, int n) +{ + s64 qu = div_s64(m, n); + + return (int)qu; +} + +static int clip_prob(int p) +{ + return clamp_val(p, 1, 255); +} + +static int segfeature_active(struct segmentation *seg, int segment_id, + enum SEG_LVL_FEATURES feature_id) +{ + return seg->enabled && + (seg->feature_mask[segment_id] & (1 << feature_id)); +} + +static int get_segdata(struct segmentation *seg, int segment_id, + enum SEG_LVL_FEATURES feature_id) +{ + return seg->feature_data[segment_id][feature_id]; +} + +static void vp9_update_sharpness(struct loop_filter_info_n *lfi, + int sharpness_lvl) +{ + int lvl; + + /* For each possible value for the loop filter fill out limits*/ + for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) { + /* Set loop filter parameters that control sharpness.*/ + int block_inside_limit = lvl >> ((sharpness_lvl > 0) + + (sharpness_lvl > 4)); + + if (sharpness_lvl > 0) { + if (block_inside_limit > (9 - sharpness_lvl)) + block_inside_limit = (9 - sharpness_lvl); + } + + if (block_inside_limit < 1) + block_inside_limit = 1; + + lfi->lfthr[lvl].lim = (u8)block_inside_limit; + lfi->lfthr[lvl].mblim = (u8)(2 * (lvl + 2) + + block_inside_limit); + } +} + +/* Instantiate this function once when decode is started */ +static void +vp9_loop_filter_init(struct amvdec_core *core, struct codec_vp9 *vp9) +{ + struct loop_filter_info_n *lfi = &vp9->lfi; + struct loopfilter *lf = &vp9->lf; + struct segmentation *seg_4lf = &vp9->seg_4lf; + int i; + + memset(lfi, 0, sizeof(struct loop_filter_info_n)); + memset(lf, 0, sizeof(struct loopfilter)); + memset(seg_4lf, 0, sizeof(struct segmentation)); + lf->sharpness_level = 0; + vp9_update_sharpness(lfi, lf->sharpness_level); + lf->last_sharpness_level = lf->sharpness_level; + + for (i = 0; i < 32; i++) { + unsigned int thr; + + thr = ((lfi->lfthr[i * 2 + 1].lim & 0x3f) << 8) | + (lfi->lfthr[i * 2 + 1].mblim & 0xff); + thr = (thr << 16) | ((lfi->lfthr[i * 2].lim & 0x3f) << 8) | + (lfi->lfthr[i * 2].mblim & 0xff); + + amvdec_write_dos(core, HEVC_DBLK_CFG9, thr); + } + + if (core->platform->revision >= VDEC_REVISION_SM1) + amvdec_write_dos(core, HEVC_DBLK_CFGB, + (0x3 << 14) | /* dw fifo thres r and b */ + (0x3 << 12) | /* dw fifo thres r or b */ + (0x3 << 10) | /* dw fifo thres not r/b */ + BIT(0)); /* VP9 video format */ + else if (core->platform->revision >= VDEC_REVISION_G12A) + /* VP9 video format */ + amvdec_write_dos(core, HEVC_DBLK_CFGB, (0x54 << 8) | BIT(0)); + else + amvdec_write_dos(core, HEVC_DBLK_CFGB, 0x40400001); +} + +static void +vp9_loop_filter_frame_init(struct amvdec_core *core, struct segmentation *seg, + struct loop_filter_info_n *lfi, + struct loopfilter *lf, int default_filt_lvl) +{ + int i; + int seg_id; + + /* + * n_shift is the 
multiplier for lf_deltas + * the multiplier is: + * - 1 for when filter_lvl is between 0 and 31 + * - 2 when filter_lvl is between 32 and 63 + */ + const int scale = 1 << (default_filt_lvl >> 5); + + /* update limits if sharpness has changed */ + if (lf->last_sharpness_level != lf->sharpness_level) { + vp9_update_sharpness(lfi, lf->sharpness_level); + lf->last_sharpness_level = lf->sharpness_level; + + /* Write to register */ + for (i = 0; i < 32; i++) { + unsigned int thr; + + thr = ((lfi->lfthr[i * 2 + 1].lim & 0x3f) << 8) | + (lfi->lfthr[i * 2 + 1].mblim & 0xff); + thr = (thr << 16) | + ((lfi->lfthr[i * 2].lim & 0x3f) << 8) | + (lfi->lfthr[i * 2].mblim & 0xff); + + amvdec_write_dos(core, HEVC_DBLK_CFG9, thr); + } + } + + for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) { + int lvl_seg = default_filt_lvl; + + if (segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) { + const int data = get_segdata(seg, seg_id, + SEG_LVL_ALT_LF); + lvl_seg = clamp_t(int, + seg->abs_delta == SEGMENT_ABSDATA ? + data : default_filt_lvl + data, + 0, MAX_LOOP_FILTER); + } + + if (!lf->mode_ref_delta_enabled) { + /* + * We could get rid of this if we assume that deltas + * are set to zero when not in use. + * encoder always uses deltas + */ + memset(lfi->lvl[seg_id], lvl_seg, + sizeof(lfi->lvl[seg_id])); + } else { + int ref, mode; + const int intra_lvl = + lvl_seg + lf->ref_deltas[INTRA_FRAME] * scale; + lfi->lvl[seg_id][INTRA_FRAME][0] = + clamp_val(intra_lvl, 0, MAX_LOOP_FILTER); + + for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref) { + for (mode = 0; mode < MAX_MODE_LF_DELTAS; + ++mode) { + const int inter_lvl = + lvl_seg + + lf->ref_deltas[ref] * scale + + lf->mode_deltas[mode] * scale; + lfi->lvl[seg_id][ref][mode] = + clamp_val(inter_lvl, 0, + MAX_LOOP_FILTER); + } + } + } + } + + for (i = 0; i < 16; i++) { + unsigned int level; + + level = ((lfi->lvl[i >> 1][3][i & 1] & 0x3f) << 24) | + ((lfi->lvl[i >> 1][2][i & 1] & 0x3f) << 16) | + ((lfi->lvl[i >> 1][1][i & 1] & 0x3f) << 8) | + (lfi->lvl[i >> 1][0][i & 1] & 0x3f); + if (!default_filt_lvl) + level = 0; + + amvdec_write_dos(core, HEVC_DBLK_CFGA, level); + } +} + +static void codec_vp9_flush_output(struct amvdec_session *sess) +{ + struct codec_vp9 *vp9 = sess->priv; + struct vp9_frame *tmp, *n; + + mutex_lock(&vp9->lock); + list_for_each_entry_safe(tmp, n, &vp9->ref_frames_list, list) { + if (!tmp->done) { + if (tmp->show) + amvdec_dst_buf_done(sess, tmp->vbuf, + V4L2_FIELD_NONE); + else + v4l2_m2m_buf_queue(sess->m2m_ctx, tmp->vbuf); + + vp9->frames_num--; + } + + list_del(&tmp->list); + kfree(tmp); + } + mutex_unlock(&vp9->lock); +} + +static u32 codec_vp9_num_pending_bufs(struct amvdec_session *sess) +{ + struct codec_vp9 *vp9 = sess->priv; + + if (!vp9) + return 0; + + return vp9->frames_num; +} + +static int codec_vp9_alloc_workspace(struct amvdec_core *core, + struct codec_vp9 *vp9) +{ + /* Allocate some memory for the VP9 decoder's state */ + vp9->workspace_vaddr = dma_alloc_coherent(core->dev, SIZE_WORKSPACE, + &vp9->workspace_paddr, + GFP_KERNEL); + if (!vp9->workspace_vaddr) { + dev_err(core->dev, "Failed to allocate VP9 Workspace\n"); + return -ENOMEM; + } + + return 0; +} + +static void codec_vp9_setup_workspace(struct amvdec_session *sess, + struct codec_vp9 *vp9) +{ + struct amvdec_core *core = sess->core; + u32 revision = core->platform->revision; + dma_addr_t wkaddr = vp9->workspace_paddr; + + amvdec_write_dos(core, HEVCD_IPP_LINEBUFF_BASE, wkaddr + IPP_OFFSET); + amvdec_write_dos(core, VP9_RPM_BUFFER, wkaddr + RPM_OFFSET); + 
amvdec_write_dos(core, VP9_SHORT_TERM_RPS, wkaddr + SH_TM_RPS_OFFSET); + amvdec_write_dos(core, VP9_PPS_BUFFER, wkaddr + PPS_OFFSET); + amvdec_write_dos(core, VP9_SAO_UP, wkaddr + SAO_UP_OFFSET); + + amvdec_write_dos(core, VP9_STREAM_SWAP_BUFFER, + wkaddr + SWAP_BUF_OFFSET); + amvdec_write_dos(core, VP9_STREAM_SWAP_BUFFER2, + wkaddr + SWAP_BUF2_OFFSET); + amvdec_write_dos(core, VP9_SCALELUT, wkaddr + SCALELUT_OFFSET); + + if (core->platform->revision >= VDEC_REVISION_G12A) + amvdec_write_dos(core, HEVC_DBLK_CFGE, + wkaddr + DBLK_PARA_OFFSET); + + amvdec_write_dos(core, HEVC_DBLK_CFG4, wkaddr + DBLK_PARA_OFFSET); + amvdec_write_dos(core, HEVC_DBLK_CFG5, wkaddr + DBLK_DATA_OFFSET); + amvdec_write_dos(core, VP9_SEG_MAP_BUFFER, wkaddr + SEG_MAP_OFFSET); + amvdec_write_dos(core, VP9_PROB_SWAP_BUFFER, wkaddr + PROB_OFFSET); + amvdec_write_dos(core, VP9_COUNT_SWAP_BUFFER, wkaddr + COUNT_OFFSET); + amvdec_write_dos(core, LMEM_DUMP_ADR, wkaddr + LMEM_OFFSET); + + if (codec_hevc_use_mmu(revision, sess->pixfmt_cap, vp9->is_10bit)) { + amvdec_write_dos(core, HEVC_SAO_MMU_VH0_ADDR, + wkaddr + MMU_VBH_OFFSET); + amvdec_write_dos(core, HEVC_SAO_MMU_VH1_ADDR, + wkaddr + MMU_VBH_OFFSET + (MMU_VBH_SIZE / 2)); + + if (revision >= VDEC_REVISION_G12A) + amvdec_write_dos(core, HEVC_ASSIST_MMU_MAP_ADDR, + vp9->common.mmu_map_paddr); + else + amvdec_write_dos(core, VP9_MMU_MAP_BUFFER, + vp9->common.mmu_map_paddr); + } +} + +static int codec_vp9_start(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + struct codec_vp9 *vp9; + u32 val; + int i; + int ret; + + vp9 = kzalloc(sizeof(*vp9), GFP_KERNEL); + if (!vp9) + return -ENOMEM; + + ret = codec_vp9_alloc_workspace(core, vp9); + if (ret) + goto free_vp9; + + codec_vp9_setup_workspace(sess, vp9); + amvdec_write_dos_bits(core, HEVC_STREAM_CONTROL, BIT(0)); + /* stream_fifo_hole */ + if (core->platform->revision >= VDEC_REVISION_G12A) + amvdec_write_dos_bits(core, HEVC_STREAM_FIFO_CTL, BIT(29)); + + val = amvdec_read_dos(core, HEVC_PARSER_INT_CONTROL) & 0x7fffffff; + val |= (3 << 29) | BIT(24) | BIT(22) | BIT(7) | BIT(4) | BIT(0); + amvdec_write_dos(core, HEVC_PARSER_INT_CONTROL, val); + amvdec_write_dos_bits(core, HEVC_SHIFT_STATUS, BIT(0)); + amvdec_write_dos(core, HEVC_SHIFT_CONTROL, BIT(10) | BIT(9) | + (3 << 6) | BIT(5) | BIT(2) | BIT(1) | BIT(0)); + amvdec_write_dos(core, HEVC_CABAC_CONTROL, BIT(0)); + amvdec_write_dos(core, HEVC_PARSER_CORE_CONTROL, BIT(0)); + amvdec_write_dos(core, HEVC_SHIFT_STARTCODE, 0x00000001); + + amvdec_write_dos(core, VP9_DEC_STATUS_REG, 0); + + amvdec_write_dos(core, HEVC_PARSER_CMD_WRITE, BIT(16)); + for (i = 0; i < ARRAY_SIZE(vdec_hevc_parser_cmd); ++i) + amvdec_write_dos(core, HEVC_PARSER_CMD_WRITE, + vdec_hevc_parser_cmd[i]); + + amvdec_write_dos(core, HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0); + amvdec_write_dos(core, HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1); + amvdec_write_dos(core, HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2); + amvdec_write_dos(core, HEVC_PARSER_IF_CONTROL, + BIT(5) | BIT(2) | BIT(0)); + + amvdec_write_dos(core, HEVCD_IPP_TOP_CNTL, BIT(0)); + amvdec_write_dos(core, HEVCD_IPP_TOP_CNTL, BIT(1)); + + amvdec_write_dos(core, VP9_WAIT_FLAG, 1); + + /* clear mailbox interrupt */ + amvdec_write_dos(core, HEVC_ASSIST_MBOX1_CLR_REG, 1); + /* enable mailbox interrupt */ + amvdec_write_dos(core, HEVC_ASSIST_MBOX1_MASK, 1); + /* disable PSCALE for hardware sharing */ + amvdec_write_dos(core, HEVC_PSCALE_CTRL, 0); + /* Let the uCode do all the parsing */ + amvdec_write_dos(core, 
NAL_SEARCH_CTL, 0x8); + + amvdec_write_dos(core, DECODE_STOP_POS, 0); + amvdec_write_dos(core, VP9_DECODE_MODE, DECODE_MODE_SINGLE); + + pr_debug("decode_count: %u; decode_size: %u\n", + amvdec_read_dos(core, HEVC_DECODE_COUNT), + amvdec_read_dos(core, HEVC_DECODE_SIZE)); + + vp9_loop_filter_init(core, vp9); + + INIT_LIST_HEAD(&vp9->ref_frames_list); + mutex_init(&vp9->lock); + memset(&vp9->ref_frame_map, -1, sizeof(vp9->ref_frame_map)); + memset(&vp9->next_ref_frame_map, -1, sizeof(vp9->next_ref_frame_map)); + for (i = 0; i < REFS_PER_FRAME; ++i) + vp9->frame_refs[i] = NULL; + sess->priv = vp9; + + return 0; + +free_vp9: + kfree(vp9); + return ret; +} + +static int codec_vp9_stop(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + struct codec_vp9 *vp9 = sess->priv; + + mutex_lock(&vp9->lock); + if (vp9->workspace_vaddr) + dma_free_coherent(core->dev, SIZE_WORKSPACE, + vp9->workspace_vaddr, + vp9->workspace_paddr); + + codec_hevc_free_fbc_buffers(sess, &vp9->common); + mutex_unlock(&vp9->lock); + + return 0; +} + +static void codec_vp9_set_sao(struct amvdec_session *sess, + struct vb2_buffer *vb) +{ + struct amvdec_core *core = sess->core; + struct codec_vp9 *vp9 = sess->priv; + + dma_addr_t buf_y_paddr; + dma_addr_t buf_u_v_paddr; + u32 val; + + if (codec_hevc_use_downsample(sess->pixfmt_cap, vp9->is_10bit)) + buf_y_paddr = + vp9->common.fbc_buffer_paddr[vb->index]; + else + buf_y_paddr = + vb2_dma_contig_plane_dma_addr(vb, 0); + + if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit)) { + val = amvdec_read_dos(core, HEVC_SAO_CTRL5) & ~0xff0200; + amvdec_write_dos(core, HEVC_SAO_CTRL5, val); + amvdec_write_dos(core, HEVC_CM_BODY_START_ADDR, buf_y_paddr); + } + + if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M) { + buf_y_paddr = + vb2_dma_contig_plane_dma_addr(vb, 0); + buf_u_v_paddr = + vb2_dma_contig_plane_dma_addr(vb, 1); + amvdec_write_dos(core, HEVC_SAO_Y_START_ADDR, buf_y_paddr); + amvdec_write_dos(core, HEVC_SAO_C_START_ADDR, buf_u_v_paddr); + amvdec_write_dos(core, HEVC_SAO_Y_WPTR, buf_y_paddr); + amvdec_write_dos(core, HEVC_SAO_C_WPTR, buf_u_v_paddr); + } + + if (codec_hevc_use_mmu(core->platform->revision, sess->pixfmt_cap, + vp9->is_10bit)) { + amvdec_write_dos(core, HEVC_CM_HEADER_START_ADDR, + vp9->common.mmu_header_paddr[vb->index]); + /* use HEVC_CM_HEADER_START_ADDR */ + amvdec_write_dos_bits(core, HEVC_SAO_CTRL5, BIT(10)); + } + + amvdec_write_dos(core, HEVC_SAO_Y_LENGTH, + amvdec_get_output_size(sess)); + amvdec_write_dos(core, HEVC_SAO_C_LENGTH, + (amvdec_get_output_size(sess) / 2)); + + if (core->platform->revision >= VDEC_REVISION_G12A) { + amvdec_clear_dos_bits(core, HEVC_DBLK_CFGB, + BIT(4) | BIT(5) | BIT(8) | BIT(9)); + /* enable first, compressed write */ + if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit)) + amvdec_write_dos_bits(core, HEVC_DBLK_CFGB, BIT(8)); + + /* enable second, uncompressed write */ + if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M) + amvdec_write_dos_bits(core, HEVC_DBLK_CFGB, BIT(9)); + + /* dblk pipeline mode=1 for performance */ + if (sess->width >= 1280) + amvdec_write_dos_bits(core, HEVC_DBLK_CFGB, BIT(4)); + + pr_debug("HEVC_DBLK_CFGB: %08X\n", + amvdec_read_dos(core, HEVC_DBLK_CFGB)); + } + + val = amvdec_read_dos(core, HEVC_SAO_CTRL1) & ~0x3ff0; + val |= 0xff0; /* Set endianness for 2-bytes swaps (nv12) */ + if (core->platform->revision < VDEC_REVISION_G12A) { + val &= ~0x3; + if (!codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit)) + val |= BIT(0); /* disable cm compression */ + /* TOFIX: Handle Amlogic 
Framebuffer compression */ + } + + amvdec_write_dos(core, HEVC_SAO_CTRL1, val); + pr_debug("HEVC_SAO_CTRL1: %08X\n", val); + + /* no downscale for NV12 */ + val = amvdec_read_dos(core, HEVC_SAO_CTRL5) & ~0xff0000; + amvdec_write_dos(core, HEVC_SAO_CTRL5, val); + + val = amvdec_read_dos(core, HEVCD_IPP_AXIIF_CONFIG) & ~0x30; + val |= 0xf; + val &= ~BIT(12); /* NV12 */ + amvdec_write_dos(core, HEVCD_IPP_AXIIF_CONFIG, val); +} + +static dma_addr_t codec_vp9_get_frame_mv_paddr(struct codec_vp9 *vp9, + struct vp9_frame *frame) +{ + return vp9->workspace_paddr + MPRED_MV_OFFSET + + (frame->index * MPRED_MV_BUF_SIZE); +} + +static void codec_vp9_set_mpred_mv(struct amvdec_core *core, + struct codec_vp9 *vp9) +{ + int mpred_mv_rd_end_addr; + int use_prev_frame_mvs = vp9->prev_frame->width == + vp9->cur_frame->width && + vp9->prev_frame->height == + vp9->cur_frame->height && + !vp9->prev_frame->intra_only && + vp9->prev_frame->show && + vp9->prev_frame->type != KEY_FRAME; + + amvdec_write_dos(core, HEVC_MPRED_CTRL3, 0x24122412); + amvdec_write_dos(core, HEVC_MPRED_ABV_START_ADDR, + vp9->workspace_paddr + MPRED_ABV_OFFSET); + + amvdec_clear_dos_bits(core, HEVC_MPRED_CTRL4, BIT(6)); + if (use_prev_frame_mvs) + amvdec_write_dos_bits(core, HEVC_MPRED_CTRL4, BIT(6)); + + amvdec_write_dos(core, HEVC_MPRED_MV_WR_START_ADDR, + codec_vp9_get_frame_mv_paddr(vp9, vp9->cur_frame)); + amvdec_write_dos(core, HEVC_MPRED_MV_WPTR, + codec_vp9_get_frame_mv_paddr(vp9, vp9->cur_frame)); + + amvdec_write_dos(core, HEVC_MPRED_MV_RD_START_ADDR, + codec_vp9_get_frame_mv_paddr(vp9, vp9->prev_frame)); + amvdec_write_dos(core, HEVC_MPRED_MV_RPTR, + codec_vp9_get_frame_mv_paddr(vp9, vp9->prev_frame)); + + mpred_mv_rd_end_addr = + codec_vp9_get_frame_mv_paddr(vp9, vp9->prev_frame) + + (vp9->lcu_total * MV_MEM_UNIT); + amvdec_write_dos(core, HEVC_MPRED_MV_RD_END_ADDR, mpred_mv_rd_end_addr); +} + +static void codec_vp9_update_next_ref(struct codec_vp9 *vp9) +{ + union rpm_param *param = &vp9->rpm_param; + u32 buf_idx = vp9->cur_frame->index; + int ref_index = 0; + int refresh_frame_flags; + int mask; + + refresh_frame_flags = vp9->cur_frame->type == KEY_FRAME ? + 0xff : param->p.refresh_frame_flags; + + for (mask = refresh_frame_flags; mask; mask >>= 1) { + pr_debug("mask=%08X; ref_index=%d\n", mask, ref_index); + if (mask & 1) + vp9->next_ref_frame_map[ref_index] = buf_idx; + else + vp9->next_ref_frame_map[ref_index] = + vp9->ref_frame_map[ref_index]; + + ++ref_index; + } + + for (; ref_index < REF_FRAMES; ++ref_index) + vp9->next_ref_frame_map[ref_index] = + vp9->ref_frame_map[ref_index]; +} + +static void codec_vp9_save_refs(struct codec_vp9 *vp9) +{ + union rpm_param *param = &vp9->rpm_param; + int i; + + for (i = 0; i < REFS_PER_FRAME; ++i) { + const int ref = (param->p.ref_info >> + (((REFS_PER_FRAME - i - 1) * 4) + 1)) & 0x7; + + if (vp9->ref_frame_map[ref] < 0) + continue; + + pr_warn("%s: FIXME, would need to save ref %d\n", + __func__, vp9->ref_frame_map[ref]); + } +} + +static void codec_vp9_update_ref(struct codec_vp9 *vp9) +{ + union rpm_param *param = &vp9->rpm_param; + int ref_index = 0; + int mask; + int refresh_frame_flags; + + if (!vp9->cur_frame) + return; + + refresh_frame_flags = vp9->cur_frame->type == KEY_FRAME ? 
+ 0xff : param->p.refresh_frame_flags; + + for (mask = refresh_frame_flags; mask; mask >>= 1) { + vp9->ref_frame_map[ref_index] = + vp9->next_ref_frame_map[ref_index]; + ++ref_index; + } + + if (param->p.show_existing_frame) + return; + + for (; ref_index < REF_FRAMES; ++ref_index) + vp9->ref_frame_map[ref_index] = + vp9->next_ref_frame_map[ref_index]; +} + +static struct vp9_frame *codec_vp9_get_frame_by_idx(struct codec_vp9 *vp9, + int idx) +{ + struct vp9_frame *frame; + + list_for_each_entry(frame, &vp9->ref_frames_list, list) { + if (frame->index == idx) + return frame; + } + + return NULL; +} + +static void codec_vp9_sync_ref(struct codec_vp9 *vp9) +{ + union rpm_param *param = &vp9->rpm_param; + int i; + + for (i = 0; i < REFS_PER_FRAME; ++i) { + const int ref = (param->p.ref_info >> + (((REFS_PER_FRAME - i - 1) * 4) + 1)) & 0x7; + const int idx = vp9->ref_frame_map[ref]; + + vp9->frame_refs[i] = codec_vp9_get_frame_by_idx(vp9, idx); + if (!vp9->frame_refs[i]) + pr_warn("%s: couldn't find VP9 ref %d\n", __func__, + idx); + } +} + +static void codec_vp9_set_refs(struct amvdec_session *sess, + struct codec_vp9 *vp9) +{ + struct amvdec_core *core = sess->core; + int i; + + for (i = 0; i < REFS_PER_FRAME; ++i) { + struct vp9_frame *frame = vp9->frame_refs[i]; + int id_y; + int id_u_v; + + if (!frame) + continue; + + if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit)) { + id_y = frame->index; + id_u_v = id_y; + } else { + id_y = frame->index * 2; + id_u_v = id_y + 1; + } + + amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (id_u_v << 16) | (id_u_v << 8) | id_y); + } +} + +static void codec_vp9_set_mc(struct amvdec_session *sess, + struct codec_vp9 *vp9) +{ + struct amvdec_core *core = sess->core; + u32 scale = 0; + u32 sz; + int i; + + amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, 1); + codec_vp9_set_refs(sess, vp9); + amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (16 << 8) | 1); + codec_vp9_set_refs(sess, vp9); + + amvdec_write_dos(core, VP9D_MPP_REFINFO_TBL_ACCCONFIG, BIT(2)); + for (i = 0; i < REFS_PER_FRAME; ++i) { + if (!vp9->frame_refs[i]) + continue; + + if (vp9->frame_refs[i]->width != vp9->width || + vp9->frame_refs[i]->height != vp9->height) + scale = 1; + + sz = amvdec_am21c_body_size(vp9->frame_refs[i]->width, + vp9->frame_refs[i]->height); + + amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA, + vp9->frame_refs[i]->width); + amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA, + vp9->frame_refs[i]->height); + amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA, + (vp9->frame_refs[i]->width << 14) / + vp9->width); + amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA, + (vp9->frame_refs[i]->height << 14) / + vp9->height); + amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA, sz >> 5); + } + + amvdec_write_dos(core, VP9D_MPP_REF_SCALE_ENBL, scale); +} + +static struct vp9_frame *codec_vp9_get_new_frame(struct amvdec_session *sess) +{ + struct codec_vp9 *vp9 = sess->priv; + union rpm_param *param = &vp9->rpm_param; + struct vb2_v4l2_buffer *vbuf; + struct vp9_frame *new_frame; + + new_frame = kzalloc(sizeof(*new_frame), GFP_KERNEL); + if (!new_frame) + return NULL; + + vbuf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx); + if (!vbuf) { + dev_err(sess->core->dev, "No dst buffer available\n"); + kfree(new_frame); + return NULL; + } + + while (codec_vp9_get_frame_by_idx(vp9, vbuf->vb2_buf.index)) { + struct vb2_v4l2_buffer *old_vbuf = vbuf; + + vbuf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx); + v4l2_m2m_buf_queue(sess->m2m_ctx, old_vbuf); + if (!vbuf) { + 
dev_err(sess->core->dev, "No dst buffer available\n"); + kfree(new_frame); + return NULL; + } + } + + new_frame->vbuf = vbuf; + new_frame->index = vbuf->vb2_buf.index; + new_frame->intra_only = param->p.intra_only; + new_frame->show = param->p.show_frame; + new_frame->type = param->p.frame_type; + new_frame->width = vp9->width; + new_frame->height = vp9->height; + list_add_tail(&new_frame->list, &vp9->ref_frames_list); + vp9->frames_num++; + + return new_frame; +} + +static void codec_vp9_show_existing_frame(struct codec_vp9 *vp9) +{ + union rpm_param *param = &vp9->rpm_param; + + if (!param->p.show_existing_frame) + return; + + pr_debug("showing frame %u\n", param->p.frame_to_show_idx); +} + +static void codec_vp9_rm_noshow_frame(struct amvdec_session *sess) +{ + struct codec_vp9 *vp9 = sess->priv; + struct vp9_frame *tmp; + + list_for_each_entry(tmp, &vp9->ref_frames_list, list) { + if (tmp->show) + continue; + + pr_debug("rm noshow: %u\n", tmp->index); + v4l2_m2m_buf_queue(sess->m2m_ctx, tmp->vbuf); + list_del(&tmp->list); + kfree(tmp); + vp9->frames_num--; + return; + } +} + +static void codec_vp9_process_frame(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + struct codec_vp9 *vp9 = sess->priv; + union rpm_param *param = &vp9->rpm_param; + int intra_only; + + if (!param->p.show_frame) + codec_vp9_rm_noshow_frame(sess); + + vp9->cur_frame = codec_vp9_get_new_frame(sess); + if (!vp9->cur_frame) + return; + + pr_debug("frame %d: type: %08X; show_exist: %u; show: %u, intra_only: %u\n", + vp9->cur_frame->index, + param->p.frame_type, param->p.show_existing_frame, + param->p.show_frame, param->p.intra_only); + + if (param->p.frame_type != KEY_FRAME) + codec_vp9_sync_ref(vp9); + codec_vp9_update_next_ref(vp9); + codec_vp9_show_existing_frame(vp9); + + if (codec_hevc_use_mmu(core->platform->revision, sess->pixfmt_cap, + vp9->is_10bit)) + codec_hevc_fill_mmu_map(sess, &vp9->common, + &vp9->cur_frame->vbuf->vb2_buf); + + intra_only = param->p.show_frame ? 0 : param->p.intra_only; + + /* clear mpred (for keyframe only) */ + if (param->p.frame_type != KEY_FRAME && !intra_only) { + codec_vp9_set_mc(sess, vp9); + codec_vp9_set_mpred_mv(core, vp9); + } else { + amvdec_clear_dos_bits(core, HEVC_MPRED_CTRL4, BIT(6)); + } + + amvdec_write_dos(core, HEVC_PARSER_PICTURE_SIZE, + (vp9->height << 16) | vp9->width); + codec_vp9_set_sao(sess, &vp9->cur_frame->vbuf->vb2_buf); + + vp9_loop_filter_frame_init(core, &vp9->seg_4lf, + &vp9->lfi, &vp9->lf, + vp9->default_filt_lvl); + + /* ask uCode to start decoding */ + amvdec_write_dos(core, VP9_DEC_STATUS_REG, VP9_10B_DECODE_SLICE); +} + +static void codec_vp9_process_lf(struct codec_vp9 *vp9) +{ + union rpm_param *param = &vp9->rpm_param; + int i; + + vp9->lf.mode_ref_delta_enabled = param->p.mode_ref_delta_enabled; + vp9->lf.sharpness_level = param->p.sharpness_level; + vp9->default_filt_lvl = param->p.filter_level; + vp9->seg_4lf.enabled = param->p.seg_enabled; + vp9->seg_4lf.abs_delta = param->p.seg_abs_delta; + + for (i = 0; i < 4; i++) + vp9->lf.ref_deltas[i] = param->p.ref_deltas[i]; + + for (i = 0; i < 2; i++) + vp9->lf.mode_deltas[i] = param->p.mode_deltas[i]; + + for (i = 0; i < MAX_SEGMENTS; i++) + vp9->seg_4lf.feature_mask[i] = + (param->p.seg_lf_info[i] & 0x8000) ? + (1 << SEG_LVL_ALT_LF) : 0; + + for (i = 0; i < MAX_SEGMENTS; i++) + vp9->seg_4lf.feature_data[i][SEG_LVL_ALT_LF] = + (param->p.seg_lf_info[i] & 0x100) ? 
+ -(param->p.seg_lf_info[i] & 0x3f) + : (param->p.seg_lf_info[i] & 0x3f); +} + +static void codec_vp9_resume(struct amvdec_session *sess) +{ + struct codec_vp9 *vp9 = sess->priv; + + mutex_lock(&vp9->lock); + if (codec_hevc_setup_buffers(sess, &vp9->common, vp9->is_10bit)) { + mutex_unlock(&vp9->lock); + amvdec_abort(sess); + return; + } + + codec_vp9_setup_workspace(sess, vp9); + codec_hevc_setup_decode_head(sess, vp9->is_10bit); + codec_vp9_process_lf(vp9); + codec_vp9_process_frame(sess); + + mutex_unlock(&vp9->lock); +} + +/* + * The RPM section within the workspace contains + * many information regarding the parsed bitstream + */ +static void codec_vp9_fetch_rpm(struct amvdec_session *sess) +{ + struct codec_vp9 *vp9 = sess->priv; + u16 *rpm_vaddr = vp9->workspace_vaddr + RPM_OFFSET; + int i, j; + + for (i = 0; i < RPM_BUF_SIZE; i += 4) + for (j = 0; j < 4; j++) + vp9->rpm_param.l.data[i + j] = rpm_vaddr[i + 3 - j]; +} + +static int codec_vp9_process_rpm(struct codec_vp9 *vp9) +{ + union rpm_param *param = &vp9->rpm_param; + int src_changed = 0; + int is_10bit = 0; + int pic_width_64 = ALIGN(param->p.width, 64); + int pic_height_32 = ALIGN(param->p.height, 32); + int pic_width_lcu = (pic_width_64 % LCU_SIZE) ? + pic_width_64 / LCU_SIZE + 1 + : pic_width_64 / LCU_SIZE; + int pic_height_lcu = (pic_height_32 % LCU_SIZE) ? + pic_height_32 / LCU_SIZE + 1 + : pic_height_32 / LCU_SIZE; + vp9->lcu_total = pic_width_lcu * pic_height_lcu; + + if (param->p.bit_depth == 10) + is_10bit = 1; + + if (vp9->width != param->p.width || vp9->height != param->p.height || + vp9->is_10bit != is_10bit) + src_changed = 1; + + vp9->width = param->p.width; + vp9->height = param->p.height; + vp9->is_10bit = is_10bit; + + pr_debug("width: %u; height: %u; is_10bit: %d; src_changed: %d\n", + vp9->width, vp9->height, is_10bit, src_changed); + + return src_changed; +} + +static bool codec_vp9_is_ref(struct codec_vp9 *vp9, struct vp9_frame *frame) +{ + int i; + + for (i = 0; i < REF_FRAMES; ++i) + if (vp9->ref_frame_map[i] == frame->index) + return true; + + return false; +} + +static void codec_vp9_show_frame(struct amvdec_session *sess) +{ + struct codec_vp9 *vp9 = sess->priv; + struct vp9_frame *tmp, *n; + + list_for_each_entry_safe(tmp, n, &vp9->ref_frames_list, list) { + if (!tmp->show || tmp == vp9->cur_frame) + continue; + + if (!tmp->done) { + pr_debug("Doning %u\n", tmp->index); + amvdec_dst_buf_done(sess, tmp->vbuf, V4L2_FIELD_NONE); + tmp->done = 1; + vp9->frames_num--; + } + + if (codec_vp9_is_ref(vp9, tmp) || tmp == vp9->prev_frame) + continue; + + pr_debug("deleting %d\n", tmp->index); + list_del(&tmp->list); + kfree(tmp); + } +} + +static void vp9_tree_merge_probs(unsigned int *prev_prob, + unsigned int *cur_prob, + int coef_node_start, int tree_left, + int tree_right, + int tree_i, int node) +{ + int prob_32, prob_res, prob_shift; + int pre_prob, new_prob; + int den, m_count, get_prob, factor; + + prob_32 = prev_prob[coef_node_start / 4 * 2]; + prob_res = coef_node_start & 3; + prob_shift = prob_res * 8; + pre_prob = (prob_32 >> prob_shift) & 0xff; + + den = tree_left + tree_right; + + if (den == 0) { + new_prob = pre_prob; + } else { + m_count = den < MODE_MV_COUNT_SAT ? 
den : MODE_MV_COUNT_SAT; + get_prob = + clip_prob(div_r32(((int64_t)tree_left * 256 + + (den >> 1)), + den)); + + /* weighted_prob */ + factor = count_to_update_factor[m_count]; + new_prob = round_power_of_two(pre_prob * (256 - factor) + + get_prob * factor, 8); + } + + cur_prob[coef_node_start / 4 * 2] = + (cur_prob[coef_node_start / 4 * 2] & (~(0xff << prob_shift))) | + (new_prob << prob_shift); +} + +static void adapt_coef_probs_cxt(unsigned int *prev_prob, + unsigned int *cur_prob, + unsigned int *count, + int update_factor, + int cxt_num, + int coef_cxt_start, + int coef_count_cxt_start) +{ + int prob_32, prob_res, prob_shift; + int pre_prob, new_prob; + int num, den, m_count, get_prob, factor; + int node, coef_node_start; + int count_sat = 24; + int cxt; + + for (cxt = 0; cxt < cxt_num; cxt++) { + const int n0 = count[coef_count_cxt_start]; + const int n1 = count[coef_count_cxt_start + 1]; + const int n2 = count[coef_count_cxt_start + 2]; + const int neob = count[coef_count_cxt_start + 3]; + const int nneob = count[coef_count_cxt_start + 4]; + const unsigned int branch_ct[3][2] = { + { neob, nneob }, + { n0, n1 + n2 }, + { n1, n2 } + }; + + coef_node_start = coef_cxt_start; + for (node = 0 ; node < 3 ; node++) { + prob_32 = prev_prob[coef_node_start / 4 * 2]; + prob_res = coef_node_start & 3; + prob_shift = prob_res * 8; + pre_prob = (prob_32 >> prob_shift) & 0xff; + + /* get binary prob */ + num = branch_ct[node][0]; + den = branch_ct[node][0] + branch_ct[node][1]; + m_count = den < count_sat ? den : count_sat; + + get_prob = (den == 0) ? + 128u : + clip_prob(div_r32(((int64_t)num * 256 + + (den >> 1)), den)); + + factor = update_factor * m_count / count_sat; + new_prob = + round_power_of_two(pre_prob * (256 - factor) + + get_prob * factor, 8); + + cur_prob[coef_node_start / 4 * 2] = + (cur_prob[coef_node_start / 4 * 2] & + (~(0xff << prob_shift))) | + (new_prob << prob_shift); + + coef_node_start += 1; + } + + coef_cxt_start = coef_cxt_start + 3; + coef_count_cxt_start = coef_count_cxt_start + 5; + } +} + +static void adapt_coef_probs(int prev_kf, int cur_kf, int pre_fc, + unsigned int *prev_prob, unsigned int *cur_prob, + unsigned int *count) +{ + int tx_size, coef_tx_size_start, coef_count_tx_size_start; + int plane, coef_plane_start, coef_count_plane_start; + int type, coef_type_start, coef_count_type_start; + int band, coef_band_start, coef_count_band_start; + int cxt_num; + int coef_cxt_start, coef_count_cxt_start; + int node, coef_node_start, coef_count_node_start; + + int tree_i, tree_left, tree_right; + int mvd_i; + + int update_factor = cur_kf ? 112 : (prev_kf ? 
128 : 112); + + int prob_32; + int prob_res; + int prob_shift; + int pre_prob; + + int den; + int get_prob; + int m_count; + int factor; + + int new_prob; + + for (tx_size = 0 ; tx_size < 4 ; tx_size++) { + coef_tx_size_start = VP9_COEF_START + + tx_size * 4 * VP9_COEF_SIZE_ONE_SET; + coef_count_tx_size_start = VP9_COEF_COUNT_START + + tx_size * 4 * VP9_COEF_COUNT_SIZE_ONE_SET; + coef_plane_start = coef_tx_size_start; + coef_count_plane_start = coef_count_tx_size_start; + + for (plane = 0 ; plane < 2 ; plane++) { + coef_type_start = coef_plane_start; + coef_count_type_start = coef_count_plane_start; + + for (type = 0 ; type < 2 ; type++) { + coef_band_start = coef_type_start; + coef_count_band_start = coef_count_type_start; + + for (band = 0 ; band < 6 ; band++) { + if (band == 0) + cxt_num = 3; + else + cxt_num = 6; + coef_cxt_start = coef_band_start; + coef_count_cxt_start = + coef_count_band_start; + + adapt_coef_probs_cxt(prev_prob, + cur_prob, + count, + update_factor, + cxt_num, + coef_cxt_start, + coef_count_cxt_start); + + if (band == 0) { + coef_band_start += 10; + coef_count_band_start += 15; + } else { + coef_band_start += 18; + coef_count_band_start += 30; + } + } + coef_type_start += VP9_COEF_SIZE_ONE_SET; + coef_count_type_start += + VP9_COEF_COUNT_SIZE_ONE_SET; + } + + coef_plane_start += 2 * VP9_COEF_SIZE_ONE_SET; + coef_count_plane_start += + 2 * VP9_COEF_COUNT_SIZE_ONE_SET; + } + } + + if (cur_kf == 0) { + /* mode_mv_merge_probs - merge_intra_inter_prob */ + for (coef_count_node_start = VP9_INTRA_INTER_COUNT_START; + coef_count_node_start < (VP9_MV_CLASS0_HP_1_COUNT_START + + VP9_MV_CLASS0_HP_1_COUNT_SIZE); + coef_count_node_start += 2) { + if (coef_count_node_start == + VP9_INTRA_INTER_COUNT_START) + coef_node_start = VP9_INTRA_INTER_START; + else if (coef_count_node_start == + VP9_COMP_INTER_COUNT_START) + coef_node_start = VP9_COMP_INTER_START; + else if (coef_count_node_start == + VP9_TX_MODE_COUNT_START) + coef_node_start = VP9_TX_MODE_START; + else if (coef_count_node_start == + VP9_SKIP_COUNT_START) + coef_node_start = VP9_SKIP_START; + else if (coef_count_node_start == + VP9_MV_SIGN_0_COUNT_START) + coef_node_start = VP9_MV_SIGN_0_START; + else if (coef_count_node_start == + VP9_MV_SIGN_1_COUNT_START) + coef_node_start = VP9_MV_SIGN_1_START; + else if (coef_count_node_start == + VP9_MV_BITS_0_COUNT_START) + coef_node_start = VP9_MV_BITS_0_START; + else if (coef_count_node_start == + VP9_MV_BITS_1_COUNT_START) + coef_node_start = VP9_MV_BITS_1_START; + else if (coef_count_node_start == + VP9_MV_CLASS0_HP_0_COUNT_START) + coef_node_start = VP9_MV_CLASS0_HP_0_START; + + den = count[coef_count_node_start] + + count[coef_count_node_start + 1]; + + prob_32 = prev_prob[coef_node_start / 4 * 2]; + prob_res = coef_node_start & 3; + prob_shift = prob_res * 8; + pre_prob = (prob_32 >> prob_shift) & 0xff; + + if (den == 0) { + new_prob = pre_prob; + } else { + m_count = den < MODE_MV_COUNT_SAT ? 
+ den : MODE_MV_COUNT_SAT; + get_prob = + clip_prob(div_r32(((int64_t) + count[coef_count_node_start] * 256 + + (den >> 1)), + den)); + + /* weighted prob */ + factor = count_to_update_factor[m_count]; + new_prob = + round_power_of_two(pre_prob * + (256 - factor) + + get_prob * factor, + 8); + } + + cur_prob[coef_node_start / 4 * 2] = + (cur_prob[coef_node_start / 4 * 2] & + (~(0xff << prob_shift))) | + (new_prob << prob_shift); + + coef_node_start = coef_node_start + 1; + } + + coef_node_start = VP9_INTER_MODE_START; + coef_count_node_start = VP9_INTER_MODE_COUNT_START; + for (tree_i = 0 ; tree_i < 7 ; tree_i++) { + for (node = 0 ; node < 3 ; node++) { + unsigned int start = coef_count_node_start; + + switch (node) { + case 2: + tree_left = count[start + 1]; + tree_right = count[start + 3]; + break; + case 1: + tree_left = count[start + 0]; + tree_right = count[start + 1] + + count[start + 3]; + break; + default: + tree_left = count[start + 2]; + tree_right = count[start + 0] + + count[start + 1] + + count[start + 3]; + break; + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, + tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + + coef_count_node_start = coef_count_node_start + 4; + } + + coef_node_start = VP9_IF_Y_MODE_START; + coef_count_node_start = VP9_IF_Y_MODE_COUNT_START; + for (tree_i = 0 ; tree_i < 14 ; tree_i++) { + for (node = 0 ; node < 9 ; node++) { + unsigned int start = coef_count_node_start; + + switch (node) { + case 8: + tree_left = + count[start + D153_PRED]; + tree_right = + count[start + D207_PRED]; + break; + case 7: + tree_left = + count[start + D63_PRED]; + tree_right = + count[start + D207_PRED] + + count[start + D153_PRED]; + break; + case 6: + tree_left = + count[start + D45_PRED]; + tree_right = + count[start + D207_PRED] + + count[start + D153_PRED] + + count[start + D63_PRED]; + break; + case 5: + tree_left = + count[start + D135_PRED]; + tree_right = + count[start + D117_PRED]; + break; + case 4: + tree_left = + count[start + H_PRED]; + tree_right = + count[start + D117_PRED] + + count[start + D135_PRED]; + break; + case 3: + tree_left = + count[start + H_PRED] + + count[start + D117_PRED] + + count[start + D135_PRED]; + tree_right = + count[start + D45_PRED] + + count[start + D207_PRED] + + count[start + D153_PRED] + + count[start + D63_PRED]; + break; + case 2: + tree_left = + count[start + V_PRED]; + tree_right = + count[start + H_PRED] + + count[start + D117_PRED] + + count[start + D135_PRED] + + count[start + D45_PRED] + + count[start + D207_PRED] + + count[start + D153_PRED] + + count[start + D63_PRED]; + break; + case 1: + tree_left = + count[start + TM_PRED]; + tree_right = + count[start + V_PRED] + + count[start + H_PRED] + + count[start + D117_PRED] + + count[start + D135_PRED] + + count[start + D45_PRED] + + count[start + D207_PRED] + + count[start + D153_PRED] + + count[start + D63_PRED]; + break; + default: + tree_left = + count[start + DC_PRED]; + tree_right = + count[start + TM_PRED] + + count[start + V_PRED] + + count[start + H_PRED] + + count[start + D117_PRED] + + count[start + D135_PRED] + + count[start + D45_PRED] + + count[start + D207_PRED] + + count[start + D153_PRED] + + count[start + D63_PRED]; + break; + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, + tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 10; + } + + coef_node_start = VP9_PARTITION_P_START; + 
coef_count_node_start = VP9_PARTITION_P_COUNT_START; + for (tree_i = 0 ; tree_i < 16 ; tree_i++) { + for (node = 0 ; node < 3 ; node++) { + unsigned int start = coef_count_node_start; + + switch (node) { + case 2: + tree_left = count[start + 2]; + tree_right = count[start + 3]; + break; + case 1: + tree_left = count[start + 1]; + tree_right = count[start + 2] + + count[start + 3]; + break; + default: + tree_left = count[start + 0]; + tree_right = count[start + 1] + + count[start + 2] + + count[start + 3]; + break; + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, + tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + + coef_count_node_start = coef_count_node_start + 4; + } + + coef_node_start = VP9_INTERP_START; + coef_count_node_start = VP9_INTERP_COUNT_START; + for (tree_i = 0 ; tree_i < 4 ; tree_i++) { + for (node = 0 ; node < 2 ; node++) { + unsigned int start = coef_count_node_start; + + switch (node) { + case 1: + tree_left = count[start + 1]; + tree_right = count[start + 2]; + break; + default: + tree_left = count[start + 0]; + tree_right = count[start + 1] + + count[start + 2]; + break; + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, + tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 3; + } + + coef_node_start = VP9_MV_JOINTS_START; + coef_count_node_start = VP9_MV_JOINTS_COUNT_START; + for (tree_i = 0 ; tree_i < 1 ; tree_i++) { + for (node = 0 ; node < 3 ; node++) { + unsigned int start = coef_count_node_start; + + switch (node) { + case 2: + tree_left = count[start + 2]; + tree_right = count[start + 3]; + break; + case 1: + tree_left = count[start + 1]; + tree_right = count[start + 2] + + count[start + 3]; + break; + default: + tree_left = count[start + 0]; + tree_right = count[start + 1] + + count[start + 2] + + count[start + 3]; + break; + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, + tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 4; + } + + for (mvd_i = 0 ; mvd_i < 2 ; mvd_i++) { + coef_node_start = mvd_i ? VP9_MV_CLASSES_1_START : + VP9_MV_CLASSES_0_START; + coef_count_node_start = mvd_i ? 
+ VP9_MV_CLASSES_1_COUNT_START : + VP9_MV_CLASSES_0_COUNT_START; + tree_i = 0; + for (node = 0; node < 10; node++) { + unsigned int start = coef_count_node_start; + + switch (node) { + case 9: + tree_left = count[start + 9]; + tree_right = count[start + 10]; + break; + case 8: + tree_left = count[start + 7]; + tree_right = count[start + 8]; + break; + case 7: + tree_left = count[start + 7] + + count[start + 8]; + tree_right = count[start + 9] + + count[start + 10]; + break; + case 6: + tree_left = count[start + 6]; + tree_right = count[start + 7] + + count[start + 8] + + count[start + 9] + + count[start + 10]; + break; + case 5: + tree_left = count[start + 4]; + tree_right = count[start + 5]; + break; + case 4: + tree_left = count[start + 4] + + count[start + 5]; + tree_right = count[start + 6] + + count[start + 7] + + count[start + 8] + + count[start + 9] + + count[start + 10]; + break; + case 3: + tree_left = count[start + 2]; + tree_right = count[start + 3]; + break; + case 2: + tree_left = count[start + 2] + + count[start + 3]; + tree_right = count[start + 4] + + count[start + 5] + + count[start + 6] + + count[start + 7] + + count[start + 8] + + count[start + 9] + + count[start + 10]; + break; + case 1: + tree_left = count[start + 1]; + tree_right = count[start + 2] + + count[start + 3] + + count[start + 4] + + count[start + 5] + + count[start + 6] + + count[start + 7] + + count[start + 8] + + count[start + 9] + + count[start + 10]; + break; + default: + tree_left = count[start + 0]; + tree_right = count[start + 1] + + count[start + 2] + + count[start + 3] + + count[start + 4] + + count[start + 5] + + count[start + 6] + + count[start + 7] + + count[start + 8] + + count[start + 9] + + count[start + 10]; + break; + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, + tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + + coef_node_start = mvd_i ? VP9_MV_CLASS0_1_START : + VP9_MV_CLASS0_0_START; + coef_count_node_start = mvd_i ? + VP9_MV_CLASS0_1_COUNT_START : + VP9_MV_CLASS0_0_COUNT_START; + tree_i = 0; + node = 0; + tree_left = count[coef_count_node_start + 0]; + tree_right = count[coef_count_node_start + 1]; + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, + tree_left, tree_right, + tree_i, node); + coef_node_start = mvd_i ? VP9_MV_CLASS0_FP_1_START : + VP9_MV_CLASS0_FP_0_START; + coef_count_node_start = mvd_i ? 
+ VP9_MV_CLASS0_FP_1_COUNT_START : + VP9_MV_CLASS0_FP_0_COUNT_START; + + for (tree_i = 0; tree_i < 3; tree_i++) { + for (node = 0; node < 3; node++) { + unsigned int start = + coef_count_node_start; + switch (node) { + case 2: + tree_left = count[start + 2]; + tree_right = count[start + 3]; + break; + case 1: + tree_left = count[start + 1]; + tree_right = count[start + 2] + + count[start + 3]; + break; + default: + tree_left = count[start + 0]; + tree_right = count[start + 1] + + count[start + 2] + + count[start + 3]; + break; + } + + vp9_tree_merge_probs(prev_prob, + cur_prob, + coef_node_start, + tree_left, + tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = + coef_count_node_start + 4; + } + } + } +} + +static irqreturn_t codec_vp9_threaded_isr(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + struct codec_vp9 *vp9 = sess->priv; + u32 dec_status = amvdec_read_dos(core, VP9_DEC_STATUS_REG); + u32 prob_status = amvdec_read_dos(core, VP9_ADAPT_PROB_REG); + int i; + + if (!vp9) + return IRQ_HANDLED; + + mutex_lock(&vp9->lock); + if (dec_status != VP9_HEAD_PARSER_DONE) { + dev_err(core->dev_dec, "Unrecognized dec_status: %08X\n", + dec_status); + amvdec_abort(sess); + goto unlock; + } + + pr_debug("ISR: %08X;%08X\n", dec_status, prob_status); + sess->keyframe_found = 1; + + if ((prob_status & 0xff) == 0xfd && vp9->cur_frame) { + /* VP9_REQ_ADAPT_PROB */ + u8 *prev_prob_b = ((u8 *)vp9->workspace_vaddr + + PROB_OFFSET) + + ((prob_status >> 8) * 0x1000); + u8 *cur_prob_b = ((u8 *)vp9->workspace_vaddr + + PROB_OFFSET) + 0x4000; + u8 *count_b = (u8 *)vp9->workspace_vaddr + + COUNT_OFFSET; + int last_frame_type = vp9->prev_frame ? + vp9->prev_frame->type : + KEY_FRAME; + + adapt_coef_probs(last_frame_type == KEY_FRAME, + vp9->cur_frame->type == KEY_FRAME ? 
1 : 0, + prob_status >> 8, + (unsigned int *)prev_prob_b, + (unsigned int *)cur_prob_b, + (unsigned int *)count_b); + + memcpy(prev_prob_b, cur_prob_b, ADAPT_PROB_SIZE); + amvdec_write_dos(core, VP9_ADAPT_PROB_REG, 0); + } + + /* Invalidate first 3 refs */ + for (i = 0; i < REFS_PER_FRAME ; ++i) + vp9->frame_refs[i] = NULL; + + vp9->prev_frame = vp9->cur_frame; + codec_vp9_update_ref(vp9); + + codec_vp9_fetch_rpm(sess); + if (codec_vp9_process_rpm(vp9)) { + amvdec_src_change(sess, vp9->width, vp9->height, 16); + + /* No frame is actually processed */ + vp9->cur_frame = NULL; + + /* Show the remaining frame */ + codec_vp9_show_frame(sess); + + /* FIXME: Save refs for resized frame */ + if (vp9->frames_num) + codec_vp9_save_refs(vp9); + + goto unlock; + } + + codec_vp9_process_lf(vp9); + codec_vp9_process_frame(sess); + codec_vp9_show_frame(sess); + +unlock: + mutex_unlock(&vp9->lock); + return IRQ_HANDLED; +} + +static irqreturn_t codec_vp9_isr(struct amvdec_session *sess) +{ + return IRQ_WAKE_THREAD; +} + +struct amvdec_codec_ops codec_vp9_ops = { + .start = codec_vp9_start, + .stop = codec_vp9_stop, + .isr = codec_vp9_isr, + .threaded_isr = codec_vp9_threaded_isr, + .num_pending_bufs = codec_vp9_num_pending_bufs, + .drain = codec_vp9_flush_output, + .resume = codec_vp9_resume, +}; diff --git a/drivers/staging/media/meson/vdec/codec_vp9.h b/drivers/staging/media/meson/vdec/codec_vp9.h new file mode 100644 index 000000000000..62db65a2b939 --- /dev/null +++ b/drivers/staging/media/meson/vdec/codec_vp9.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2018 Maxime Jourdan <maxi.jourdan@wanadoo.fr> + */ + +#ifndef __MESON_VDEC_CODEC_VP9_H_ +#define __MESON_VDEC_CODEC_VP9_H_ + +#include "vdec.h" + +extern struct amvdec_codec_ops codec_vp9_ops; + +#endif diff --git a/drivers/staging/media/meson/vdec/esparser.c b/drivers/staging/media/meson/vdec/esparser.c index 95102a4bdc62..db7022707ff8 100644 --- a/drivers/staging/media/meson/vdec/esparser.c +++ b/drivers/staging/media/meson/vdec/esparser.c @@ -52,6 +52,7 @@ #define PARSER_VIDEO_HOLE 0x90 #define SEARCH_PATTERN_LEN 512 +#define VP9_HEADER_SIZE 16 static DECLARE_WAIT_QUEUE_HEAD(wq); static int search_done; @@ -74,27 +75,140 @@ static irqreturn_t esparser_isr(int irq, void *dev) return IRQ_HANDLED; } +/* + * VP9 frame headers need to be appended by a 16-byte long + * Amlogic custom header + */ +static int vp9_update_header(struct amvdec_core *core, struct vb2_buffer *buf) +{ + u8 *dp; + u8 marker; + int dsize; + int num_frames, cur_frame; + int cur_mag, mag, mag_ptr; + int frame_size[8], tot_frame_size[8]; + int total_datasize = 0; + int new_frame_size; + unsigned char *old_header = NULL; + + dp = (uint8_t *)vb2_plane_vaddr(buf, 0); + dsize = vb2_get_plane_payload(buf, 0); + + if (dsize == vb2_plane_size(buf, 0)) { + dev_warn(core->dev, "%s: unable to update header\n", __func__); + return 0; + } + + marker = dp[dsize - 1]; + if ((marker & 0xe0) == 0xc0) { + num_frames = (marker & 0x7) + 1; + mag = ((marker >> 3) & 0x3) + 1; + mag_ptr = dsize - mag * num_frames - 2; + if (dp[mag_ptr] != marker) + return 0; + + mag_ptr++; + for (cur_frame = 0; cur_frame < num_frames; cur_frame++) { + frame_size[cur_frame] = 0; + for (cur_mag = 0; cur_mag < mag; cur_mag++) { + frame_size[cur_frame] |= + (dp[mag_ptr] << (cur_mag * 8)); + mag_ptr++; + } + if (cur_frame == 0) + tot_frame_size[cur_frame] = + frame_size[cur_frame]; + else + tot_frame_size[cur_frame] = + tot_frame_size[cur_frame - 1] + + frame_size[cur_frame]; + 
total_datasize += frame_size[cur_frame]; + } + } else { + num_frames = 1; + frame_size[0] = dsize; + tot_frame_size[0] = dsize; + total_datasize = dsize; + } + + new_frame_size = total_datasize + num_frames * VP9_HEADER_SIZE; + + if (new_frame_size >= vb2_plane_size(buf, 0)) { + dev_warn(core->dev, "%s: unable to update header\n", __func__); + return 0; + } + + for (cur_frame = num_frames - 1; cur_frame >= 0; cur_frame--) { + int framesize = frame_size[cur_frame]; + int framesize_header = framesize + 4; + int oldframeoff = tot_frame_size[cur_frame] - framesize; + int outheaderoff = oldframeoff + cur_frame * VP9_HEADER_SIZE; + u8 *fdata = dp + outheaderoff; + u8 *old_framedata = dp + oldframeoff; + + memmove(fdata + VP9_HEADER_SIZE, old_framedata, framesize); + + fdata[0] = (framesize_header >> 24) & 0xff; + fdata[1] = (framesize_header >> 16) & 0xff; + fdata[2] = (framesize_header >> 8) & 0xff; + fdata[3] = (framesize_header >> 0) & 0xff; + fdata[4] = ((framesize_header >> 24) & 0xff) ^ 0xff; + fdata[5] = ((framesize_header >> 16) & 0xff) ^ 0xff; + fdata[6] = ((framesize_header >> 8) & 0xff) ^ 0xff; + fdata[7] = ((framesize_header >> 0) & 0xff) ^ 0xff; + fdata[8] = 0; + fdata[9] = 0; + fdata[10] = 0; + fdata[11] = 1; + fdata[12] = 'A'; + fdata[13] = 'M'; + fdata[14] = 'L'; + fdata[15] = 'V'; + + if (!old_header) { + /* nothing */ + } else if (old_header > fdata + 16 + framesize) { + dev_dbg(core->dev, "%s: data has gaps, setting to 0\n", + __func__); + memset(fdata + 16 + framesize, 0, + (old_header - fdata + 16 + framesize)); + } else if (old_header < fdata + 16 + framesize) { + dev_err(core->dev, "%s: data overwritten\n", __func__); + } + old_header = fdata; + } + + return new_frame_size; +} + /* Pad the packet to at least 4KiB bytes otherwise the VDEC unit won't trigger * ISRs. * Also append a start code 000001ff at the end to trigger * the ESPARSER interrupt. 
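 * The padding and the search window are zeroed first, so the 00 00 01 ff
 * pattern written at the end of the padded packet is the only start code
 * present past the payload.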
*/ -static u32 esparser_pad_start_code(struct vb2_buffer *vb) +static u32 esparser_pad_start_code(struct amvdec_core *core, + struct vb2_buffer *vb, + u32 payload_size) { - u32 payload_size = vb2_get_plane_payload(vb, 0); u32 pad_size = 0; - u8 *vaddr = vb2_plane_vaddr(vb, 0) + payload_size; + u8 *vaddr = vb2_plane_vaddr(vb, 0); if (payload_size < ESPARSER_MIN_PACKET_SIZE) { pad_size = ESPARSER_MIN_PACKET_SIZE - payload_size; - memset(vaddr, 0, pad_size); + memset(vaddr + payload_size, 0, pad_size); + } + + if ((payload_size + pad_size + SEARCH_PATTERN_LEN) > + vb2_plane_size(vb, 0)) { + dev_warn(core->dev, "%s: unable to pad start code\n", __func__); + return pad_size; } - memset(vaddr + pad_size, 0, SEARCH_PATTERN_LEN); - vaddr[pad_size] = 0x00; - vaddr[pad_size + 1] = 0x00; - vaddr[pad_size + 2] = 0x01; - vaddr[pad_size + 3] = 0xff; + memset(vaddr + payload_size + pad_size, 0, SEARCH_PATTERN_LEN); + vaddr[payload_size + pad_size] = 0x00; + vaddr[payload_size + pad_size + 1] = 0x00; + vaddr[payload_size + pad_size + 2] = 0x01; + vaddr[payload_size + pad_size + 3] = 0xff; return pad_size; } @@ -181,30 +295,60 @@ esparser_queue(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf) struct vb2_buffer *vb = &vbuf->vb2_buf; struct amvdec_core *core = sess->core; struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops; - u32 num_dst_bufs = 0; u32 payload_size = vb2_get_plane_payload(vb, 0); dma_addr_t phy = vb2_dma_contig_plane_dma_addr(vb, 0); + u32 num_dst_bufs = 0; u32 offset; u32 pad_size; - if (codec_ops->num_pending_bufs) - num_dst_bufs = codec_ops->num_pending_bufs(sess); - - num_dst_bufs += v4l2_m2m_num_dst_bufs_ready(sess->m2m_ctx); - - if (esparser_vififo_get_free_space(sess) < payload_size || - atomic_read(&sess->esparser_queued_bufs) >= num_dst_bufs) + /* + * When max ref frame is held by VP9, this should be -= 3 to prevent a + * shortage of CAPTURE buffers on the decoder side. + * For the future, a good enhancement of the way this is handled could + * be to notify new capture buffers to the decoding modules, so that + * they could pause when there is no capture buffer available and + * resume on this notification. 
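+ * For now, three CAPTURE buffers are always kept in reserve before more
+ * bitstream is queued, which is likely meant to cover the references a
+ * VP9 frame can keep pinned.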
+ */ + if (sess->fmt_out->pixfmt == V4L2_PIX_FMT_VP9) { + if (codec_ops->num_pending_bufs) + num_dst_bufs = codec_ops->num_pending_bufs(sess); + + num_dst_bufs += v4l2_m2m_num_dst_bufs_ready(sess->m2m_ctx); + if (sess->fmt_out->pixfmt == V4L2_PIX_FMT_VP9) + num_dst_bufs -= 3; + + if (esparser_vififo_get_free_space(sess) < payload_size || + atomic_read(&sess->esparser_queued_bufs) >= num_dst_bufs) + return -EAGAIN; + } else if (esparser_vififo_get_free_space(sess) < payload_size) { return -EAGAIN; + } v4l2_m2m_src_buf_remove_by_buf(sess->m2m_ctx, vbuf); offset = esparser_get_offset(sess); - amvdec_add_ts_reorder(sess, vb->timestamp, offset); - dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X\n", - vb->timestamp, payload_size, offset); + amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags); + dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X flags = %08X\n", + vb->timestamp, payload_size, offset, vbuf->flags); + + vbuf->flags = 0; + vbuf->field = V4L2_FIELD_NONE; + vbuf->sequence = sess->sequence_out++; + + if (sess->fmt_out->pixfmt == V4L2_PIX_FMT_VP9) { + payload_size = vp9_update_header(core, vb); - pad_size = esparser_pad_start_code(vb); + /* If unable to alter buffer to add headers */ + if (payload_size == 0) { + amvdec_remove_ts(sess, vb->timestamp); + v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); + + return 0; + } + } + + pad_size = esparser_pad_start_code(core, vb, payload_size); ret = esparser_write_data(core, phy, payload_size + pad_size); if (ret <= 0) { @@ -216,19 +360,7 @@ esparser_queue(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf) return 0; } - /* We need to wait until we parse the first keyframe. - * All buffers prior to the first keyframe must be dropped. - */ - if (!sess->keyframe_found) - usleep_range(1000, 2000); - - if (sess->keyframe_found) - atomic_inc(&sess->esparser_queued_bufs); - else - amvdec_remove_ts(sess, vb->timestamp); - - vbuf->flags = 0; - vbuf->field = V4L2_FIELD_NONE; + atomic_inc(&sess->esparser_queued_bufs); v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE); return 0; diff --git a/drivers/staging/media/meson/vdec/hevc_regs.h b/drivers/staging/media/meson/vdec/hevc_regs.h new file mode 100644 index 000000000000..0392f41a1eed --- /dev/null +++ b/drivers/staging/media/meson/vdec/hevc_regs.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. 
+ */ + +#ifndef __MESON_VDEC_HEVC_REGS_H_ +#define __MESON_VDEC_HEVC_REGS_H_ + +#define HEVC_ASSIST_MMU_MAP_ADDR 0xc024 + +#define HEVC_ASSIST_MBOX1_CLR_REG 0xc1d4 +#define HEVC_ASSIST_MBOX1_MASK 0xc1d8 + +#define HEVC_ASSIST_SCRATCH_0 0xc300 +#define HEVC_ASSIST_SCRATCH_1 0xc304 +#define HEVC_ASSIST_SCRATCH_2 0xc308 +#define HEVC_ASSIST_SCRATCH_3 0xc30c +#define HEVC_ASSIST_SCRATCH_4 0xc310 +#define HEVC_ASSIST_SCRATCH_5 0xc314 +#define HEVC_ASSIST_SCRATCH_6 0xc318 +#define HEVC_ASSIST_SCRATCH_7 0xc31c +#define HEVC_ASSIST_SCRATCH_8 0xc320 +#define HEVC_ASSIST_SCRATCH_9 0xc324 +#define HEVC_ASSIST_SCRATCH_A 0xc328 +#define HEVC_ASSIST_SCRATCH_B 0xc32c +#define HEVC_ASSIST_SCRATCH_C 0xc330 +#define HEVC_ASSIST_SCRATCH_D 0xc334 +#define HEVC_ASSIST_SCRATCH_E 0xc338 +#define HEVC_ASSIST_SCRATCH_F 0xc33c +#define HEVC_ASSIST_SCRATCH_G 0xc340 +#define HEVC_ASSIST_SCRATCH_H 0xc344 +#define HEVC_ASSIST_SCRATCH_I 0xc348 +#define HEVC_ASSIST_SCRATCH_J 0xc34c +#define HEVC_ASSIST_SCRATCH_K 0xc350 +#define HEVC_ASSIST_SCRATCH_L 0xc354 +#define HEVC_ASSIST_SCRATCH_M 0xc358 +#define HEVC_ASSIST_SCRATCH_N 0xc35c + +#define HEVC_PARSER_VERSION 0xc400 +#define HEVC_STREAM_CONTROL 0xc404 +#define HEVC_STREAM_START_ADDR 0xc408 +#define HEVC_STREAM_END_ADDR 0xc40c +#define HEVC_STREAM_WR_PTR 0xc410 +#define HEVC_STREAM_RD_PTR 0xc414 +#define HEVC_STREAM_LEVEL 0xc418 +#define HEVC_STREAM_FIFO_CTL 0xc41c +#define HEVC_SHIFT_CONTROL 0xc420 +#define HEVC_SHIFT_STARTCODE 0xc424 +#define HEVC_SHIFT_EMULATECODE 0xc428 +#define HEVC_SHIFT_STATUS 0xc42c +#define HEVC_SHIFTED_DATA 0xc430 +#define HEVC_SHIFT_BYTE_COUNT 0xc434 +#define HEVC_SHIFT_COMMAND 0xc438 +#define HEVC_ELEMENT_RESULT 0xc43c +#define HEVC_CABAC_CONTROL 0xc440 +#define HEVC_PARSER_SLICE_INFO 0xc444 +#define HEVC_PARSER_CMD_WRITE 0xc448 +#define HEVC_PARSER_CORE_CONTROL 0xc44c +#define HEVC_PARSER_CMD_FETCH 0xc450 +#define HEVC_PARSER_CMD_STATUS 0xc454 +#define HEVC_PARSER_LCU_INFO 0xc458 +#define HEVC_PARSER_HEADER_INFO 0xc45c +#define HEVC_PARSER_INT_CONTROL 0xc480 +#define HEVC_PARSER_INT_STATUS 0xc484 +#define HEVC_PARSER_IF_CONTROL 0xc488 +#define HEVC_PARSER_PICTURE_SIZE 0xc48c +#define HEVC_PARSER_LCU_START 0xc490 +#define HEVC_PARSER_HEADER_INFO2 0xc494 +#define HEVC_PARSER_QUANT_READ 0xc498 +#define HEVC_PARSER_RESERVED_27 0xc49c +#define HEVC_PARSER_CMD_SKIP_0 0xc4a0 +#define HEVC_PARSER_CMD_SKIP_1 0xc4a4 +#define HEVC_PARSER_CMD_SKIP_2 0xc4a8 +#define HEVC_SAO_IF_STATUS 0xc4c0 +#define HEVC_SAO_IF_DATA_Y 0xc4c4 +#define HEVC_SAO_IF_DATA_U 0xc4c8 +#define HEVC_SAO_IF_DATA_V 0xc4cc +#define HEVC_STREAM_SWAP_ADDR 0xc4d0 +#define HEVC_STREAM_SWAP_CTRL 0xc4d4 +#define HEVC_IQIT_IF_WAIT_CNT 0xc4d8 +#define HEVC_MPRED_IF_WAIT_CNT 0xc4dc +#define HEVC_SAO_IF_WAIT_CNT 0xc4e0 + +#define HEVC_MPRED_VERSION 0xc800 +#define HEVC_MPRED_CTRL0 0xc804 + #define MPRED_CTRL0_NEW_PIC BIT(2) + #define MPRED_CTRL0_NEW_TILE BIT(3) + #define MPRED_CTRL0_NEW_SLI_SEG BIT(4) + #define MPRED_CTRL0_TMVP BIT(5) + #define MPRED_CTRL0_LDC BIT(6) + #define MPRED_CTRL0_COL_FROM_L0 BIT(7) + #define MPRED_CTRL0_ABOVE_EN BIT(9) + #define MPRED_CTRL0_MV_WR_EN BIT(10) + #define MPRED_CTRL0_MV_RD_EN BIT(11) + #define MPRED_CTRL0_BUF_LINEAR BIT(13) +#define HEVC_MPRED_CTRL1 0xc808 +#define HEVC_MPRED_INT_EN 0xc80c +#define HEVC_MPRED_INT_STATUS 0xc810 +#define HEVC_MPRED_PIC_SIZE 0xc814 +#define HEVC_MPRED_PIC_SIZE_LCU 0xc818 +#define HEVC_MPRED_TILE_START 0xc81c +#define HEVC_MPRED_TILE_SIZE_LCU 0xc820 +#define HEVC_MPRED_REF_NUM 0xc824 +#define HEVC_MPRED_REF_EN_L0 0xc830 
+#define HEVC_MPRED_REF_EN_L1 0xc834 +#define HEVC_MPRED_COLREF_EN_L0 0xc838 +#define HEVC_MPRED_COLREF_EN_L1 0xc83c +#define HEVC_MPRED_AXI_WCTRL 0xc840 +#define HEVC_MPRED_AXI_RCTRL 0xc844 +#define HEVC_MPRED_ABV_START_ADDR 0xc848 +#define HEVC_MPRED_MV_WR_START_ADDR 0xc84c +#define HEVC_MPRED_MV_RD_START_ADDR 0xc850 +#define HEVC_MPRED_MV_WPTR 0xc854 +#define HEVC_MPRED_MV_RPTR 0xc858 +#define HEVC_MPRED_MV_WR_ROW_JUMP 0xc85c +#define HEVC_MPRED_MV_RD_ROW_JUMP 0xc860 +#define HEVC_MPRED_CURR_LCU 0xc864 +#define HEVC_MPRED_ABV_WPTR 0xc868 +#define HEVC_MPRED_ABV_RPTR 0xc86c +#define HEVC_MPRED_CTRL2 0xc870 +#define HEVC_MPRED_CTRL3 0xc874 +#define HEVC_MPRED_L0_REF00_POC 0xc880 +#define HEVC_MPRED_L1_REF00_POC 0xc8c0 + +#define HEVC_MPRED_CTRL4 0xc930 + +#define HEVC_MPRED_CUR_POC 0xc980 +#define HEVC_MPRED_COL_POC 0xc984 +#define HEVC_MPRED_MV_RD_END_ADDR 0xc988 + +#define HEVC_MSP 0xcc00 +#define HEVC_MPSR 0xcc04 +#define HEVC_MCPU_INTR_MSK 0xcc10 +#define HEVC_MCPU_INTR_REQ 0xcc14 +#define HEVC_CPSR 0xcc84 + +#define HEVC_IMEM_DMA_CTRL 0xcd00 +#define HEVC_IMEM_DMA_ADR 0xcd04 +#define HEVC_IMEM_DMA_COUNT 0xcd08 + +#define HEVCD_IPP_TOP_CNTL 0xd000 +#define HEVCD_IPP_LINEBUFF_BASE 0xd024 +#define HEVCD_IPP_AXIIF_CONFIG 0xd02c + +#define VP9D_MPP_REF_SCALE_ENBL 0xd104 +#define VP9D_MPP_REFINFO_TBL_ACCCONFIG 0xd108 +#define VP9D_MPP_REFINFO_DATA 0xd10c + +#define HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR 0xd180 +#define HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR 0xd184 +#define HEVCD_MPP_ANC2AXI_TBL_DATA 0xd190 + +#define HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR 0xd300 +#define HEVCD_MPP_ANC_CANVAS_DATA_ADDR 0xd304 +#define HEVCD_MPP_DECOMP_CTL1 0xd308 +#define HEVCD_MPP_DECOMP_CTL2 0xd30c +#define HEVCD_MCRCC_CTL1 0xd3c0 +#define HEVCD_MCRCC_CTL2 0xd3c4 +#define HEVCD_MCRCC_CTL3 0xd3c8 + +#define HEVC_DBLK_CFG0 0xd400 +#define HEVC_DBLK_CFG1 0xd404 +#define HEVC_DBLK_CFG2 0xd408 +#define HEVC_DBLK_CFG3 0xd40c +#define HEVC_DBLK_CFG4 0xd410 +#define HEVC_DBLK_CFG5 0xd414 +#define HEVC_DBLK_CFG6 0xd418 +#define HEVC_DBLK_CFG7 0xd41c +#define HEVC_DBLK_CFG8 0xd420 +#define HEVC_DBLK_CFG9 0xd424 +#define HEVC_DBLK_CFGA 0xd428 +#define HEVC_DBLK_STS0 0xd42c +#define HEVC_DBLK_CFGB 0xd42c +#define HEVC_DBLK_STS1 0xd430 +#define HEVC_DBLK_CFGE 0xd438 + +#define HEVC_SAO_VERSION 0xd800 +#define HEVC_SAO_CTRL0 0xd804 +#define HEVC_SAO_CTRL1 0xd808 +#define HEVC_SAO_PIC_SIZE 0xd814 +#define HEVC_SAO_PIC_SIZE_LCU 0xd818 +#define HEVC_SAO_TILE_START 0xd81c +#define HEVC_SAO_TILE_SIZE_LCU 0xd820 +#define HEVC_SAO_Y_START_ADDR 0xd82c +#define HEVC_SAO_Y_LENGTH 0xd830 +#define HEVC_SAO_C_START_ADDR 0xd834 +#define HEVC_SAO_C_LENGTH 0xd838 +#define HEVC_SAO_Y_WPTR 0xd83c +#define HEVC_SAO_C_WPTR 0xd840 +#define HEVC_SAO_ABV_START_ADDR 0xd844 +#define HEVC_SAO_VB_WR_START_ADDR 0xd848 +#define HEVC_SAO_VB_RD_START_ADDR 0xd84c +#define HEVC_SAO_ABV_WPTR 0xd850 +#define HEVC_SAO_ABV_RPTR 0xd854 +#define HEVC_SAO_VB_WPTR 0xd858 +#define HEVC_SAO_VB_RPTR 0xd85c +#define HEVC_SAO_CTRL2 0xd880 +#define HEVC_SAO_CTRL3 0xd884 +#define HEVC_SAO_CTRL4 0xd888 +#define HEVC_SAO_CTRL5 0xd88c +#define HEVC_SAO_CTRL6 0xd890 +#define HEVC_SAO_CTRL7 0xd894 +#define HEVC_CM_BODY_START_ADDR 0xd898 +#define HEVC_CM_BODY_LENGTH 0xd89c +#define HEVC_CM_HEADER_START_ADDR 0xd8a0 +#define HEVC_CM_HEADER_LENGTH 0xd8a4 +#define HEVC_CM_HEADER_OFFSET 0xd8ac +#define HEVC_SAO_MMU_VH0_ADDR 0xd8e8 +#define HEVC_SAO_MMU_VH1_ADDR 0xd8ec + +#define HEVC_IQIT_CLK_RST_CTRL 0xdc00 +#define HEVC_IQIT_SCALELUT_WR_ADDR 0xdc08 +#define HEVC_IQIT_SCALELUT_RD_ADDR 0xdc0c 
+#define HEVC_IQIT_SCALELUT_DATA 0xdc10 + +#define HEVC_PSCALE_CTRL 0xe444 + +#endif diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c index 5c5dabed2f09..3040136ceb77 100644 --- a/drivers/staging/media/meson/vdec/vdec.c +++ b/drivers/staging/media/meson/vdec/vdec.c @@ -168,7 +168,10 @@ static void process_num_buffers(struct vb2_queue *q, { const struct amvdec_format *fmt_out = sess->fmt_out; unsigned int buffers_total = q->num_buffers + *num_buffers; + u32 min_buf_capture = v4l2_ctrl_g_ctrl(sess->ctrl_min_buf_capture); + if (q->num_buffers + *num_buffers < min_buf_capture) + *num_buffers = min_buf_capture - q->num_buffers; if (is_reqbufs && buffers_total < fmt_out->min_buffers) *num_buffers = fmt_out->min_buffers - q->num_buffers; if (buffers_total > fmt_out->max_buffers) @@ -193,7 +196,8 @@ static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, if (*num_planes) { switch (q->type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - if (*num_planes != 1 || sizes[0] < output_size) + if (*num_planes != 1 || + sizes[0] < sess->src_buffer_size) return -EINVAL; break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: @@ -224,7 +228,7 @@ static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, switch (q->type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - sizes[0] = amvdec_get_output_size(sess); + sizes[0] = sess->src_buffer_size; *num_planes = 1; break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: @@ -250,6 +254,7 @@ static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, return -EINVAL; } + sess->changed_format = 1; return 0; } @@ -261,10 +266,11 @@ static void vdec_vb2_buf_queue(struct vb2_buffer *vb) v4l2_m2m_buf_queue(m2m_ctx, vbuf); - if (!sess->streamon_out || !sess->streamon_cap) + if (!sess->streamon_out) return; - if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + if (sess->streamon_cap && + vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && vdec_codec_needs_recycle(sess)) vdec_queue_recycle(sess, vb); @@ -289,16 +295,22 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count) else sess->streamon_cap = 1; - if (!sess->streamon_out || !sess->streamon_cap) + if (!sess->streamon_out) return 0; if (sess->status == STATUS_NEEDS_RESUME && - q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + sess->changed_format) { codec_ops->resume(sess); sess->status = STATUS_RUNNING; return 0; } + if (sess->status == STATUS_RUNNING || + sess->status == STATUS_NEEDS_RESUME || + sess->status == STATUS_INIT) + return 0; + sess->vififo_size = SIZE_VIFIFO; sess->vififo_vaddr = dma_alloc_coherent(sess->core->dev, sess->vififo_size, @@ -323,13 +335,14 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count) goto vififo_free; sess->sequence_cap = 0; + sess->sequence_out = 0; if (vdec_codec_needs_recycle(sess)) sess->recycle_thread = kthread_run(vdec_recycle_thread, sess, "vdec_recycle"); - sess->status = STATUS_RUNNING; + sess->status = STATUS_INIT; core->cur_sess = sess; - + schedule_work(&sess->esparser_queue_work); return 0; vififo_free: @@ -382,10 +395,12 @@ static void vdec_reset_bufs_recycle(struct amvdec_session *sess) static void vdec_stop_streaming(struct vb2_queue *q) { struct amvdec_session *sess = vb2_get_drv_priv(q); + struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops; struct amvdec_core *core = sess->core; struct vb2_v4l2_buffer *buf; if (sess->status == STATUS_RUNNING || + sess->status == STATUS_INIT || 
(sess->status == STATUS_NEEDS_RESUME && (!sess->streamon_out || !sess->streamon_cap))) { if (vdec_codec_needs_recycle(sess)) @@ -409,6 +424,10 @@ static void vdec_stop_streaming(struct vb2_queue *q) sess->streamon_out = 0; } else { + /* Drain remaining refs if was still running */ + if (sess->status >= STATUS_RUNNING && codec_ops->drain) + codec_ops->drain(sess); + while ((buf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx))) v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR); @@ -476,20 +495,34 @@ vdec_try_fmt_common(struct amvdec_session *sess, u32 size, struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp; struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt; const struct amvdec_format *fmts = sess->core->platform->formats; - const struct amvdec_format *fmt_out; + const struct amvdec_format *fmt_out = NULL; + u32 output_size = 0; memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved)); memset(pixmp->reserved, 0, sizeof(pixmp->reserved)); - if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + switch (f->type) { + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: fmt_out = find_format(fmts, size, pixmp->pixelformat); if (!fmt_out) { pixmp->pixelformat = V4L2_PIX_FMT_MPEG2; fmt_out = find_format(fmts, size, pixmp->pixelformat); } + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + fmt_out = sess->fmt_out; + break; + default: + return NULL; + } + + pixmp->width = clamp(pixmp->width, (u32)256, fmt_out->max_width); + pixmp->height = clamp(pixmp->height, (u32)144, fmt_out->max_height); + output_size = get_output_size(pixmp->width, pixmp->height); - pfmt[0].sizeimage = - get_output_size(pixmp->width, pixmp->height); + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + if (!pfmt[0].sizeimage) + pfmt[0].sizeimage = sess->src_buffer_size; pfmt[0].bytesperline = 0; pixmp->num_planes = 1; } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { @@ -499,35 +532,25 @@ vdec_try_fmt_common(struct amvdec_session *sess, u32 size, memset(pfmt[1].reserved, 0, sizeof(pfmt[1].reserved)); if (pixmp->pixelformat == V4L2_PIX_FMT_NV12M) { - pfmt[0].sizeimage = - get_output_size(pixmp->width, pixmp->height); - pfmt[0].bytesperline = ALIGN(pixmp->width, 64); + pfmt[0].sizeimage = output_size; + pfmt[0].bytesperline = ALIGN(pixmp->width, 32); - pfmt[1].sizeimage = - get_output_size(pixmp->width, pixmp->height) / 2; - pfmt[1].bytesperline = ALIGN(pixmp->width, 64); + pfmt[1].sizeimage = output_size / 2; + pfmt[1].bytesperline = ALIGN(pixmp->width, 32); pixmp->num_planes = 2; } else if (pixmp->pixelformat == V4L2_PIX_FMT_YUV420M) { - pfmt[0].sizeimage = - get_output_size(pixmp->width, pixmp->height); - pfmt[0].bytesperline = ALIGN(pixmp->width, 64); + pfmt[0].sizeimage = output_size; + pfmt[0].bytesperline = ALIGN(pixmp->width, 32); - pfmt[1].sizeimage = - get_output_size(pixmp->width, pixmp->height) / 4; - pfmt[1].bytesperline = ALIGN(pixmp->width, 64) / 2; + pfmt[1].sizeimage = output_size / 4; + pfmt[1].bytesperline = ALIGN(pixmp->width, 32) / 2; - pfmt[2].sizeimage = - get_output_size(pixmp->width, pixmp->height) / 4; - pfmt[2].bytesperline = ALIGN(pixmp->width, 64) / 2; + pfmt[2].sizeimage = output_size / 2; + pfmt[2].bytesperline = ALIGN(pixmp->width, 32) / 2; pixmp->num_planes = 3; } - } else { - return NULL; } - pixmp->width = clamp(pixmp->width, (u32)256, fmt_out->max_width); - pixmp->height = clamp(pixmp->height, (u32)144, fmt_out->max_height); - if (pixmp->field == V4L2_FIELD_ANY) pixmp->field = V4L2_FIELD_NONE; @@ -586,6 +609,8 @@ static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f) orig_pixmp = 
*pixmp; fmt_out = vdec_try_fmt_common(sess, num_formats, f); + if (!fmt_out) + return -EINVAL; if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { pixfmt_out = pixmp->pixelformat; @@ -610,6 +635,7 @@ static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f) sess->ycbcr_enc = pixmp->ycbcr_enc; sess->quantization = pixmp->quantization; sess->xfer_func = pixmp->xfer_func; + sess->src_buffer_size = pixmp->plane_fmt[0].sizeimage; } memset(&format, 0, sizeof(format)); @@ -701,29 +727,31 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) if (!(sess->streamon_out & sess->streamon_cap)) return 0; - /* Currently not handled since we do not support dynamic resolution - * for MPEG2. We consider both queues streaming to mean that the - * decoding session is started - */ - if (cmd->cmd == V4L2_DEC_CMD_START) + if (cmd->cmd == V4L2_DEC_CMD_START) { + v4l2_m2m_clear_state(sess->m2m_ctx); + sess->should_stop = 0; return 0; + } /* Should not happen */ if (cmd->cmd != V4L2_DEC_CMD_STOP) return -EINVAL; dev_dbg(dev, "Received V4L2_DEC_CMD_STOP\n"); + sess->should_stop = 1; - vdec_wait_inactive(sess); + v4l2_m2m_mark_stopped(sess->m2m_ctx); if (codec_ops->drain) { + vdec_wait_inactive(sess); codec_ops->drain(sess); } else if (codec_ops->eos_sequence) { u32 len; const u8 *data = codec_ops->eos_sequence(&len); esparser_queue_eos(sess->core, data, len); + vdec_wait_inactive(sess); } return ret; @@ -883,6 +911,7 @@ static int vdec_open(struct file *file) sess->height = 720; sess->pixelaspect.numerator = 1; sess->pixelaspect.denominator = 1; + sess->src_buffer_size = SZ_1M; INIT_LIST_HEAD(&sess->timestamps); INIT_LIST_HEAD(&sess->bufs_recycle); @@ -1076,7 +1105,7 @@ static int vdec_probe(struct platform_device *pdev) video_set_drvdata(vdev, core); - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret) { dev_err(dev, "Failed registering video device\n"); goto err_vdev_release; diff --git a/drivers/staging/media/meson/vdec/vdec.h b/drivers/staging/media/meson/vdec/vdec.h index 0faa1ec4858e..f95445ac0658 100644 --- a/drivers/staging/media/meson/vdec/vdec.h +++ b/drivers/staging/media/meson/vdec/vdec.h @@ -29,13 +29,19 @@ struct amvdec_buffer { * struct amvdec_timestamp - stores a src timestamp along with a VIFIFO offset * * @list: used to make lists out of this struct - * @ts: timestamp + * @tc: timecode from the v4l2 buffer + * @ts: timestamp from the VB2 buffer * @offset: offset in the VIFIFO where the associated packet was written + * @flags: flags from the v4l2 buffer + * @used_count: times this timestamp was checked for a match with a dst buffer */ struct amvdec_timestamp { struct list_head list; + struct v4l2_timecode tc; u64 ts; u32 offset; + u32 flags; + u32 used_count; }; struct amvdec_session; @@ -165,6 +171,7 @@ struct amvdec_format { enum amvdec_status { STATUS_STOPPED, + STATUS_INIT, STATUS_RUNNING, STATUS_NEEDS_RESUME, }; @@ -180,6 +187,7 @@ enum amvdec_status { * @ctrl_min_buf_capture: V4L2 control V4L2_CID_MIN_BUFFERS_FOR_CAPTURE * @fmt_out: vdec pixel format for the OUTPUT queue * @pixfmt_cap: V4L2 pixel format for the CAPTURE queue + * @src_buffer_size: size in bytes of the OUTPUT buffers' only plane * @width: current picture width * @height: current picture height * @colorspace: current colorspace @@ -221,6 +229,7 @@ struct amvdec_session { const struct amvdec_format *fmt_out; u32 pixfmt_cap; + u32 src_buffer_size; u32 width; u32 height; @@ -235,10 +244,11 @@ struct amvdec_session { 
struct work_struct esparser_queue_work; unsigned int streamon_cap, streamon_out; - unsigned int sequence_cap; + unsigned int sequence_cap, sequence_out; unsigned int should_stop; unsigned int keyframe_found; unsigned int num_dst_bufs; + unsigned int changed_format; u8 canvas_alloc[MAX_CANVAS]; u32 canvas_num; diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c index f16948bdbf2f..7f07a9175815 100644 --- a/drivers/staging/media/meson/vdec/vdec_helpers.c +++ b/drivers/staging/media/meson/vdec/vdec_helpers.c @@ -50,6 +50,33 @@ void amvdec_write_parser(struct amvdec_core *core, u32 reg, u32 val) } EXPORT_SYMBOL_GPL(amvdec_write_parser); +/* 4 KiB per 64x32 block */ +u32 amvdec_am21c_body_size(u32 width, u32 height) +{ + u32 width_64 = ALIGN(width, 64) / 64; + u32 height_32 = ALIGN(height, 32) / 32; + + return SZ_4K * width_64 * height_32; +} +EXPORT_SYMBOL_GPL(amvdec_am21c_body_size); + +/* 32 bytes per 128x64 block */ +u32 amvdec_am21c_head_size(u32 width, u32 height) +{ + u32 width_128 = ALIGN(width, 128) / 128; + u32 height_64 = ALIGN(height, 64) / 64; + + return 32 * width_128 * height_64; +} +EXPORT_SYMBOL_GPL(amvdec_am21c_head_size); + +u32 amvdec_am21c_size(u32 width, u32 height) +{ + return ALIGN(amvdec_am21c_body_size(width, height) + + amvdec_am21c_head_size(width, height), SZ_64K); +} +EXPORT_SYMBOL_GPL(amvdec_am21c_size); + static int canvas_alloc(struct amvdec_session *sess, u8 *canvas_id) { int ret; @@ -154,8 +181,8 @@ int amvdec_set_canvases(struct amvdec_session *sess, { struct v4l2_m2m_buffer *buf; u32 pixfmt = sess->pixfmt_cap; - u32 width = ALIGN(sess->width, 64); - u32 height = ALIGN(sess->height, 64); + u32 width = ALIGN(sess->width, 32); + u32 height = ALIGN(sess->height, 32); u32 reg_cur = reg_base[0]; u32 reg_num_cur = 0; u32 reg_base_cur = 0; @@ -200,33 +227,23 @@ int amvdec_set_canvases(struct amvdec_session *sess, } EXPORT_SYMBOL_GPL(amvdec_set_canvases); -void amvdec_add_ts_reorder(struct amvdec_session *sess, u64 ts, u32 offset) +void amvdec_add_ts(struct amvdec_session *sess, u64 ts, + struct v4l2_timecode tc, u32 offset, u32 vbuf_flags) { - struct amvdec_timestamp *new_ts, *tmp; + struct amvdec_timestamp *new_ts; unsigned long flags; - new_ts = kmalloc(sizeof(*new_ts), GFP_KERNEL); + new_ts = kzalloc(sizeof(*new_ts), GFP_KERNEL); new_ts->ts = ts; + new_ts->tc = tc; new_ts->offset = offset; + new_ts->flags = vbuf_flags; spin_lock_irqsave(&sess->ts_spinlock, flags); - - if (list_empty(&sess->timestamps)) - goto add_tail; - - list_for_each_entry(tmp, &sess->timestamps, list) { - if (ts <= tmp->ts) { - list_add_tail(&new_ts->list, &tmp->list); - goto unlock; - } - } - -add_tail: list_add_tail(&new_ts->list, &sess->timestamps); -unlock: spin_unlock_irqrestore(&sess->ts_spinlock, flags); } -EXPORT_SYMBOL_GPL(amvdec_add_ts_reorder); +EXPORT_SYMBOL_GPL(amvdec_add_ts); void amvdec_remove_ts(struct amvdec_session *sess, u64 ts) { @@ -251,8 +268,8 @@ EXPORT_SYMBOL_GPL(amvdec_remove_ts); static void dst_buf_done(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf, - u32 field, - u64 timestamp) + u32 field, u64 timestamp, + struct v4l2_timecode timecode, u32 flags) { struct device *dev = sess->core->dev_dec; u32 output_size = amvdec_get_output_size(sess); @@ -271,19 +288,27 @@ static void dst_buf_done(struct amvdec_session *sess, vbuf->vb2_buf.timestamp = timestamp; vbuf->sequence = sess->sequence_cap++; + vbuf->flags = flags; + vbuf->timecode = timecode; if (sess->should_stop && - 
atomic_read(&sess->esparser_queued_bufs) <= 2) { + atomic_read(&sess->esparser_queued_bufs) <= 1) { const struct v4l2_event ev = { .type = V4L2_EVENT_EOS }; - dev_dbg(dev, "Signaling EOS\n"); + dev_dbg(dev, "Signaling EOS, sequence_cap = %u\n", + sess->sequence_cap - 1); v4l2_event_queue_fh(&sess->fh, &ev); vbuf->flags |= V4L2_BUF_FLAG_LAST; + } else if (sess->status == STATUS_NEEDS_RESUME) { + /* Mark LAST for drained show frames during a source change */ + vbuf->flags |= V4L2_BUF_FLAG_LAST; + sess->sequence_cap = 0; } else if (sess->should_stop) dev_dbg(dev, "should_stop, %u bufs remain\n", atomic_read(&sess->esparser_queued_bufs)); - dev_dbg(dev, "Buffer %u done\n", vbuf->vb2_buf.index); + dev_dbg(dev, "Buffer %u done, ts = %llu, flags = %08X\n", + vbuf->vb2_buf.index, timestamp, flags); vbuf->field = field; v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE); @@ -297,7 +322,9 @@ void amvdec_dst_buf_done(struct amvdec_session *sess, struct device *dev = sess->core->dev_dec; struct amvdec_timestamp *tmp; struct list_head *timestamps = &sess->timestamps; + struct v4l2_timecode timecode; u64 timestamp; + u32 vbuf_flags; unsigned long flags; spin_lock_irqsave(&sess->ts_spinlock, flags); @@ -312,11 +339,13 @@ void amvdec_dst_buf_done(struct amvdec_session *sess, tmp = list_first_entry(timestamps, struct amvdec_timestamp, list); timestamp = tmp->ts; + timecode = tmp->tc; + vbuf_flags = tmp->flags; list_del(&tmp->list); kfree(tmp); spin_unlock_irqrestore(&sess->ts_spinlock, flags); - dst_buf_done(sess, vbuf, field, timestamp); + dst_buf_done(sess, vbuf, field, timestamp, timecode, vbuf_flags); atomic_dec(&sess->esparser_queued_bufs); } EXPORT_SYMBOL_GPL(amvdec_dst_buf_done); @@ -328,48 +357,43 @@ void amvdec_dst_buf_done_offset(struct amvdec_session *sess, struct device *dev = sess->core->dev_dec; struct amvdec_timestamp *match = NULL; struct amvdec_timestamp *tmp, *n; + struct v4l2_timecode timecode = { 0 }; u64 timestamp = 0; + u32 vbuf_flags = 0; unsigned long flags; spin_lock_irqsave(&sess->ts_spinlock, flags); /* Look for our vififo offset to get the corresponding timestamp. */ list_for_each_entry_safe(tmp, n, &sess->timestamps, list) { - s64 delta = (s64)offset - tmp->offset; - - /* Offsets reported by codecs usually differ slightly, - * so we need some wiggle room. - * 4KiB being the minimum packet size, there is no risk here. 
- */ - if (delta > (-1 * (s32)SZ_4K) && delta < SZ_4K) { - match = tmp; + if (tmp->offset > offset) { + /* + * Delete any record that remained unused for 32 match + * checks + */ + if (tmp->used_count++ >= 32) { + list_del(&tmp->list); + kfree(tmp); + } break; } - if (!allow_drop) - continue; - - /* Delete any timestamp entry that appears before our target - * (not all src packets/timestamps lead to a frame) - */ - if (delta > 0 || delta < -1 * (s32)sess->vififo_size) { - atomic_dec(&sess->esparser_queued_bufs); - list_del(&tmp->list); - kfree(tmp); - } + match = tmp; } if (!match) { - dev_dbg(dev, "Buffer %u done but can't match offset (%08X)\n", + dev_err(dev, "Buffer %u done but can't match offset (%08X)\n", vbuf->vb2_buf.index, offset); } else { timestamp = match->ts; + timecode = match->tc; + vbuf_flags = match->flags; list_del(&match->list); kfree(match); } spin_unlock_irqrestore(&sess->ts_spinlock, flags); - dst_buf_done(sess, vbuf, field, timestamp); + dst_buf_done(sess, vbuf, field, timestamp, timecode, vbuf_flags); if (match) atomic_dec(&sess->esparser_queued_bufs); } @@ -420,16 +444,19 @@ void amvdec_src_change(struct amvdec_session *sess, u32 width, v4l2_ctrl_s_ctrl(sess->ctrl_min_buf_capture, dpb_size); - /* Check if the capture queue is already configured well for our + /* + * Check if the capture queue is already configured well for our * usecase. If so, keep decoding with it and do not send the event */ - if (sess->width == width && + if (sess->streamon_cap && + sess->width == width && sess->height == height && dpb_size <= sess->num_dst_bufs) { sess->fmt_out->codec_ops->resume(sess); return; } + sess->changed_format = 0; sess->width = width; sess->height = height; sess->status = STATUS_NEEDS_RESUME; diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h index a455a9ee1cc2..cfaed52ab526 100644 --- a/drivers/staging/media/meson/vdec/vdec_helpers.h +++ b/drivers/staging/media/meson/vdec/vdec_helpers.h @@ -27,6 +27,10 @@ void amvdec_clear_dos_bits(struct amvdec_core *core, u32 reg, u32 val); u32 amvdec_read_parser(struct amvdec_core *core, u32 reg); void amvdec_write_parser(struct amvdec_core *core, u32 reg, u32 val); +u32 amvdec_am21c_body_size(u32 width, u32 height); +u32 amvdec_am21c_head_size(u32 width, u32 height); +u32 amvdec_am21c_size(u32 width, u32 height); + /** * amvdec_dst_buf_done_idx() - Signal that a buffer is done decoding * @@ -44,13 +48,15 @@ void amvdec_dst_buf_done_offset(struct amvdec_session *sess, u32 offset, u32 field, bool allow_drop); /** - * amvdec_add_ts_reorder() - Add a timestamp to the list in chronological order + * amvdec_add_ts() - Add a timestamp to the list * * @sess: current session * @ts: timestamp to add * @offset: offset in the VIFIFO where the associated packet was written + * @flags the vb2_v4l2_buffer flags */ -void amvdec_add_ts_reorder(struct amvdec_session *sess, u64 ts, u32 offset); +void amvdec_add_ts(struct amvdec_session *sess, u64 ts, + struct v4l2_timecode tc, u32 offset, u32 flags); void amvdec_remove_ts(struct amvdec_session *sess, u64 ts); /** diff --git a/drivers/staging/media/meson/vdec/vdec_hevc.c b/drivers/staging/media/meson/vdec/vdec_hevc.c new file mode 100644 index 000000000000..9530e580e57a --- /dev/null +++ b/drivers/staging/media/meson/vdec/vdec_hevc.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018 Maxime Jourdan <maxi.jourdan@wanadoo.fr> + * + * VDEC_HEVC is a video decoding block that allows decoding of + * HEVC, VP9 + 
*/ + +#include <linux/firmware.h> +#include <linux/clk.h> + +#include "vdec_1.h" +#include "vdec_helpers.h" +#include "vdec_hevc.h" +#include "hevc_regs.h" +#include "dos_regs.h" + +/* AO Registers */ +#define AO_RTI_GEN_PWR_SLEEP0 0xe8 +#define AO_RTI_GEN_PWR_ISO0 0xec + #define GEN_PWR_VDEC_HEVC (BIT(7) | BIT(6)) + #define GEN_PWR_VDEC_HEVC_SM1 (BIT(2)) + +#define MC_SIZE (4096 * 4) + +static int vdec_hevc_load_firmware(struct amvdec_session *sess, + const char *fwname) +{ + struct amvdec_core *core = sess->core; + struct device *dev = core->dev_dec; + const struct firmware *fw; + static void *mc_addr; + static dma_addr_t mc_addr_map; + int ret; + u32 i = 100; + + ret = request_firmware(&fw, fwname, dev); + if (ret < 0) { + dev_err(dev, "Unable to request firmware %s\n", fwname); + return ret; + } + + if (fw->size < MC_SIZE) { + dev_err(dev, "Firmware size %zu is too small. Expected %u.\n", + fw->size, MC_SIZE); + ret = -EINVAL; + goto release_firmware; + } + + mc_addr = dma_alloc_coherent(core->dev, MC_SIZE, &mc_addr_map, + GFP_KERNEL); + if (!mc_addr) { + ret = -ENOMEM; + goto release_firmware; + } + + memcpy(mc_addr, fw->data, MC_SIZE); + + amvdec_write_dos(core, HEVC_MPSR, 0); + amvdec_write_dos(core, HEVC_CPSR, 0); + + amvdec_write_dos(core, HEVC_IMEM_DMA_ADR, mc_addr_map); + amvdec_write_dos(core, HEVC_IMEM_DMA_COUNT, MC_SIZE / 4); + amvdec_write_dos(core, HEVC_IMEM_DMA_CTRL, (0x8000 | (7 << 16))); + + while (i && (readl(core->dos_base + HEVC_IMEM_DMA_CTRL) & 0x8000)) + i--; + + if (i == 0) { + dev_err(dev, "Firmware load fail (DMA hang?)\n"); + ret = -ENODEV; + } + + dma_free_coherent(core->dev, MC_SIZE, mc_addr, mc_addr_map); +release_firmware: + release_firmware(fw); + return ret; +} + +static void vdec_hevc_stbuf_init(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + + amvdec_write_dos(core, HEVC_STREAM_CONTROL, + amvdec_read_dos(core, HEVC_STREAM_CONTROL) & ~1); + amvdec_write_dos(core, HEVC_STREAM_START_ADDR, sess->vififo_paddr); + amvdec_write_dos(core, HEVC_STREAM_END_ADDR, + sess->vififo_paddr + sess->vififo_size); + amvdec_write_dos(core, HEVC_STREAM_RD_PTR, sess->vififo_paddr); + amvdec_write_dos(core, HEVC_STREAM_WR_PTR, sess->vififo_paddr); +} + +/* VDEC_HEVC specific ESPARSER configuration */ +static void vdec_hevc_conf_esparser(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + + /* set vififo_vbuf_rp_sel=>vdec_hevc */ + amvdec_write_dos(core, DOS_GEN_CTRL0, 3 << 1); + amvdec_write_dos(core, HEVC_STREAM_CONTROL, + amvdec_read_dos(core, HEVC_STREAM_CONTROL) | BIT(3)); + amvdec_write_dos(core, HEVC_STREAM_CONTROL, + amvdec_read_dos(core, HEVC_STREAM_CONTROL) | 1); + amvdec_write_dos(core, HEVC_STREAM_FIFO_CTL, + amvdec_read_dos(core, HEVC_STREAM_FIFO_CTL) | BIT(29)); +} + +static u32 vdec_hevc_vififo_level(struct amvdec_session *sess) +{ + return readl_relaxed(sess->core->dos_base + HEVC_STREAM_LEVEL); +} + +static int vdec_hevc_stop(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops; + + /* Disable interrupt */ + amvdec_write_dos(core, HEVC_ASSIST_MBOX1_MASK, 0); + /* Disable firmware processor */ + amvdec_write_dos(core, HEVC_MPSR, 0); + + if (sess->priv) + codec_ops->stop(sess); + + /* Enable VDEC_HEVC Isolation */ + if (core->platform->revision == VDEC_REVISION_SM1) + regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, + GEN_PWR_VDEC_HEVC_SM1, + GEN_PWR_VDEC_HEVC_SM1); + else + regmap_update_bits(core->regmap_ao, 
AO_RTI_GEN_PWR_ISO0, + 0xc00, 0xc00); + + /* VDEC_HEVC Memories */ + amvdec_write_dos(core, DOS_MEM_PD_HEVC, 0xffffffffUL); + + if (core->platform->revision == VDEC_REVISION_SM1) + regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, + GEN_PWR_VDEC_HEVC_SM1, + GEN_PWR_VDEC_HEVC_SM1); + else + regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, + GEN_PWR_VDEC_HEVC, GEN_PWR_VDEC_HEVC); + + clk_disable_unprepare(core->vdec_hevc_clk); + if (core->platform->revision == VDEC_REVISION_G12A || + core->platform->revision == VDEC_REVISION_SM1) + clk_disable_unprepare(core->vdec_hevcf_clk); + + return 0; +} + +static int vdec_hevc_start(struct amvdec_session *sess) +{ + int ret; + struct amvdec_core *core = sess->core; + struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops; + + if (core->platform->revision == VDEC_REVISION_G12A || + core->platform->revision == VDEC_REVISION_SM1) { + clk_set_rate(core->vdec_hevcf_clk, 666666666); + ret = clk_prepare_enable(core->vdec_hevcf_clk); + if (ret) + return ret; + } + + clk_set_rate(core->vdec_hevc_clk, 666666666); + ret = clk_prepare_enable(core->vdec_hevc_clk); + if (ret) + return ret; + + if (core->platform->revision == VDEC_REVISION_SM1) + regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, + GEN_PWR_VDEC_HEVC_SM1, 0); + else + regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, + GEN_PWR_VDEC_HEVC, 0); + usleep_range(10, 20); + + /* Reset VDEC_HEVC*/ + amvdec_write_dos(core, DOS_SW_RESET3, 0xffffffff); + amvdec_write_dos(core, DOS_SW_RESET3, 0x00000000); + + amvdec_write_dos(core, DOS_GCLK_EN3, 0xffffffff); + + /* VDEC_HEVC Memories */ + amvdec_write_dos(core, DOS_MEM_PD_HEVC, 0x00000000); + + /* Remove VDEC_HEVC Isolation */ + if (core->platform->revision == VDEC_REVISION_SM1) + regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, + GEN_PWR_VDEC_HEVC_SM1, 0); + else + regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, + 0xc00, 0); + + amvdec_write_dos(core, DOS_SW_RESET3, 0xffffffff); + amvdec_write_dos(core, DOS_SW_RESET3, 0x00000000); + + vdec_hevc_stbuf_init(sess); + + ret = vdec_hevc_load_firmware(sess, sess->fmt_out->firmware_path); + if (ret) + goto stop; + + ret = codec_ops->start(sess); + if (ret) + goto stop; + + amvdec_write_dos(core, DOS_SW_RESET3, BIT(12) | BIT(11)); + amvdec_write_dos(core, DOS_SW_RESET3, 0); + amvdec_read_dos(core, DOS_SW_RESET3); + + amvdec_write_dos(core, HEVC_MPSR, 1); + /* Let the firmware settle */ + usleep_range(10, 20); + + return 0; + +stop: + vdec_hevc_stop(sess); + return ret; +} + +struct amvdec_ops vdec_hevc_ops = { + .start = vdec_hevc_start, + .stop = vdec_hevc_stop, + .conf_esparser = vdec_hevc_conf_esparser, + .vififo_level = vdec_hevc_vififo_level, +}; diff --git a/drivers/staging/media/meson/vdec/vdec_hevc.h b/drivers/staging/media/meson/vdec/vdec_hevc.h new file mode 100644 index 000000000000..cd576a73a966 --- /dev/null +++ b/drivers/staging/media/meson/vdec/vdec_hevc.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2018 Maxime Jourdan <maxi.jourdan@wanadoo.fr> + */ + +#ifndef __MESON_VDEC_VDEC_HEVC_H_ +#define __MESON_VDEC_VDEC_HEVC_H_ + +#include "vdec.h" + +extern struct amvdec_ops vdec_hevc_ops; + +#endif diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c index ea39f8209ec7..eabbebab2da2 100644 --- a/drivers/staging/media/meson/vdec/vdec_platform.c +++ b/drivers/staging/media/meson/vdec/vdec_platform.c @@ -8,10 +8,25 @@ #include "vdec.h" #include "vdec_1.h" 
+#include "vdec_hevc.h" #include "codec_mpeg12.h" +#include "codec_h264.h" +#include "codec_vp9.h" static const struct amvdec_format vdec_formats_gxbb[] = { { + .pixfmt = V4L2_PIX_FMT_H264, + .min_buffers = 2, + .max_buffers = 24, + .max_width = 1920, + .max_height = 1080, + .vdec_ops = &vdec_1_ops, + .codec_ops = &codec_h264_ops, + .firmware_path = "meson/vdec/gxbb_h264.bin", + .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED | + V4L2_FMT_FLAG_DYN_RESOLUTION, + }, { .pixfmt = V4L2_PIX_FMT_MPEG1, .min_buffers = 8, .max_buffers = 8, @@ -21,6 +36,7 @@ static const struct amvdec_format vdec_formats_gxbb[] = { .codec_ops = &codec_mpeg12_ops, .firmware_path = "meson/vdec/gxl_mpeg12.bin", .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED, }, { .pixfmt = V4L2_PIX_FMT_MPEG2, .min_buffers = 8, @@ -31,11 +47,36 @@ static const struct amvdec_format vdec_formats_gxbb[] = { .codec_ops = &codec_mpeg12_ops, .firmware_path = "meson/vdec/gxl_mpeg12.bin", .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED, }, }; static const struct amvdec_format vdec_formats_gxl[] = { { + .pixfmt = V4L2_PIX_FMT_VP9, + .min_buffers = 16, + .max_buffers = 24, + .max_width = 3840, + .max_height = 2160, + .vdec_ops = &vdec_hevc_ops, + .codec_ops = &codec_vp9_ops, + .firmware_path = "meson/vdec/gxl_vp9.bin", + .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED | + V4L2_FMT_FLAG_DYN_RESOLUTION, + }, { + .pixfmt = V4L2_PIX_FMT_H264, + .min_buffers = 2, + .max_buffers = 24, + .max_width = 3840, + .max_height = 2160, + .vdec_ops = &vdec_1_ops, + .codec_ops = &codec_h264_ops, + .firmware_path = "meson/vdec/gxl_h264.bin", + .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED | + V4L2_FMT_FLAG_DYN_RESOLUTION, + }, { .pixfmt = V4L2_PIX_FMT_MPEG1, .min_buffers = 8, .max_buffers = 8, @@ -45,6 +86,7 @@ static const struct amvdec_format vdec_formats_gxl[] = { .codec_ops = &codec_mpeg12_ops, .firmware_path = "meson/vdec/gxl_mpeg12.bin", .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED, }, { .pixfmt = V4L2_PIX_FMT_MPEG2, .min_buffers = 8, @@ -55,11 +97,24 @@ static const struct amvdec_format vdec_formats_gxl[] = { .codec_ops = &codec_mpeg12_ops, .firmware_path = "meson/vdec/gxl_mpeg12.bin", .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED, }, }; static const struct amvdec_format vdec_formats_gxm[] = { { + .pixfmt = V4L2_PIX_FMT_H264, + .min_buffers = 2, + .max_buffers = 24, + .max_width = 3840, + .max_height = 2160, + .vdec_ops = &vdec_1_ops, + .codec_ops = &codec_h264_ops, + .firmware_path = "meson/vdec/gxm_h264.bin", + .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED | + V4L2_FMT_FLAG_DYN_RESOLUTION, + }, { .pixfmt = V4L2_PIX_FMT_MPEG1, .min_buffers = 8, .max_buffers = 8, @@ -69,6 +124,7 @@ static const struct amvdec_format vdec_formats_gxm[] = { .codec_ops = &codec_mpeg12_ops, .firmware_path = "meson/vdec/gxl_mpeg12.bin", .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED, }, { .pixfmt = V4L2_PIX_FMT_MPEG2, .min_buffers = 8, @@ -79,11 +135,36 @@ static const struct amvdec_format vdec_formats_gxm[] = { .codec_ops = &codec_mpeg12_ops, .firmware_path = "meson/vdec/gxl_mpeg12.bin", .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED, }, }; 
static const struct amvdec_format vdec_formats_g12a[] = { { + .pixfmt = V4L2_PIX_FMT_VP9, + .min_buffers = 16, + .max_buffers = 24, + .max_width = 3840, + .max_height = 2160, + .vdec_ops = &vdec_hevc_ops, + .codec_ops = &codec_vp9_ops, + .firmware_path = "meson/vdec/g12a_vp9.bin", + .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED | + V4L2_FMT_FLAG_DYN_RESOLUTION, + }, { + .pixfmt = V4L2_PIX_FMT_H264, + .min_buffers = 2, + .max_buffers = 24, + .max_width = 3840, + .max_height = 2160, + .vdec_ops = &vdec_1_ops, + .codec_ops = &codec_h264_ops, + .firmware_path = "meson/vdec/g12a_h264.bin", + .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED | + V4L2_FMT_FLAG_DYN_RESOLUTION, + }, { .pixfmt = V4L2_PIX_FMT_MPEG1, .min_buffers = 8, .max_buffers = 8, @@ -93,6 +174,7 @@ static const struct amvdec_format vdec_formats_g12a[] = { .codec_ops = &codec_mpeg12_ops, .firmware_path = "meson/vdec/gxl_mpeg12.bin", .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED, }, { .pixfmt = V4L2_PIX_FMT_MPEG2, .min_buffers = 8, @@ -103,11 +185,36 @@ static const struct amvdec_format vdec_formats_g12a[] = { .codec_ops = &codec_mpeg12_ops, .firmware_path = "meson/vdec/gxl_mpeg12.bin", .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED, }, }; static const struct amvdec_format vdec_formats_sm1[] = { { + .pixfmt = V4L2_PIX_FMT_VP9, + .min_buffers = 16, + .max_buffers = 24, + .max_width = 3840, + .max_height = 2160, + .vdec_ops = &vdec_hevc_ops, + .codec_ops = &codec_vp9_ops, + .firmware_path = "meson/vdec/sm1_vp9_mmu.bin", + .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED | + V4L2_FMT_FLAG_DYN_RESOLUTION, + }, { + .pixfmt = V4L2_PIX_FMT_H264, + .min_buffers = 2, + .max_buffers = 24, + .max_width = 3840, + .max_height = 2160, + .vdec_ops = &vdec_1_ops, + .codec_ops = &codec_h264_ops, + .firmware_path = "meson/vdec/g12a_h264.bin", + .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED | + V4L2_FMT_FLAG_DYN_RESOLUTION, + }, { .pixfmt = V4L2_PIX_FMT_MPEG1, .min_buffers = 8, .max_buffers = 8, @@ -117,6 +224,7 @@ static const struct amvdec_format vdec_formats_sm1[] = { .codec_ops = &codec_mpeg12_ops, .firmware_path = "meson/vdec/gxl_mpeg12.bin", .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED, }, { .pixfmt = V4L2_PIX_FMT_MPEG2, .min_buffers = 8, @@ -127,6 +235,7 @@ static const struct amvdec_format vdec_formats_sm1[] = { .codec_ops = &codec_mpeg12_ops, .firmware_path = "meson/vdec/gxl_mpeg12.bin", .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED, }, }; diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c index 673aa3a5f2bd..66975a37dc85 100644 --- a/drivers/staging/media/omap4iss/iss_video.c +++ b/drivers/staging/media/omap4iss/iss_video.c @@ -1111,7 +1111,7 @@ static int iss_video_open(struct file *file) goto done; } - ret = v4l2_pipeline_pm_use(&video->video.entity, 1); + ret = v4l2_pipeline_pm_get(&video->video.entity); if (ret < 0) { omap4iss_put(video->iss); goto done; @@ -1160,7 +1160,7 @@ static int iss_video_release(struct file *file) /* Disable streaming and free the buffers queue resources. 
*/ iss_video_streamoff(file, vfh, video->type); - v4l2_pipeline_pm_use(&video->video.entity, 0); + v4l2_pipeline_pm_put(&video->video.entity); /* Release the videobuf2 queue */ vb2_queue_release(&handle->queue); @@ -1242,7 +1242,7 @@ int omap4iss_video_init(struct iss_video *video, const char *name) video->video.fops = &iss_video_fops; snprintf(video->video.name, sizeof(video->video.name), "OMAP4 ISS %s %s", name, direction); - video->video.vfl_type = VFL_TYPE_GRABBER; + video->video.vfl_type = VFL_TYPE_VIDEO; video->video.release = video_device_release_empty; video->video.ioctl_ops = &iss_video_ioctl_ops; video->pipe.stream_state = ISS_PIPELINE_STREAM_STOPPED; @@ -1270,7 +1270,7 @@ int omap4iss_video_register(struct iss_video *video, struct v4l2_device *vdev) video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT; video->video.device_caps |= V4L2_CAP_STREAMING; - ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1); if (ret < 0) dev_err(video->iss->dev, "could not register video device (%d)\n", ret); diff --git a/drivers/staging/media/rkisp1/TODO b/drivers/staging/media/rkisp1/TODO index 03cd9a4e70f7..0aa9877dd64a 100644 --- a/drivers/staging/media/rkisp1/TODO +++ b/drivers/staging/media/rkisp1/TODO @@ -1,4 +1,3 @@ -* Fix serialization on subdev ops. * Don't use v4l2_async_notifier_parse_fwnode_endpoints_by_port(). e.g. isp_parse_of_endpoints in drivers/media/platform/omap3isp/isp.c cio2_parse_firmware in drivers/media/pci/intel/ipu3/ipu3-cio2.c. diff --git a/drivers/staging/media/rkisp1/rkisp1-capture.c b/drivers/staging/media/rkisp1/rkisp1-capture.c index 524e0dd38c1b..24fe6a7888aa 100644 --- a/drivers/staging/media/rkisp1/rkisp1-capture.c +++ b/drivers/staging/media/rkisp1/rkisp1-capture.c @@ -937,10 +937,7 @@ static void rkisp1_vb2_stop_streaming(struct vb2_queue *queue) rkisp1_return_all_buffers(cap, VB2_BUF_STATE_ERROR); - ret = v4l2_pipeline_pm_use(&node->vdev.entity, 0); - if (ret) - dev_err(rkisp1->dev, "pipeline close failed error:%d\n", ret); - + v4l2_pipeline_pm_put(&node->vdev.entity); ret = pm_runtime_put(rkisp1->dev); if (ret) dev_err(rkisp1->dev, "power down failed error:%d\n", ret); @@ -999,7 +996,7 @@ rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count) dev_err(cap->rkisp1->dev, "power up failed %d\n", ret); goto err_destroy_dummy; } - ret = v4l2_pipeline_pm_use(entity, 1); + ret = v4l2_pipeline_pm_get(entity); if (ret) { dev_err(cap->rkisp1->dev, "open cif pipeline failed %d\n", ret); goto err_pipe_pm_put; @@ -1025,7 +1022,7 @@ err_pipe_disable: rkisp1_pipeline_sink_walk(entity, NULL, rkisp1_pipeline_disable_cb); err_stop_stream: rkisp1_stream_stop(cap); - v4l2_pipeline_pm_use(entity, 0); + v4l2_pipeline_pm_put(entity); err_pipe_pm_put: pm_runtime_put(cap->rkisp1->dev); err_destroy_dummy: @@ -1344,7 +1341,7 @@ static int rkisp1_register_capture(struct rkisp1_capture *cap) q = &node->buf_queue; q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; - q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR; + q->io_modes = VB2_MMAP | VB2_DMABUF; q->drv_priv = cap; q->ops = &rkisp1_vb2_ops; q->mem_ops = &vb2_dma_contig_memops; @@ -1362,7 +1359,7 @@ static int rkisp1_register_capture(struct rkisp1_capture *cap) vdev->queue = q; - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret) { dev_err(cap->rkisp1->dev, "failed to register %s, ret=%d\n", vdev->name, ret); diff --git a/drivers/staging/media/rkisp1/rkisp1-common.h 
b/drivers/staging/media/rkisp1/rkisp1-common.h index 369a401b098a..b291cc60de8e 100644 --- a/drivers/staging/media/rkisp1/rkisp1-common.h +++ b/drivers/staging/media/rkisp1/rkisp1-common.h @@ -96,6 +96,7 @@ struct rkisp1_sensor_async { * @sink_crop: crop for sink pad * @src_fmt: output format * @src_crop: output size + * @ops_lock: ops serialization * * @is_dphy_errctrl_disabled : if dphy errctrl is disabled (avoid endless interrupt) * @frame_sequence: used to synchronize frame_id between video devices. @@ -107,6 +108,7 @@ struct rkisp1_isp { struct v4l2_subdev_pad_config pad_cfg[RKISP1_ISP_PAD_MAX]; const struct rkisp1_isp_mbus_info *sink_fmt; const struct rkisp1_isp_mbus_info *src_fmt; + struct mutex ops_lock; bool is_dphy_errctrl_disabled; atomic_t frame_sequence; }; @@ -224,6 +226,7 @@ struct rkisp1_resizer { struct v4l2_subdev_pad_config pad_cfg[RKISP1_ISP_PAD_MAX]; const struct rkisp1_rsz_config *config; enum rkisp1_fmt_pix_type fmt_type; + struct mutex ops_lock; }; struct rkisp1_debug { diff --git a/drivers/staging/media/rkisp1/rkisp1-dev.c b/drivers/staging/media/rkisp1/rkisp1-dev.c index 558126e66465..b1b3c058e957 100644 --- a/drivers/staging/media/rkisp1/rkisp1-dev.c +++ b/drivers/staging/media/rkisp1/rkisp1-dev.c @@ -128,7 +128,7 @@ static int rkisp1_create_links(struct rkisp1_device *rkisp1) ret = media_entity_get_fwnode_pad(&sd->entity, sd->fwnode, MEDIA_PAD_FL_SOURCE); - if (ret) { + if (ret < 0) { dev_err(sd->dev, "failed to find src pad for %s\n", sd->name); return ret; @@ -145,14 +145,15 @@ static int rkisp1_create_links(struct rkisp1_device *rkisp1) flags = 0; } - flags = MEDIA_LNK_FL_ENABLED; + flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE; /* create ISP->RSZ->CAP links */ for (i = 0; i < 2; i++) { source = &rkisp1->isp.sd.entity; sink = &rkisp1->resizer_devs[i].sd.entity; ret = media_create_pad_link(source, RKISP1_ISP_PAD_SOURCE_VIDEO, - sink, RKISP1_RSZ_PAD_SINK, flags); + sink, RKISP1_RSZ_PAD_SINK, + MEDIA_LNK_FL_ENABLED); if (ret) return ret; @@ -219,19 +220,17 @@ static int rkisp1_subdev_notifier_complete(struct v4l2_async_notifier *notifier) container_of(notifier, struct rkisp1_device, notifier); int ret; - mutex_lock(&rkisp1->media_dev.graph_mutex); ret = rkisp1_create_links(rkisp1); if (ret) - goto unlock; + return ret; + ret = v4l2_device_register_subdev_nodes(&rkisp1->v4l2_dev); if (ret) - goto unlock; + return ret; dev_dbg(rkisp1->dev, "Async subdev notifier completed\n"); -unlock: - mutex_unlock(&rkisp1->media_dev.graph_mutex); - return ret; + return 0; } static int rkisp1_fwnode_parse(struct device *dev, @@ -502,8 +501,7 @@ static int rkisp1_probe(struct platform_device *pdev) strscpy(rkisp1->media_dev.model, RKISP1_DRIVER_NAME, sizeof(rkisp1->media_dev.model)); rkisp1->media_dev.dev = &pdev->dev; - strscpy(rkisp1->media_dev.bus_info, - "platform: " RKISP1_DRIVER_NAME, + strscpy(rkisp1->media_dev.bus_info, RKISP1_BUS_INFO, sizeof(rkisp1->media_dev.bus_info)); media_device_init(&rkisp1->media_dev); diff --git a/drivers/staging/media/rkisp1/rkisp1-isp.c b/drivers/staging/media/rkisp1/rkisp1-isp.c index 328c7ea60971..fa53f05e37d8 100644 --- a/drivers/staging/media/rkisp1/rkisp1-isp.c +++ b/drivers/staging/media/rkisp1/rkisp1-isp.c @@ -28,9 +28,9 @@ #define RKISP1_DIR_SINK_SRC (RKISP1_DIR_SINK | RKISP1_DIR_SRC) /* - * NOTE: MIPI controller and input MUX are also configured in this file, - * because ISP Subdev is not only describe ISP submodule(input size,format, - * output size, format), but also a virtual route device. 
+ * NOTE: MIPI controller and input MUX are also configured in this file. + * This is because ISP Subdev describes not only ISP submodule (input size, + * format, output size, format), but also a virtual route device. */ /* @@ -504,7 +504,7 @@ static int rkisp1_config_cif(struct rkisp1_device *rkisp1) return 0; } -static int rkisp1_isp_stop(struct rkisp1_device *rkisp1) +static void rkisp1_isp_stop(struct rkisp1_device *rkisp1) { u32 val; @@ -540,8 +540,6 @@ static int rkisp1_isp_stop(struct rkisp1_device *rkisp1) RKISP1_CIF_IRCL_MIPI_SW_RST | RKISP1_CIF_IRCL_ISP_SW_RST, RKISP1_CIF_IRCL); rkisp1_write(rkisp1, 0x0, RKISP1_CIF_IRCL); - - return 0; } static void rkisp1_config_clk(struct rkisp1_device *rkisp1) @@ -555,7 +553,7 @@ static void rkisp1_config_clk(struct rkisp1_device *rkisp1) rkisp1_write(rkisp1, val, RKISP1_CIF_ICCL); } -static int rkisp1_isp_start(struct rkisp1_device *rkisp1) +static void rkisp1_isp_start(struct rkisp1_device *rkisp1) { struct rkisp1_sensor_async *sensor = rkisp1->active_sensor; u32 val; @@ -580,8 +578,6 @@ static int rkisp1_isp_start(struct rkisp1_device *rkisp1) * the MIPI interface and before starting the sensor output. */ usleep_range(1000, 1200); - - return 0; } /* ---------------------------------------------------------------------------- @@ -683,7 +679,7 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp, src_fmt->code = format->code; mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code); - if (!mbus_info) { + if (!mbus_info || !(mbus_info->direction & RKISP1_DIR_SRC)) { src_fmt->code = RKISP1_DEF_SRC_PAD_FMT; mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code); } @@ -767,7 +763,7 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp, which); sink_fmt->code = format->code; mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code); - if (!mbus_info) { + if (!mbus_info || !(mbus_info->direction & RKISP1_DIR_SINK)) { sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT; mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code); } @@ -795,7 +791,9 @@ static int rkisp1_isp_get_fmt(struct v4l2_subdev *sd, { struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd); + mutex_lock(&isp->ops_lock); fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad, fmt->which); + mutex_unlock(&isp->ops_lock); return 0; } @@ -805,6 +803,7 @@ static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd, { struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd); + mutex_lock(&isp->ops_lock); if (fmt->pad == RKISP1_ISP_PAD_SINK_VIDEO) rkisp1_isp_set_sink_fmt(isp, cfg, &fmt->format, fmt->which); else if (fmt->pad == RKISP1_ISP_PAD_SOURCE_VIDEO) @@ -813,6 +812,7 @@ static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd, fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad, fmt->which); + mutex_unlock(&isp->ops_lock); return 0; } @@ -821,11 +821,13 @@ static int rkisp1_isp_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_selection *sel) { struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd); + int ret = 0; if (sel->pad != RKISP1_ISP_PAD_SOURCE_VIDEO && sel->pad != RKISP1_ISP_PAD_SINK_VIDEO) return -EINVAL; + mutex_lock(&isp->ops_lock); switch (sel->target) { case V4L2_SEL_TGT_CROP_BOUNDS: if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO) { @@ -848,10 +850,10 @@ static int rkisp1_isp_get_selection(struct v4l2_subdev *sd, sel->which); break; default: - return -EINVAL; + ret = -EINVAL; } - - return 0; + mutex_unlock(&isp->ops_lock); + return ret; } static int rkisp1_isp_set_selection(struct v4l2_subdev *sd, @@ -861,21 +863,23 @@ static int 
rkisp1_isp_set_selection(struct v4l2_subdev *sd, struct rkisp1_device *rkisp1 = container_of(sd->v4l2_dev, struct rkisp1_device, v4l2_dev); struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd); + int ret = 0; if (sel->target != V4L2_SEL_TGT_CROP) return -EINVAL; dev_dbg(rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__, sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height); - + mutex_lock(&isp->ops_lock); if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO) rkisp1_isp_set_sink_crop(isp, cfg, &sel->r, sel->which); else if (sel->pad == RKISP1_ISP_PAD_SOURCE_VIDEO) rkisp1_isp_set_src_crop(isp, cfg, &sel->r, sel->which); else - return -EINVAL; + ret = -EINVAL; - return 0; + mutex_unlock(&isp->ops_lock); + return ret; } static int rkisp1_subdev_link_validate(struct media_link *link) @@ -936,13 +940,12 @@ static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable) { struct rkisp1_device *rkisp1 = container_of(sd->v4l2_dev, struct rkisp1_device, v4l2_dev); + struct rkisp1_isp *isp = &rkisp1->isp; struct v4l2_subdev *sensor_sd; int ret = 0; if (!enable) { - ret = rkisp1_isp_stop(rkisp1); - if (ret) - return ret; + rkisp1_isp_stop(rkisp1); rkisp1_mipi_csi2_stop(rkisp1->active_sensor); return 0; } @@ -953,22 +956,23 @@ static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable) rkisp1->active_sensor = container_of(sensor_sd->asd, struct rkisp1_sensor_async, asd); + if (rkisp1->active_sensor->mbus.type != V4L2_MBUS_CSI2_DPHY) + return -EINVAL; + atomic_set(&rkisp1->isp.frame_sequence, -1); + mutex_lock(&isp->ops_lock); ret = rkisp1_config_cif(rkisp1); if (ret) - return ret; - - if (rkisp1->active_sensor->mbus.type != V4L2_MBUS_CSI2_DPHY) - return -EINVAL; + goto mutex_unlock; ret = rkisp1_mipi_csi2_start(&rkisp1->isp, rkisp1->active_sensor); if (ret) - return ret; + goto mutex_unlock; - ret = rkisp1_isp_start(rkisp1); - if (ret) - rkisp1_mipi_csi2_stop(rkisp1->active_sensor); + rkisp1_isp_start(rkisp1); +mutex_unlock: + mutex_unlock(&isp->ops_lock); return ret; } @@ -1028,6 +1032,7 @@ int rkisp1_isp_register(struct rkisp1_device *rkisp1, isp->sink_fmt = rkisp1_isp_mbus_info_get(RKISP1_DEF_SINK_PAD_FMT); isp->src_fmt = rkisp1_isp_mbus_info_get(RKISP1_DEF_SRC_PAD_FMT); + mutex_init(&isp->ops_lock); ret = media_entity_pads_init(&sd->entity, RKISP1_ISP_PAD_MAX, pads); if (ret) return ret; diff --git a/drivers/staging/media/rkisp1/rkisp1-params.c b/drivers/staging/media/rkisp1/rkisp1-params.c index 781f0ca85af1..44d542caf32b 100644 --- a/drivers/staging/media/rkisp1/rkisp1-params.c +++ b/drivers/staging/media/rkisp1/rkisp1-params.c @@ -1605,7 +1605,7 @@ int rkisp1_params_register(struct rkisp1_params *params, ret = media_entity_pads_init(&vdev->entity, 1, &node->pad); if (ret) goto err_release_queue; - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret) { dev_err(&vdev->dev, "failed to register %s, ret=%d\n", vdev->name, ret); diff --git a/drivers/staging/media/rkisp1/rkisp1-resizer.c b/drivers/staging/media/rkisp1/rkisp1-resizer.c index 8cdc29c1a178..87799fbf0363 100644 --- a/drivers/staging/media/rkisp1/rkisp1-resizer.c +++ b/drivers/staging/media/rkisp1/rkisp1-resizer.c @@ -503,6 +503,8 @@ static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz, sink_crop->top = 0; sink_crop->width = sink_fmt->width; sink_crop->height = sink_fmt->height; + + *r = *sink_crop; return; } @@ -537,15 +539,6 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz, if (which == V4L2_SUBDEV_FORMAT_ACTIVE) 
rsz->fmt_type = mbus_info->fmt_type; - if (rsz->id == RKISP1_MAINPATH && - mbus_info->fmt_type == RKISP1_FMT_BAYER) { - sink_crop->left = 0; - sink_crop->top = 0; - sink_crop->width = sink_fmt->width; - sink_crop->height = sink_fmt->height; - return; - } - /* Propagete to source pad */ src_fmt->code = sink_fmt->code; @@ -569,7 +562,9 @@ static int rkisp1_rsz_get_fmt(struct v4l2_subdev *sd, struct rkisp1_resizer *rsz = container_of(sd, struct rkisp1_resizer, sd); + mutex_lock(&rsz->ops_lock); fmt->format = *rkisp1_rsz_get_pad_fmt(rsz, cfg, fmt->pad, fmt->which); + mutex_unlock(&rsz->ops_lock); return 0; } @@ -580,11 +575,13 @@ static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd, struct rkisp1_resizer *rsz = container_of(sd, struct rkisp1_resizer, sd); + mutex_lock(&rsz->ops_lock); if (fmt->pad == RKISP1_RSZ_PAD_SINK) rkisp1_rsz_set_sink_fmt(rsz, cfg, &fmt->format, fmt->which); else rkisp1_rsz_set_src_fmt(rsz, cfg, &fmt->format, fmt->which); + mutex_unlock(&rsz->ops_lock); return 0; } @@ -595,10 +592,12 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd, struct rkisp1_resizer *rsz = container_of(sd, struct rkisp1_resizer, sd); struct v4l2_mbus_framefmt *mf_sink; + int ret = 0; if (sel->pad == RKISP1_RSZ_PAD_SRC) return -EINVAL; + mutex_lock(&rsz->ops_lock); switch (sel->target) { case V4L2_SEL_TGT_CROP_BOUNDS: mf_sink = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, @@ -613,10 +612,11 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd, sel->which); break; default: - return -EINVAL; + ret = -EINVAL; } - return 0; + mutex_unlock(&rsz->ops_lock); + return ret; } static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd, @@ -632,7 +632,9 @@ static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd, dev_dbg(sd->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__, sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height); + mutex_lock(&rsz->ops_lock); rkisp1_rsz_set_sink_crop(rsz, cfg, &sel->r, sel->which); + mutex_unlock(&rsz->ops_lock); return 0; } @@ -672,9 +674,11 @@ static int rkisp1_rsz_s_stream(struct v4l2_subdev *sd, int enable) if (other->is_streaming) when = RKISP1_SHADOW_REGS_ASYNC; + mutex_lock(&rsz->ops_lock); rkisp1_rsz_config(rsz, when); rkisp1_dcrop_config(rsz); + mutex_unlock(&rsz->ops_lock); return 0; } @@ -720,6 +724,7 @@ static int rkisp1_rsz_register(struct rkisp1_resizer *rsz) rsz->fmt_type = RKISP1_DEF_FMT_TYPE; + mutex_init(&rsz->ops_lock); ret = media_entity_pads_init(&sd->entity, 2, pads); if (ret) return ret; diff --git a/drivers/staging/media/rkisp1/rkisp1-stats.c b/drivers/staging/media/rkisp1/rkisp1-stats.c index d98ea15837de..6dfcbdc3deb8 100644 --- a/drivers/staging/media/rkisp1/rkisp1-stats.c +++ b/drivers/staging/media/rkisp1/rkisp1-stats.c @@ -70,8 +70,7 @@ static int rkisp1_stats_querycap(struct file *file, strscpy(cap->driver, RKISP1_DRIVER_NAME, sizeof(cap->driver)); strscpy(cap->card, vdev->name, sizeof(cap->card)); - strscpy(cap->bus_info, "platform: " RKISP1_DRIVER_NAME, - sizeof(cap->bus_info)); + strscpy(cap->bus_info, RKISP1_BUS_INFO, sizeof(cap->bus_info)); return 0; } @@ -487,7 +486,7 @@ int rkisp1_stats_register(struct rkisp1_stats *stats, if (ret) goto err_release_queue; - ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret) { dev_err(&vdev->dev, "failed to register %s, ret=%d\n", vdev->name, ret); diff --git a/drivers/staging/media/soc_camera/soc_camera.c b/drivers/staging/media/soc_camera/soc_camera.c index 7b9448e3c9ba..39f513f69b89 100644 --- 
a/drivers/staging/media/soc_camera/soc_camera.c +++ b/drivers/staging/media/soc_camera/soc_camera.c @@ -2068,7 +2068,7 @@ static int soc_camera_video_start(struct soc_camera_device *icd) v4l2_disable_ioctl(icd->vdev, VIDIOC_S_STD); v4l2_disable_ioctl(icd->vdev, VIDIOC_ENUMSTD); } - ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(icd->vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) { dev_err(icd->pdev, "video_register_device failed: %d\n", ret); return ret; diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c index c6ddd46eff82..05a85517ff60 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus.c @@ -414,7 +414,7 @@ static int cedrus_probe(struct platform_device *pdev) dev->mdev.ops = &cedrus_m2m_media_ops; dev->v4l2_dev.mdev = &dev->mdev; - ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto err_m2m; diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c index bfb4a4820a67..54ee2aa423e2 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c @@ -610,8 +610,12 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx) goto err_mv_col_buf; } + /* + * NOTE: Multiplying by two deviates from CedarX logic, but it + * is for some unknown reason needed for H264 4K decoding on H6. + */ ctx->codec.h264.intra_pred_buf_size = - ALIGN(ctx->src_fmt.width, 64) * 5; + ALIGN(ctx->src_fmt.width, 64) * 5 * 2; ctx->codec.h264.intra_pred_buf = dma_alloc_coherent(dev->dev, ctx->codec.h264.intra_pred_buf_size, diff --git a/drivers/staging/media/tegra-vde/vde.c b/drivers/staging/media/tegra-vde/vde.c index e18fd48981da..d3e63512a765 100644 --- a/drivers/staging/media/tegra-vde/vde.c +++ b/drivers/staging/media/tegra-vde/vde.c @@ -949,7 +949,6 @@ static int tegra_vde_runtime_resume(struct device *dev) static int tegra_vde_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *regs; struct tegra_vde *vde; int irq, err; @@ -959,75 +958,39 @@ static int tegra_vde_probe(struct platform_device *pdev) platform_set_drvdata(pdev, vde); - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sxe"); - if (!regs) - return -ENODEV; - - vde->sxe = devm_ioremap_resource(dev, regs); + vde->sxe = devm_platform_ioremap_resource_byname(pdev, "sxe"); if (IS_ERR(vde->sxe)) return PTR_ERR(vde->sxe); - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bsev"); - if (!regs) - return -ENODEV; - - vde->bsev = devm_ioremap_resource(dev, regs); + vde->bsev = devm_platform_ioremap_resource_byname(pdev, "bsev"); if (IS_ERR(vde->bsev)) return PTR_ERR(vde->bsev); - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mbe"); - if (!regs) - return -ENODEV; - - vde->mbe = devm_ioremap_resource(dev, regs); + vde->mbe = devm_platform_ioremap_resource_byname(pdev, "mbe"); if (IS_ERR(vde->mbe)) return PTR_ERR(vde->mbe); - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe"); - if (!regs) - return -ENODEV; - - vde->ppe = devm_ioremap_resource(dev, regs); + vde->ppe = devm_platform_ioremap_resource_byname(pdev, "ppe"); if (IS_ERR(vde->ppe)) return PTR_ERR(vde->ppe); - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mce"); - if (!regs) - return -ENODEV; - - vde->mce = devm_ioremap_resource(dev, 
regs); + vde->mce = devm_platform_ioremap_resource_byname(pdev, "mce"); if (IS_ERR(vde->mce)) return PTR_ERR(vde->mce); - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tfe"); - if (!regs) - return -ENODEV; - - vde->tfe = devm_ioremap_resource(dev, regs); + vde->tfe = devm_platform_ioremap_resource_byname(pdev, "tfe"); if (IS_ERR(vde->tfe)) return PTR_ERR(vde->tfe); - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppb"); - if (!regs) - return -ENODEV; - - vde->ppb = devm_ioremap_resource(dev, regs); + vde->ppb = devm_platform_ioremap_resource_byname(pdev, "ppb"); if (IS_ERR(vde->ppb)) return PTR_ERR(vde->ppb); - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vdma"); - if (!regs) - return -ENODEV; - - vde->vdma = devm_ioremap_resource(dev, regs); + vde->vdma = devm_platform_ioremap_resource_byname(pdev, "vdma"); if (IS_ERR(vde->vdma)) return PTR_ERR(vde->vdma); - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "frameid"); - if (!regs) - return -ENODEV; - - vde->frameid = devm_ioremap_resource(dev, regs); + vde->frameid = devm_platform_ioremap_resource_byname(pdev, "frameid"); if (IS_ERR(vde->frameid)) return PTR_ERR(vde->frameid); diff --git a/drivers/media/usb/usbvision/Kconfig b/drivers/staging/media/usbvision/Kconfig index e1039fdfb0ea..c6e1afb5ac48 100644 --- a/drivers/media/usb/usbvision/Kconfig +++ b/drivers/staging/media/usbvision/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config VIDEO_USBVISION - tristate "USB video devices based on Nogatech NT1003/1004/1005" - depends on I2C && VIDEO_V4L2 + tristate "USB video devices based on Nogatech NT1003/1004/1005 (Deprecated)" + depends on MEDIA_USB_SUPPORT && I2C && VIDEO_V4L2 select VIDEO_TUNER select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT help @@ -9,5 +9,10 @@ config VIDEO_USBVISION NT1003/1004/1005 USB Bridges. This driver enables using those devices. + This driver is deprecated and scheduled for removal by the + end of 2020. See the TODO file in drivers/staging/media/usbvision + for a list of actions that have to be done in order to prevent + removal of this driver. + To compile this driver as a module, choose M here: the module will be called usbvision. diff --git a/drivers/media/usb/usbvision/Makefile b/drivers/staging/media/usbvision/Makefile index 4d8541b9d4f8..4d8541b9d4f8 100644 --- a/drivers/media/usb/usbvision/Makefile +++ b/drivers/staging/media/usbvision/Makefile diff --git a/drivers/staging/media/usbvision/TODO b/drivers/staging/media/usbvision/TODO new file mode 100644 index 000000000000..e9fb4d125581 --- /dev/null +++ b/drivers/staging/media/usbvision/TODO @@ -0,0 +1,11 @@ +The driver is deprecated and scheduled for removal by the end +of 2020. + +In order to prevent removal the following actions would have to +be taken: + +- clean up the code +- convert to the vb2 framework +- fix the disconnect and free-on-last-user handling (i.e., add + a release callback for struct v4l2_device and rework the code + to use that correctly). 
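For reference, the "release callback for struct v4l2_device" item in the TODO above corresponds to the usual v4l2-core refcounting pattern. The sketch below is not taken from the usbvision sources; struct usbvision_state and the usbvision_*() helpers are invented names for illustration, and only the v4l2_device_register()/v4l2_device_disconnect()/v4l2_device_put() calls and the .release hook are real core API.

/*
 * Illustrative only: the structure and function names are made up,
 * not the driver's real code. Only the v4l2_device calls and the
 * .release hook are v4l2-core API.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <media/v4l2-device.h>

struct usbvision_state {
	struct v4l2_device v4l2_dev;
	/* video_device, URBs, buffers, ... */
};

/* Called by v4l2_device_put() when the last reference is dropped */
static void usbvision_state_release(struct v4l2_device *v4l2_dev)
{
	struct usbvision_state *st =
		container_of(v4l2_dev, struct usbvision_state, v4l2_dev);

	kfree(st);
}

static int usbvision_bind(struct usb_interface *intf)
{
	struct usbvision_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
	int ret;

	if (!st)
		return -ENOMEM;

	st->v4l2_dev.release = usbvision_state_release;
	ret = v4l2_device_register(&intf->dev, &st->v4l2_dev);
	if (ret)
		kfree(st);
	return ret;
}

static void usbvision_unbind(struct usbvision_state *st)
{
	/*
	 * Detach from the (now gone) USB device, then drop the driver's
	 * reference; open file handles keep their own references taken
	 * with v4l2_device_get().
	 */
	v4l2_device_disconnect(&st->v4l2_dev);
	v4l2_device_put(&st->v4l2_dev);
}

The file-handle open/release paths would then pair v4l2_device_get() with v4l2_device_put(), so the driver state outlives a disconnect for as long as a user still holds the device node open.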
diff --git a/drivers/media/usb/usbvision/usbvision-cards.c b/drivers/staging/media/usbvision/usbvision-cards.c index 5e0cbbfe7c86..5e0cbbfe7c86 100644 --- a/drivers/media/usb/usbvision/usbvision-cards.c +++ b/drivers/staging/media/usbvision/usbvision-cards.c diff --git a/drivers/media/usb/usbvision/usbvision-cards.h b/drivers/staging/media/usbvision/usbvision-cards.h index 07ec83512743..07ec83512743 100644 --- a/drivers/media/usb/usbvision/usbvision-cards.h +++ b/drivers/staging/media/usbvision/usbvision-cards.h diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/staging/media/usbvision/usbvision-core.c index f05a5c84dc18..f05a5c84dc18 100644 --- a/drivers/media/usb/usbvision/usbvision-core.c +++ b/drivers/staging/media/usbvision/usbvision-core.c diff --git a/drivers/media/usb/usbvision/usbvision-i2c.c b/drivers/staging/media/usbvision/usbvision-i2c.c index 6e4df3335b1b..6e4df3335b1b 100644 --- a/drivers/media/usb/usbvision/usbvision-i2c.c +++ b/drivers/staging/media/usbvision/usbvision-i2c.c diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/staging/media/usbvision/usbvision-video.c index 5ca2c2f35fe2..3ea25fdcf767 100644 --- a/drivers/media/usb/usbvision/usbvision-video.c +++ b/drivers/staging/media/usbvision/usbvision-video.c @@ -1271,7 +1271,7 @@ static int usbvision_register_video(struct usb_usbvision *usbvision) if (usbvision->have_tuner) usbvision->vdev.device_caps |= V4L2_CAP_TUNER; - if (video_register_device(&usbvision->vdev, VFL_TYPE_GRABBER, video_nr) < 0) + if (video_register_device(&usbvision->vdev, VFL_TYPE_VIDEO, video_nr) < 0) goto err_exit; printk(KERN_INFO "USBVision[%d]: registered USBVision Video device %s [v4l2]\n", usbvision->nr, video_device_node_name(&usbvision->vdev)); diff --git a/drivers/media/usb/usbvision/usbvision.h b/drivers/staging/media/usbvision/usbvision.h index 11539578e8d2..11539578e8d2 100644 --- a/drivers/media/usb/usbvision/usbvision.h +++ b/drivers/staging/media/usbvision/usbvision.h diff --git a/drivers/staging/most/Documentation/ABI/configfs-most.txt b/drivers/staging/most/Documentation/ABI/configfs-most.txt deleted file mode 100644 index 2bf811449b0b..000000000000 --- a/drivers/staging/most/Documentation/ABI/configfs-most.txt +++ /dev/null @@ -1,204 +0,0 @@ -What: /sys/kernel/config/most_<component> -Date: March 8, 2019 -KernelVersion: 5.2 -Description: Interface is used to configure and connect device channels - to component drivers. - - Attributes are visible only when configfs is mounted. 
To mount - configfs in /sys/kernel/config directory use: - # mount -t configfs none /sys/kernel/config/ - - -What: /sys/kernel/config/most_cdev/<link> -Date: March 8, 2019 -KernelVersion: 5.2 -Description: - The attributes: - - buffer_size configure the buffer size for this channel - - subbuffer_size configure the sub-buffer size for this channel - (needed for synchronous and isochrnous data) - - - num_buffers configure number of buffers used for this - channel - - datatype configure type of data that will travel over - this channel - - direction configure whether this link will be an input - or output - - dbr_size configure DBR data buffer size (this is used - for MediaLB communication only) - - packets_per_xact - configure the number of packets that will be - collected from the network before being - transmitted via USB (this is used for USB - communication only) - - device name of the device the link is to be attached to - - channel name of the channel the link is to be attached to - - comp_params pass parameters needed by some components - - create_link write '1' to this attribute to trigger the - creation of the link. In case of speculative - configuration, the creation is post-poned until - a physical device is being attached to the bus. - - destroy_link write '1' to this attribute to destroy an - active link - -What: /sys/kernel/config/most_video/<link> -Date: March 8, 2019 -KernelVersion: 5.2 -Description: - The attributes: - - buffer_size configure the buffer size for this channel - - subbuffer_size configure the sub-buffer size for this channel - (needed for synchronous and isochrnous data) - - - num_buffers configure number of buffers used for this - channel - - datatype configure type of data that will travel over - this channel - - direction configure whether this link will be an input - or output - - dbr_size configure DBR data buffer size (this is used - for MediaLB communication only) - - packets_per_xact - configure the number of packets that will be - collected from the network before being - transmitted via USB (this is used for USB - communication only) - - device name of the device the link is to be attached to - - channel name of the channel the link is to be attached to - - comp_params pass parameters needed by some components - - create_link write '1' to this attribute to trigger the - creation of the link. In case of speculative - configuration, the creation is post-poned until - a physical device is being attached to the bus. 
- - destroy_link write '1' to this attribute to destroy an - active link - -What: /sys/kernel/config/most_net/<link> -Date: March 8, 2019 -KernelVersion: 5.2 -Description: - The attributes: - - buffer_size configure the buffer size for this channel - - subbuffer_size configure the sub-buffer size for this channel - (needed for synchronous and isochrnous data) - - - num_buffers configure number of buffers used for this - channel - - datatype configure type of data that will travel over - this channel - - direction configure whether this link will be an input - or output - - dbr_size configure DBR data buffer size (this is used - for MediaLB communication only) - - packets_per_xact - configure the number of packets that will be - collected from the network before being - transmitted via USB (this is used for USB - communication only) - - device name of the device the link is to be attached to - - channel name of the channel the link is to be attached to - - comp_params pass parameters needed by some components - - create_link write '1' to this attribute to trigger the - creation of the link. In case of speculative - configuration, the creation is post-poned until - a physical device is being attached to the bus. - - destroy_link write '1' to this attribute to destroy an - active link - -What: /sys/kernel/config/most_sound/<card> -Date: March 8, 2019 -KernelVersion: 5.2 -Description: - The attributes: - - create_card write '1' to this attribute to trigger the - registration of the sound card with the ALSA - subsystem. - -What: /sys/kernel/config/most_sound/<card>/<link> -Date: March 8, 2019 -KernelVersion: 5.2 -Description: - The attributes: - - buffer_size configure the buffer size for this channel - - subbuffer_size configure the sub-buffer size for this channel - (needed for synchronous and isochrnous data) - - - num_buffers configure number of buffers used for this - channel - - datatype configure type of data that will travel over - this channel - - direction configure whether this link will be an input - or output - - dbr_size configure DBR data buffer size (this is used - for MediaLB communication only) - - packets_per_xact - configure the number of packets that will be - collected from the network before being - transmitted via USB (this is used for USB - communication only) - - device name of the device the link is to be attached to - - channel name of the channel the link is to be attached to - - comp_params pass parameters needed by some components - - create_link write '1' to this attribute to trigger the - creation of the link. In case of speculative - configuration, the creation is post-poned until - a physical device is being attached to the bus. - - destroy_link write '1' to this attribute to destroy an - active link - -What: /sys/kernel/config/rdma_cm/<hca>/ports/<port-num>/default_roce_tos -Date: March 8, 2019 -KernelVersion: 5.2 -Description: RDMA-CM QPs from HCA <hca> at port <port-num> - will be created with this TOS as default. - This can be overridden by using the rdma_set_option API. - The possible RoCE TOS values are 0-255. 
diff --git a/drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt b/drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt deleted file mode 100644 index d8fa841e3742..000000000000 --- a/drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt +++ /dev/null @@ -1,313 +0,0 @@ -What: /sys/bus/most/devices/.../description -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Provides information about the interface type and the physical - location of the device. Hardware attached via USB, for instance, - might return <usb_device 1-1.1:1.0> -Users: - -What: /sys/bus/most/devices/.../interface -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Indicates the type of peripheral interface the device uses. -Users: - -What: /sys/bus/most/devices/.../dci -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - If the network interface controller is attached via USB, a dci - directory is created that allows applications to read and - write the controller's DCI registers. -Users: - -What: /sys/bus/most/devices/.../dci/arb_address -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to set an arbitrary DCI register address an - application wants to read from or write to. -Users: - -What: /sys/bus/most/devices/.../dci/arb_value -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to read and write the DCI register whose address - is stored in arb_address. -Users: - -What: /sys/bus/most/devices/.../dci/mep_eui48_hi -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to check and configure the MAC address. -Users: - -What: /sys/bus/most/devices/.../dci/mep_eui48_lo -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to check and configure the MAC address. -Users: - -What: /sys/bus/most/devices/.../dci/mep_eui48_mi -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to check and configure the MAC address. -Users: - -What: /sys/bus/most/devices/.../dci/mep_filter -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to check and configure the MEP filter address. -Users: - -What: /sys/bus/most/devices/.../dci/mep_hash0 -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to check and configure the MEP hash table. -Users: - -What: /sys/bus/most/devices/.../dci/mep_hash1 -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to check and configure the MEP hash table. -Users: - -What: /sys/bus/most/devices/.../dci/mep_hash2 -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to check and configure the MEP hash table. -Users: - -What: /sys/bus/most/devices/.../dci/mep_hash3 -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to check and configure the MEP hash table. 
-Users: - -What: /sys/bus/most/devices/.../dci/ni_state -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Indicates the current network interface state. -Users: - -What: /sys/bus/most/devices/.../dci/node_address -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Indicates the current node address. -Users: - -What: /sys/bus/most/devices/.../dci/node_position -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Indicates the current node position. -Users: - -What: /sys/bus/most/devices/.../dci/packet_bandwidth -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Indicates the configured packet bandwidth. -Users: - -What: /sys/bus/most/devices/.../dci/sync_ep -Date: June 2016 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Triggers the controller's synchronization process for a certain - endpoint. -Users: - -What: /sys/bus/most/devices/.../<channel>/ -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - For every channel of the device a directory is created, whose - name is dictated by the HDM. This enables an application to - collect information about the channel's capabilities and - configure it. -Users: - -What: /sys/bus/most/devices/.../<channel>/available_datatypes -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Indicates the data types the current channel can transport. -Users: - -What: /sys/bus/most/devices/.../<channel>/available_directions -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Indicates the directions the current channel is capable of. -Users: - -What: /sys/bus/most/devices/.../<channel>/number_of_packet_buffers -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Indicates the number of packet buffers the current channel can - handle. -Users: - -What: /sys/bus/most/devices/.../<channel>/number_of_stream_buffers -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Indicates the number of streaming buffers the current channel can - handle. -Users: - -What: /sys/bus/most/devices/.../<channel>/size_of_packet_buffer -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Indicates the size of a packet buffer the current channel can - handle. -Users: - -What: /sys/bus/most/devices/.../<channel>/size_of_stream_buffer -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Indicates the size of a streaming buffer the current channel can - handle. -Users: - -What: /sys/bus/most/devices/.../<channel>/set_number_of_buffers -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is to configure the number of buffers of the current channel. -Users: - -What: /sys/bus/most/devices/.../<channel>/set_buffer_size -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is to configure the size of a buffer of the current channel. 
-Users: - -What: /sys/bus/most/devices/.../<channel>/set_direction -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is to configure the direction of the current channel. - The following strings will be accepted: - 'dir_tx', - 'dir_rx' -Users: - -What: /sys/bus/most/devices/.../<channel>/set_datatype -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is to configure the data type of the current channel. - The following strings will be accepted: - 'control', - 'async', - 'sync', - 'isoc_avp' -Users: - -What: /sys/bus/most/devices/.../<channel>/set_subbuffer_size -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is to configure the subbuffer size of the current channel. -Users: - -What: /sys/bus/most/devices/.../<channel>/set_packets_per_xact -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is to configure the number of packets per transaction of - the current channel. This is only needed network interface - controller is attached via USB. -Users: - -What: /sys/bus/most/devices/.../<channel>/channel_starving -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - Indicates whether current channel ran out of buffers. -Users: - -What: /sys/bus/most/drivers/mostcore/add_link -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to link a channel to a component of the - mostcore. A link created by writing to this file is - referred to as pipe. -Users: - -What: /sys/bus/most/drivers/mostcore/remove_link -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to unlink a channel from a component. -Users: - -What: /sys/bus/most/drivers/mostcore/components -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to retrieve a list of registered components. -Users: - -What: /sys/bus/most/drivers/mostcore/links -Date: March 2017 -KernelVersion: 4.15 -Contact: Christian Gromm <christian.gromm@microchip.com> -Description: - This is used to retrieve a list of established links. -Users: diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig index 6262eb25c80b..c5a99f750abe 100644 --- a/drivers/staging/most/Kconfig +++ b/drivers/staging/most/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 -menuconfig MOST +menuconfig MOST_COMPONENTS tristate "MOST support" - depends on HAS_DMA && CONFIGFS_FS + depends on HAS_DMA && CONFIGFS_FS && MOST default n help Say Y here if you want to enable MOST support. 
@@ -16,7 +16,7 @@ menuconfig MOST -if MOST +if MOST_COMPONENTS source "drivers/staging/most/cdev/Kconfig" diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile index 20a99ecb37c4..a803a98654a8 100644 --- a/drivers/staging/most/Makefile +++ b/drivers/staging/most/Makefile @@ -1,7 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_MOST) += most_core.o -most_core-y := core.o -most_core-y += configfs.o obj-$(CONFIG_MOST_CDEV) += cdev/ obj-$(CONFIG_MOST_NET) += net/ diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c index 71943d17f825..cc1e3dea196d 100644 --- a/drivers/staging/most/cdev/cdev.c +++ b/drivers/staging/most/cdev/cdev.c @@ -16,8 +16,7 @@ #include <linux/kfifo.h> #include <linux/uaccess.h> #include <linux/idr.h> - -#include "../most.h" +#include <linux/most.h> #define CHRDEV_REGION_SIZE 50 diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c index 16593281fcda..8e0f27e61652 100644 --- a/drivers/staging/most/dim2/dim2.c +++ b/drivers/staging/most/dim2/dim2.c @@ -20,8 +20,7 @@ #include <linux/dma-mapping.h> #include <linux/sched.h> #include <linux/kthread.h> - -#include "../most.h" +#include <linux/most.h> #include "hal.h" #include "errors.h" #include "sysfs.h" diff --git a/drivers/staging/most/i2c/i2c.c b/drivers/staging/most/i2c/i2c.c index 2980f7065846..893a8babdb2f 100644 --- a/drivers/staging/most/i2c/i2c.c +++ b/drivers/staging/most/i2c/i2c.c @@ -13,8 +13,7 @@ #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/err.h> - -#include "../most.h" +#include <linux/most.h> enum { CH_RX, CH_TX, NUM_CHANNELS }; diff --git a/drivers/staging/most/most.h b/drivers/staging/most/most.h deleted file mode 100644 index 232e01b7f5d2..000000000000 --- a/drivers/staging/most/most.h +++ /dev/null @@ -1,337 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * most.h - API for component and adapter drivers - * - * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG - */ - -#ifndef __MOST_CORE_H__ -#define __MOST_CORE_H__ - -#include <linux/types.h> -#include <linux/device.h> - -struct module; -struct interface_private; - -/** - * Interface type - */ -enum most_interface_type { - ITYPE_LOOPBACK = 1, - ITYPE_I2C, - ITYPE_I2S, - ITYPE_TSI, - ITYPE_HBI, - ITYPE_MEDIALB_DIM, - ITYPE_MEDIALB_DIM2, - ITYPE_USB, - ITYPE_PCIE -}; - -/** - * Channel direction. - */ -enum most_channel_direction { - MOST_CH_RX = 1 << 0, - MOST_CH_TX = 1 << 1, -}; - -/** - * Channel data type. - */ -enum most_channel_data_type { - MOST_CH_CONTROL = 1 << 0, - MOST_CH_ASYNC = 1 << 1, - MOST_CH_ISOC = 1 << 2, - MOST_CH_SYNC = 1 << 5, -}; - -enum most_status_flags { - /* MBO was processed successfully (data was send or received )*/ - MBO_SUCCESS = 0, - /* The MBO contains wrong or missing information. */ - MBO_E_INVAL, - /* MBO was completed as HDM Channel will be closed */ - MBO_E_CLOSE, -}; - -/** - * struct most_channel_capability - Channel capability - * @direction: Supported channel directions. - * The value is bitwise OR-combination of the values from the - * enumeration most_channel_direction. Zero is allowed value and means - * "channel may not be used". - * @data_type: Supported channel data types. - * The value is bitwise OR-combination of the values from the - * enumeration most_channel_data_type. Zero is allowed value and means - * "channel may not be used". 
- * @num_buffers_packet: Maximum number of buffers supported by this channel - * for packet data types (Async,Control,QoS) - * @buffer_size_packet: Maximum buffer size supported by this channel - * for packet data types (Async,Control,QoS) - * @num_buffers_streaming: Maximum number of buffers supported by this channel - * for streaming data types (Sync,AV Packetized) - * @buffer_size_streaming: Maximum buffer size supported by this channel - * for streaming data types (Sync,AV Packetized) - * @name_suffix: Optional suffix providean by an HDM that is attached to the - * regular channel name. - * - * Describes the capabilities of a MOST channel like supported Data Types - * and directions. This information is provided by an HDM for the MostCore. - * - * The Core creates read only sysfs attribute files in - * /sys/devices/most/mdev#/<channel>/ with the - * following attributes: - * -available_directions - * -available_datatypes - * -number_of_packet_buffers - * -number_of_stream_buffers - * -size_of_packet_buffer - * -size_of_stream_buffer - * where content of each file is a string with all supported properties of this - * very channel attribute. - */ -struct most_channel_capability { - u16 direction; - u16 data_type; - u16 num_buffers_packet; - u16 buffer_size_packet; - u16 num_buffers_streaming; - u16 buffer_size_streaming; - const char *name_suffix; -}; - -/** - * struct most_channel_config - stores channel configuration - * @direction: direction of the channel - * @data_type: data type travelling over this channel - * @num_buffers: number of buffers - * @buffer_size: size of a buffer for AIM. - * Buffer size may be cutted down by HDM in a configure callback - * to match to a given interface and channel type. - * @extra_len: additional buffer space for internal HDM purposes like padding. - * May be set by HDM in a configure callback if needed. - * @subbuffer_size: size of a subbuffer - * @packets_per_xact: number of MOST frames that are packet inside one USB - * packet. This is USB specific - * - * Describes the configuration for a MOST channel. This information is - * provided from the MostCore to a HDM (like the Medusa PCIe Interface) as a - * parameter of the "configure" function call. - */ -struct most_channel_config { - enum most_channel_direction direction; - enum most_channel_data_type data_type; - u16 num_buffers; - u16 buffer_size; - u16 extra_len; - u16 subbuffer_size; - u16 packets_per_xact; - u16 dbr_size; -}; - -/* - * struct mbo - MOST Buffer Object. - * @context: context for core completion handler - * @priv: private data for HDM - * - * public: documented fields that are used for the communications - * between MostCore and HDMs - * - * @list: list head for use by the mbo's current owner - * @ifp: (in) associated interface instance - * @num_buffers_ptr: amount of pool buffers - * @hdm_channel_id: (in) HDM channel instance - * @virt_address: (in) kernel virtual address of the buffer - * @bus_address: (in) bus address of the buffer - * @buffer_length: (in) buffer payload length - * @processed_length: (out) processed length - * @status: (out) transfer status - * @complete: (in) completion routine - * - * The core allocates and initializes the MBO. - * - * The HDM receives MBO for transfer from the core with the call to enqueue(). - * The HDM copies the data to- or from the buffer depending on configured - * channel direction, set "processed_length" and "status" and completes - * the transfer procedure by calling the completion routine. 
- * - * Finally, the MBO is being deallocated or recycled for further - * transfers of the same or a different HDM. - * - * Directions of usage: - * The core driver should never access any MBO fields (even if marked - * as "public") while the MBO is owned by an HDM. The ownership starts with - * the call of enqueue() and ends with the call of its complete() routine. - * - * II. - * Every HDM attached to the core driver _must_ ensure that it returns any MBO - * it owns (due to a previous call to enqueue() by the core driver) before it - * de-registers an interface or gets unloaded from the kernel. If this direction - * is violated memory leaks will occur, since the core driver does _not_ track - * MBOs it is currently not in control of. - * - */ -struct mbo { - void *context; - void *priv; - struct list_head list; - struct most_interface *ifp; - int *num_buffers_ptr; - u16 hdm_channel_id; - void *virt_address; - dma_addr_t bus_address; - u16 buffer_length; - u16 processed_length; - enum most_status_flags status; - void (*complete)(struct mbo *mbo); -}; - -/** - * Interface instance description. - * - * Describes an interface of a MOST device the core driver is bound to. - * This structure is allocated and initialized in the HDM. MostCore may not - * modify this structure. - * - * @dev: the actual device - * @mod: module - * @interface Interface type. \sa most_interface_type. - * @description PRELIMINARY. - * Unique description of the device instance from point of view of the - * interface in free text form (ASCII). - * It may be a hexadecimal presentation of the memory address for the MediaLB - * IP or USB device ID with USB properties for USB interface, etc. - * @num_channels Number of channels and size of the channel_vector. - * @channel_vector Properties of the channels. - * Array index represents channel ID by the driver. - * @configure Callback to change data type for the channel of the - * interface instance. May be zero if the instance of the interface is not - * configurable. Parameter channel_config describes direction and data - * type for the channel, configured by the higher level. The content of - * @enqueue Delivers MBO to the HDM for processing. - * After HDM completes Rx- or Tx- operation the processed MBO shall - * be returned back to the MostCore using completion routine. - * The reason to get the MBO delivered from the MostCore after the channel - * is poisoned is the re-opening of the channel by the application. - * In this case the HDM shall hold MBOs and service the channel as usual. - * The HDM must be able to hold at least one MBO for each channel. - * The callback returns a negative value on error, otherwise 0. - * @poison_channel Informs HDM about closing the channel. The HDM shall - * cancel all transfers and synchronously or asynchronously return - * all enqueued for this channel MBOs using the completion routine. - * The callback returns a negative value on error, otherwise 0. - * @request_netinfo: triggers retrieving of network info from the HDM by - * means of "Message exchange over MDP/MEP" - * The call of the function request_netinfo with the parameter on_netinfo as - * NULL prohibits use of the previously obtained function pointer. - * @priv Private field used by mostcore to store context information. 
- */ -struct most_interface { - struct device *dev; - struct device *driver_dev; - struct module *mod; - enum most_interface_type interface; - const char *description; - unsigned int num_channels; - struct most_channel_capability *channel_vector; - void *(*dma_alloc)(struct mbo *mbo, u32 size); - void (*dma_free)(struct mbo *mbo, u32 size); - int (*configure)(struct most_interface *iface, int channel_idx, - struct most_channel_config *channel_config); - int (*enqueue)(struct most_interface *iface, int channel_idx, - struct mbo *mbo); - int (*poison_channel)(struct most_interface *iface, int channel_idx); - void (*request_netinfo)(struct most_interface *iface, int channel_idx, - void (*on_netinfo)(struct most_interface *iface, - unsigned char link_stat, - unsigned char *mac_addr)); - void *priv; - struct interface_private *p; -}; - -/** - * struct most_component - identifies a loadable component for the mostcore - * @list: list_head - * @name: component name - * @probe_channel: function for core to notify driver about channel connection - * @disconnect_channel: callback function to disconnect a certain channel - * @rx_completion: completion handler for received packets - * @tx_completion: completion handler for transmitted packets - */ -struct most_component { - struct list_head list; - const char *name; - struct module *mod; - int (*probe_channel)(struct most_interface *iface, int channel_idx, - struct most_channel_config *cfg, char *name, - char *param); - int (*disconnect_channel)(struct most_interface *iface, - int channel_idx); - int (*rx_completion)(struct mbo *mbo); - int (*tx_completion)(struct most_interface *iface, int channel_idx); - int (*cfg_complete)(void); -}; - -/** - * most_register_interface - Registers instance of the interface. - * @iface: Pointer to the interface instance description. - * - * Returns a pointer to the kobject of the generated instance. - * - * Note: HDM has to ensure that any reference held on the kobj is - * released before deregistering the interface. - */ -int most_register_interface(struct most_interface *iface); - -/** - * Deregisters instance of the interface. - * @intf_instance Pointer to the interface instance description. - */ -void most_deregister_interface(struct most_interface *iface); -void most_submit_mbo(struct mbo *mbo); - -/** - * most_stop_enqueue - prevents core from enqueing MBOs - * @iface: pointer to interface - * @channel_idx: channel index - */ -void most_stop_enqueue(struct most_interface *iface, int channel_idx); - -/** - * most_resume_enqueue - allow core to enqueue MBOs again - * @iface: pointer to interface - * @channel_idx: channel index - * - * This clears the enqueue halt flag and enqueues all MBOs currently - * in wait fifo. 
- */ -void most_resume_enqueue(struct most_interface *iface, int channel_idx); -int most_register_component(struct most_component *comp); -int most_deregister_component(struct most_component *comp); -struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx, - struct most_component *comp); -void most_put_mbo(struct mbo *mbo); -int channel_has_mbo(struct most_interface *iface, int channel_idx, - struct most_component *comp); -int most_start_channel(struct most_interface *iface, int channel_idx, - struct most_component *comp); -int most_stop_channel(struct most_interface *iface, int channel_idx, - struct most_component *comp); -int __init configfs_init(void); -int most_register_configfs_subsys(struct most_component *comp); -void most_deregister_configfs_subsys(struct most_component *comp); -int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name, - char *comp_param); -int most_remove_link(char *mdev, char *mdev_ch, char *comp_name); -int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val); -int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val); -int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val); -int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val); -int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf); -int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf); -int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val); -int most_cfg_complete(char *comp_name); -void most_interface_register_notify(const char *mdev_name); -#endif /* MOST_CORE_H_ */ diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c index 5547e36e09de..830f089f1a88 100644 --- a/drivers/staging/most/net/net.c +++ b/drivers/staging/most/net/net.c @@ -15,8 +15,7 @@ #include <linux/list.h> #include <linux/wait.h> #include <linux/kobject.h> - -#include "../most.h" +#include <linux/most.h> #define MEP_HDR_LEN 8 #define MDP_HDR_LEN 16 diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c index 44cf2334834f..1527f410af2b 100644 --- a/drivers/staging/most/sound/sound.c +++ b/drivers/staging/most/sound/sound.c @@ -17,8 +17,7 @@ #include <sound/pcm_params.h> #include <linux/sched.h> #include <linux/kthread.h> - -#include "../most.h" +#include <linux/most.h> #define DRIVER_NAME "sound" #define STRING_SIZE 80 diff --git a/drivers/staging/most/usb/usb.c b/drivers/staging/most/usb/usb.c index 0bda88c4bc89..e8c5a8c98375 100644 --- a/drivers/staging/most/usb/usb.c +++ b/drivers/staging/most/usb/usb.c @@ -23,8 +23,7 @@ #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/uaccess.h> - -#include "../most.h" +#include <linux/most.h> #define USB_MTU 512 #define NO_ISOCHRONOUS_URB 0 diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c index d32ae49d617b..829df899b993 100644 --- a/drivers/staging/most/video/video.c +++ b/drivers/staging/most/video/video.c @@ -20,8 +20,7 @@ #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-fh.h> - -#include "../most.h" +#include <linux/most.h> #define V4L2_CMP_MAX_INPUT 1 @@ -74,7 +73,7 @@ static int comp_vdev_open(struct file *filp) struct comp_fh *fh; switch (vdev->vfl_type) { - case VFL_TYPE_GRABBER: + case VFL_TYPE_VIDEO: break; default: return -EINVAL; @@ -424,7 +423,7 @@ static int comp_register_videodev(struct most_video_dev *mdev) /* Register the v4l2 device */ video_set_drvdata(mdev->vdev, mdev); - ret = video_register_device(mdev->vdev, VFL_TYPE_GRABBER, -1); 
+ ret = video_register_device(mdev->vdev, VFL_TYPE_VIDEO, -1); if (ret) { v4l2_err(&mdev->v4l2_dev, "video_register_device failed (%d)\n", ret); diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c index 20b898954416..14592ed9ce98 100644 --- a/drivers/staging/mt7621-dma/mtk-hsdma.c +++ b/drivers/staging/mt7621-dma/mtk-hsdma.c @@ -220,8 +220,7 @@ static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma) mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX), mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX)); - dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " - "intr_stat %08x, intr_mask %08x\n", + dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, intr_stat %08x, intr_mask %08x\n", mtk_hsdma_read(hsdma, HSDMA_REG_INFO), mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG), mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT), diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts index 1fb560ff059c..a7c0d3115d72 100644 --- a/drivers/staging/mt7621-dts/gbpc1.dts +++ b/drivers/staging/mt7621-dts/gbpc1.dts @@ -114,6 +114,10 @@ &pcie { pinctrl-names = "default"; pinctrl-0 = <&pcie_pins>; + + reset-gpios = <&gpio 19 GPIO_ACTIVE_LOW>, + <&gpio 8 GPIO_ACTIVE_LOW>, + <&gpio 7 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi index d89d68ffa7bc..9e5cf68731bb 100644 --- a/drivers/staging/mt7621-dts/mt7621.dtsi +++ b/drivers/staging/mt7621-dts/mt7621.dtsi @@ -286,7 +286,7 @@ pcie_pins: pcie0 { pcie0 { groups = "pcie"; - function = "pcie rst"; + function = "gpio"; }; }; @@ -512,7 +512,6 @@ #address-cells = <3>; #size-cells = <2>; - perst-gpio = <&gpio 19 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&pcie_pins>; @@ -532,12 +531,14 @@ status = "disabled"; - resets = <&rstctrl 23 &rstctrl 24 &rstctrl 25 &rstctrl 26>; - reset-names = "pcie", "pcie0", "pcie1", "pcie2"; + resets = <&rstctrl 24 &rstctrl 25 &rstctrl 26>; + reset-names = "pcie0", "pcie1", "pcie2"; clocks = <&clkctrl 24 &clkctrl 25 &clkctrl 26>; clock-names = "pcie0", "pcie1", "pcie2"; - phys = <&pcie0_phy 0>, <&pcie0_phy 1>, <&pcie1_phy 0>; - phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2"; + phys = <&pcie0_phy 1>, <&pcie2_phy 0>; + phy-names = "pcie-phy0", "pcie-phy2"; + + reset-gpios = <&gpio 19 GPIO_ACTIVE_LOW>; pcie@0,0 { reg = <0x0000 0 0 0 0>; @@ -570,7 +571,7 @@ #phy-cells = <1>; }; - pcie1_phy: pcie-phy@1e14a000 { + pcie2_phy: pcie-phy@1e14a000 { compatible = "mediatek,mt7621-pci-phy"; reg = <0x1e14a000 0x0700>; #phy-cells = <1>; diff --git a/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c b/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c index d2a07f145143..57743fd22be4 100644 --- a/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c +++ b/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c @@ -75,34 +75,27 @@ #define RG_PE1_FRC_MSTCKDIV BIT(5) -#define MAX_PHYS 2 +#define XTAL_MODE_SEL_SHIFT 6 +#define XTAL_MODE_SEL_MASK 0x7 -/** - * struct mt7621_pci_phy_instance - Mt7621 Pcie PHY device - * @phy: pointer to the kernel PHY device - * @port_base: base register - * @index: internal ID to identify the Mt7621 PCIe PHY - */ -struct mt7621_pci_phy_instance { - struct phy *phy; - void __iomem *port_base; - u32 index; -}; +#define MAX_PHYS 2 /** * struct mt7621_pci_phy - Mt7621 Pcie PHY core * @dev: pointer to device * @regmap: kernel regmap pointer - * @phys: pointer to Mt7621 PHY device - * @nphys: number of PHY devices for this core + * @phy: pointer to the kernel PHY device + * @port_base: base 
register + * @has_dual_port: if the phy has dual ports. * @bypass_pipe_rst: mark if 'mt7621_bypass_pipe_rst' * needs to be executed. Depends on chip revision. */ struct mt7621_pci_phy { struct device *dev; struct regmap *regmap; - struct mt7621_pci_phy_instance **phys; - int nphys; + struct phy *phy; + void __iomem *port_base; + bool has_dual_port; bool bypass_pipe_rst; }; @@ -120,162 +113,152 @@ static inline void phy_write(struct mt7621_pci_phy *phy, u32 val, u32 reg) regmap_write(phy->regmap, reg, val); } -static void mt7621_bypass_pipe_rst(struct mt7621_pci_phy *phy, - struct mt7621_pci_phy_instance *instance) +static inline void mt7621_phy_rmw(struct mt7621_pci_phy *phy, + u32 reg, u32 clr, u32 set) +{ + u32 val = phy_read(phy, reg); + + val &= ~clr; + val |= set; + phy_write(phy, val, reg); +} + +static void mt7621_bypass_pipe_rst(struct mt7621_pci_phy *phy) { - u32 offset = (instance->index != 1) ? - RG_PE1_PIPE_REG : RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH; - u32 reg; - - reg = phy_read(phy, offset); - reg &= ~(RG_PE1_PIPE_RST | RG_PE1_PIPE_CMD_FRC); - reg |= (RG_PE1_PIPE_RST | RG_PE1_PIPE_CMD_FRC); - phy_write(phy, reg, offset); + mt7621_phy_rmw(phy, RG_PE1_PIPE_REG, 0, RG_PE1_PIPE_RST); + mt7621_phy_rmw(phy, RG_PE1_PIPE_REG, 0, RG_PE1_PIPE_CMD_FRC); + + if (phy->has_dual_port) { + mt7621_phy_rmw(phy, RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH, + 0, RG_PE1_PIPE_RST); + mt7621_phy_rmw(phy, RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH, + 0, RG_PE1_PIPE_CMD_FRC); + } } -static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy, - struct mt7621_pci_phy_instance *instance) +static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy) { struct device *dev = phy->dev; - u32 reg = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0); - u32 offset; - u32 val; + u32 xtal_mode; + + xtal_mode = (rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0) + >> XTAL_MODE_SEL_SHIFT) & XTAL_MODE_SEL_MASK; - reg = (reg >> 6) & 0x7; /* Set PCIe Port PHY to disable SSC */ /* Debug Xtal Type */ - val = phy_read(phy, RG_PE1_FRC_H_XTAL_REG); - val &= ~(RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE); - val |= RG_PE1_FRC_H_XTAL_TYPE; - val |= RG_PE1_H_XTAL_TYPE_VAL(0x00); - phy_write(phy, val, RG_PE1_FRC_H_XTAL_REG); + mt7621_phy_rmw(phy, RG_PE1_FRC_H_XTAL_REG, + RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE, + RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE_VAL(0x00)); /* disable port */ - offset = (instance->index != 1) ? 
- RG_PE1_FRC_PHY_REG : RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH; - val = phy_read(phy, offset); - val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN); - val |= RG_PE1_FRC_PHY_EN; - phy_write(phy, val, offset); - - /* Set Pre-divider ratio (for host mode) */ - val = phy_read(phy, RG_PE1_H_PLL_REG); - val &= ~(RG_PE1_H_PLL_PREDIV); - - if (reg <= 5 && reg >= 3) { /* 40MHz Xtal */ - val |= RG_PE1_H_PLL_PREDIV_VAL(0x01); - phy_write(phy, val, RG_PE1_H_PLL_REG); + mt7621_phy_rmw(phy, RG_PE1_FRC_PHY_REG, + RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN); + + if (phy->has_dual_port) { + mt7621_phy_rmw(phy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH, + RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN); + } + + if (xtal_mode <= 5 && xtal_mode >= 3) { /* 40MHz Xtal */ + /* Set Pre-divider ratio (for host mode) */ + mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, + RG_PE1_H_PLL_PREDIV, + RG_PE1_H_PLL_PREDIV_VAL(0x01)); dev_info(dev, "Xtal is 40MHz\n"); - } else { /* 25MHz | 20MHz Xtal */ - val |= RG_PE1_H_PLL_PREDIV_VAL(0x00); - phy_write(phy, val, RG_PE1_H_PLL_REG); - if (reg >= 6) { - dev_info(dev, "Xtal is 25MHz\n"); - - /* Select feedback clock */ - val = phy_read(phy, RG_PE1_H_PLL_FBKSEL_REG); - val &= ~(RG_PE1_H_PLL_FBKSEL); - val |= RG_PE1_H_PLL_FBKSEL_VAL(0x01); - phy_write(phy, val, RG_PE1_H_PLL_FBKSEL_REG); - - /* DDS NCPO PCW (for host mode) */ - val = phy_read(phy, RG_PE1_H_LCDDS_SSC_PRD_REG); - val &= ~(RG_PE1_H_LCDDS_SSC_PRD); - val |= RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18000000); - phy_write(phy, val, RG_PE1_H_LCDDS_SSC_PRD_REG); - - /* DDS SSC dither period control */ - val = phy_read(phy, RG_PE1_H_LCDDS_SSC_PRD_REG); - val &= ~(RG_PE1_H_LCDDS_SSC_PRD); - val |= RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18d); - phy_write(phy, val, RG_PE1_H_LCDDS_SSC_PRD_REG); - - /* DDS SSC dither amplitude control */ - val = phy_read(phy, RG_PE1_H_LCDDS_SSC_DELTA_REG); - val &= ~(RG_PE1_H_LCDDS_SSC_DELTA | - RG_PE1_H_LCDDS_SSC_DELTA1); - val |= RG_PE1_H_LCDDS_SSC_DELTA_VAL(0x4a); - val |= RG_PE1_H_LCDDS_SSC_DELTA1_VAL(0x4a); - phy_write(phy, val, RG_PE1_H_LCDDS_SSC_DELTA_REG); - } else { - dev_info(dev, "Xtal is 20MHz\n"); - } + } else if (xtal_mode >= 6) { /* 25MHz Xal */ + mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, + RG_PE1_H_PLL_PREDIV, + RG_PE1_H_PLL_PREDIV_VAL(0x00)); + /* Select feedback clock */ + mt7621_phy_rmw(phy, RG_PE1_H_PLL_FBKSEL_REG, + RG_PE1_H_PLL_FBKSEL, + RG_PE1_H_PLL_FBKSEL_VAL(0x01)); + /* DDS NCPO PCW (for host mode) */ + mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_PRD_REG, + RG_PE1_H_LCDDS_SSC_PRD, + RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18000000)); + /* DDS SSC dither period control */ + mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_PRD_REG, + RG_PE1_H_LCDDS_SSC_PRD, + RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18d)); + /* DDS SSC dither amplitude control */ + mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_DELTA_REG, + RG_PE1_H_LCDDS_SSC_DELTA | + RG_PE1_H_LCDDS_SSC_DELTA1, + RG_PE1_H_LCDDS_SSC_DELTA_VAL(0x4a) | + RG_PE1_H_LCDDS_SSC_DELTA1_VAL(0x4a)); + dev_info(dev, "Xtal is 25MHz\n"); + } else { /* 20MHz Xtal */ + mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, + RG_PE1_H_PLL_PREDIV, + RG_PE1_H_PLL_PREDIV_VAL(0x00)); + + dev_info(dev, "Xtal is 20MHz\n"); } /* DDS clock inversion */ - val = phy_read(phy, RG_PE1_LCDDS_CLK_PH_INV_REG); - val &= ~(RG_PE1_LCDDS_CLK_PH_INV); - val |= RG_PE1_LCDDS_CLK_PH_INV; - phy_write(phy, val, RG_PE1_LCDDS_CLK_PH_INV_REG); + mt7621_phy_rmw(phy, RG_PE1_LCDDS_CLK_PH_INV_REG, + RG_PE1_LCDDS_CLK_PH_INV, RG_PE1_LCDDS_CLK_PH_INV); /* Set PLL bits */ - val = phy_read(phy, RG_PE1_H_PLL_REG); - val &= ~(RG_PE1_H_PLL_BC | RG_PE1_H_PLL_BP | RG_PE1_H_PLL_IR | - RG_PE1_H_PLL_IC | 
RG_PE1_PLL_DIVEN); - val |= RG_PE1_H_PLL_BC_VAL(0x02); - val |= RG_PE1_H_PLL_BP_VAL(0x06); - val |= RG_PE1_H_PLL_IR_VAL(0x02); - val |= RG_PE1_H_PLL_IC_VAL(0x01); - val |= RG_PE1_PLL_DIVEN_VAL(0x02); - phy_write(phy, val, RG_PE1_H_PLL_REG); - - val = phy_read(phy, RG_PE1_H_PLL_BR_REG); - val &= ~(RG_PE1_H_PLL_BR); - val |= RG_PE1_H_PLL_BR_VAL(0x00); - phy_write(phy, val, RG_PE1_H_PLL_BR_REG); - - if (reg <= 5 && reg >= 3) { /* 40MHz Xtal */ + mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, + RG_PE1_H_PLL_BC | RG_PE1_H_PLL_BP | RG_PE1_H_PLL_IR | + RG_PE1_H_PLL_IC | RG_PE1_PLL_DIVEN, + RG_PE1_H_PLL_BC_VAL(0x02) | RG_PE1_H_PLL_BP_VAL(0x06) | + RG_PE1_H_PLL_IR_VAL(0x02) | RG_PE1_H_PLL_IC_VAL(0x01) | + RG_PE1_PLL_DIVEN_VAL(0x02)); + + mt7621_phy_rmw(phy, RG_PE1_H_PLL_BR_REG, + RG_PE1_H_PLL_BR, RG_PE1_H_PLL_BR_VAL(0x00)); + + if (xtal_mode <= 5 && xtal_mode >= 3) { /* 40MHz Xtal */ /* set force mode enable of da_pe1_mstckdiv */ - val = phy_read(phy, RG_PE1_MSTCKDIV_REG); - val &= ~(RG_PE1_MSTCKDIV | RG_PE1_FRC_MSTCKDIV); - val |= (RG_PE1_MSTCKDIV_VAL(0x01) | RG_PE1_FRC_MSTCKDIV); - phy_write(phy, val, RG_PE1_MSTCKDIV_REG); + mt7621_phy_rmw(phy, RG_PE1_MSTCKDIV_REG, + RG_PE1_MSTCKDIV | RG_PE1_FRC_MSTCKDIV, + RG_PE1_MSTCKDIV_VAL(0x01) | RG_PE1_FRC_MSTCKDIV); } } static int mt7621_pci_phy_init(struct phy *phy) { - struct mt7621_pci_phy_instance *instance = phy_get_drvdata(phy); - struct mt7621_pci_phy *mphy = dev_get_drvdata(phy->dev.parent); + struct mt7621_pci_phy *mphy = phy_get_drvdata(phy); if (mphy->bypass_pipe_rst) - mt7621_bypass_pipe_rst(mphy, instance); + mt7621_bypass_pipe_rst(mphy); - mt7621_set_phy_for_ssc(mphy, instance); + mt7621_set_phy_for_ssc(mphy); return 0; } static int mt7621_pci_phy_power_on(struct phy *phy) { - struct mt7621_pci_phy_instance *instance = phy_get_drvdata(phy); - struct mt7621_pci_phy *mphy = dev_get_drvdata(phy->dev.parent); - u32 offset = (instance->index != 1) ? - RG_PE1_FRC_PHY_REG : RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH; - u32 val; + struct mt7621_pci_phy *mphy = phy_get_drvdata(phy); /* Enable PHY and disable force mode */ - val = phy_read(mphy, offset); - val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN); - val |= (RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN); - phy_write(mphy, val, offset); + mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG, + RG_PE1_FRC_PHY_EN, RG_PE1_PHY_EN); + + if (mphy->has_dual_port) { + mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH, + RG_PE1_FRC_PHY_EN, RG_PE1_PHY_EN); + } return 0; } static int mt7621_pci_phy_power_off(struct phy *phy) { - struct mt7621_pci_phy_instance *instance = phy_get_drvdata(phy); - struct mt7621_pci_phy *mphy = dev_get_drvdata(phy->dev.parent); - u32 offset = (instance->index != 1) ? 
- RG_PE1_FRC_PHY_REG : RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH; - u32 val; + struct mt7621_pci_phy *mphy = phy_get_drvdata(phy); /* Disable PHY */ - val = phy_read(mphy, offset); - val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN); - val |= RG_PE1_FRC_PHY_EN; - phy_write(mphy, val, offset); + mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG, + RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN); + + if (mphy->has_dual_port) { + mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH, + RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN); + } return 0; } @@ -298,13 +281,15 @@ static struct phy *mt7621_pcie_phy_of_xlate(struct device *dev, { struct mt7621_pci_phy *mt7621_phy = dev_get_drvdata(dev); - if (args->args_count == 0) - return mt7621_phy->phys[0]->phy; - if (WARN_ON(args->args[0] >= MAX_PHYS)) return ERR_PTR(-ENODEV); - return mt7621_phy->phys[args->args[0]]->phy; + mt7621_phy->has_dual_port = args->args[0]; + + dev_info(dev, "PHY for 0x%08x (dual port = %d)\n", + (unsigned int)mt7621_phy->port_base, mt7621_phy->has_dual_port); + + return mt7621_phy->phy; } static const struct soc_device_attribute mt7621_pci_quirks_match[] = { @@ -325,19 +310,11 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev) struct phy_provider *provider; struct mt7621_pci_phy *phy; struct resource *res; - int port; - void __iomem *port_base; phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); if (!phy) return -ENOMEM; - phy->nphys = MAX_PHYS; - phy->phys = devm_kcalloc(dev, phy->nphys, - sizeof(*phy->phys), GFP_KERNEL); - if (!phy->phys) - return -ENOMEM; - attr = soc_device_match(mt7621_pci_quirks_match); if (attr) phy->bypass_pipe_rst = true; @@ -351,39 +328,25 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev) return -ENXIO; } - port_base = devm_ioremap_resource(dev, res); - if (IS_ERR(port_base)) { + phy->port_base = devm_ioremap_resource(dev, res); + if (IS_ERR(phy->port_base)) { dev_err(dev, "failed to remap phy regs\n"); - return PTR_ERR(port_base); + return PTR_ERR(phy->port_base); } - phy->regmap = devm_regmap_init_mmio(phy->dev, port_base, + phy->regmap = devm_regmap_init_mmio(phy->dev, phy->port_base, &mt7621_pci_phy_regmap_config); if (IS_ERR(phy->regmap)) return PTR_ERR(phy->regmap); - for (port = 0; port < MAX_PHYS; port++) { - struct mt7621_pci_phy_instance *instance; - struct phy *pphy; - - instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL); - if (!instance) - return -ENOMEM; - - phy->phys[port] = instance; - - pphy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops); - if (IS_ERR(phy)) { - dev_err(dev, "failed to create phy\n"); - return PTR_ERR(phy); - } - - instance->port_base = port_base; - instance->phy = pphy; - instance->index = port; - phy_set_drvdata(pphy, instance); + phy->phy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops); + if (IS_ERR(phy)) { + dev_err(dev, "failed to create phy\n"); + return PTR_ERR(phy); } + phy_set_drvdata(phy->phy, phy); + provider = devm_of_phy_provider_register(dev, mt7621_pcie_phy_of_xlate); return PTR_ERR_OR_ZERO(provider); @@ -403,12 +366,7 @@ static struct platform_driver mt7621_pci_phy_driver = { }, }; -static int __init mt7621_pci_phy_drv_init(void) -{ - return platform_driver_register(&mt7621_pci_phy_driver); -} - -module_init(mt7621_pci_phy_drv_init); +builtin_platform_driver(mt7621_pci_phy_driver); MODULE_AUTHOR("Sergio Paracuellos <sergio.paracuellos@gmail.com>"); MODULE_DESCRIPTION("MediaTek MT7621 PCIe PHY driver"); diff --git a/drivers/staging/mt7621-pci/mediatek,mt7621-pci.txt b/drivers/staging/mt7621-pci/mediatek,mt7621-pci.txt index 
604ec813bd45..327a68267309 100644 --- a/drivers/staging/mt7621-pci/mediatek,mt7621-pci.txt +++ b/drivers/staging/mt7621-pci/mediatek,mt7621-pci.txt @@ -6,7 +6,6 @@ Required properties: - reg: Base addresses and lengths of the PCIe subsys and root ports. - bus-range: Range of bus numbers associated with this controller. - #address-cells: Address representation for root ports (must be 3) -- perst-gpio: PCIe reset signal line. - pinctrl-names : The pin control state names. - pinctrl-0: The "default" pinctrl state. - #size-cells: Size representation for root ports (must be 2) @@ -24,6 +23,7 @@ Required properties: See ../clocks/clock-bindings.txt for details. - clock-names: Must be "pcie0", "pcie1", "pcieN"... based on the number of root ports. +- reset-gpios: GPIO specs for the reset pins. In addition, the device tree node must have sub-nodes describing each PCIe port interface, having the following mandatory properties: @@ -49,7 +49,6 @@ Example for MT7621: #address-cells = <3>; #size-cells = <2>; - perst-gpio = <&gpio 19 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&pcie_pins>; @@ -74,6 +73,10 @@ Example for MT7621: clocks = <&clkctrl 24 &clkctrl 25 &clkctrl 26>; clock-names = "pcie0", "pcie1", "pcie2"; + reset-gpios = <&gpio 19 GPIO_ACTIVE_LOW>, + <&gpio 8 GPIO_ACTIVE_LOW>, + <&gpio 7 GPIO_ACTIVE_LOW>; + pcie@0,0 { reg = <0x0000 0 0 0 0>; #address-cells = <3>; diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c index 3633c924848e..f58e3a51fc71 100644 --- a/drivers/staging/mt7621-pci/pci-mt7621.c +++ b/drivers/staging/mt7621-pci/pci-mt7621.c @@ -45,8 +45,6 @@ /* rt_sysc_membase relative registers */ #define RALINK_CLKCFG1 0x30 -#define RALINK_PCIE_CLK_GEN 0x7c -#define RALINK_PCIE_CLK_GEN1 0x80 /* Host-PCI bridge registers */ #define RALINK_PCI_PCICFG_ADDR 0x0000 @@ -57,13 +55,13 @@ #define RALINK_PCI_IOBASE 0x002C /* PCICFG virtual bridges */ -#define MT7621_BR0_MASK GENMASK(19, 16) -#define MT7621_BR1_MASK GENMASK(23, 20) -#define MT7621_BR2_MASK GENMASK(27, 24) -#define MT7621_BR_ALL_MASK GENMASK(27, 16) -#define MT7621_BR0_SHIFT 16 -#define MT7621_BR1_SHIFT 20 -#define MT7621_BR2_SHIFT 24 +#define PCIE_P2P_MAX 3 +#define PCIE_P2P_BR_DEVNUM_SHIFT(p) (16 + (p) * 4) +#define PCIE_P2P_BR_DEVNUM0_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(0) +#define PCIE_P2P_BR_DEVNUM1_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(1) +#define PCIE_P2P_BR_DEVNUM2_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(2) +#define PCIE_P2P_BR_DEVNUM_MASK 0xf +#define PCIE_P2P_BR_DEVNUM_MASK_FULL (0xfff << PCIE_P2P_BR_DEVNUM0_SHIFT) /* PCIe RC control registers */ #define MT7621_PCIE_OFFSET 0x2000 @@ -85,14 +83,10 @@ #define PCIE_PORT_CLK_EN(x) BIT(24 + (x)) #define PCIE_PORT_LINKUP BIT(0) -#define PCIE_CLK_GEN_EN BIT(31) -#define PCIE_CLK_GEN_DIS 0 -#define PCIE_CLK_GEN1_DIS GENMASK(30, 24) -#define PCIE_CLK_GEN1_EN (BIT(27) | BIT(25)) #define MEMORY_BASE 0x0 #define PERST_MODE_MASK GENMASK(11, 10) #define PERST_MODE_GPIO BIT(10) -#define PERST_DELAY_US 1000 +#define PERST_DELAY_MS 100 /** * struct mt7621_pcie_port - PCIe port information @@ -101,6 +95,7 @@ * @pcie: pointer to PCIe host info * @phy: pointer to PHY control block * @pcie_rst: pointer to port reset control + * @gpio_rst: gpio reset * @slot: port slot * @enabled: indicates if port is enabled */ @@ -110,6 +105,7 @@ struct mt7621_pcie_port { struct mt7621_pcie *pcie; struct phy *phy; struct reset_control *pcie_rst; + struct gpio_desc *gpio_rst; u32 slot; bool enabled; }; @@ -122,9 +118,8 @@ struct mt7621_pcie_port { * @busn: bus range * 
@offset: IO / Memory offset * @dev: Pointer to PCIe device + * @io_map_base: virtual memory base address for io * @ports: pointer to PCIe port information - * @perst: gpio reset - * @rst: pointer to pcie reset * @resets_inverted: depends on chip revision * reset lines are inverted. */ @@ -138,9 +133,8 @@ struct mt7621_pcie { resource_size_t mem; resource_size_t io; } offset; + unsigned long io_map_base; struct list_head ports; - struct gpio_desc *perst; - struct reset_control *rst; bool resets_inverted; }; @@ -154,6 +148,15 @@ static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg) writel(val, pcie->base + reg); } +static inline void pcie_rmw(struct mt7621_pcie *pcie, u32 reg, u32 clr, u32 set) +{ + u32 val = readl(pcie->base + reg); + + val &= ~clr; + val |= set; + writel(val, pcie->base + reg); +} + static inline u32 pcie_port_read(struct mt7621_pcie_port *port, u32 reg) { return readl(port->base + reg); @@ -207,16 +210,16 @@ static void write_config(struct mt7621_pcie *pcie, unsigned int dev, pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA); } -static inline void mt7621_perst_gpio_pcie_assert(struct mt7621_pcie *pcie) +static inline void mt7621_rst_gpio_pcie_assert(struct mt7621_pcie_port *port) { - gpiod_set_value(pcie->perst, 0); - mdelay(PERST_DELAY_US); + if (port->gpio_rst) + gpiod_set_value(port->gpio_rst, 1); } -static inline void mt7621_perst_gpio_pcie_deassert(struct mt7621_pcie *pcie) +static inline void mt7621_rst_gpio_pcie_deassert(struct mt7621_pcie_port *port) { - gpiod_set_value(pcie->perst, 1); - mdelay(PERST_DELAY_US); + if (port->gpio_rst) + gpiod_set_value(port->gpio_rst, 0); } static inline bool mt7621_pcie_port_is_linkup(struct mt7621_pcie_port *port) @@ -224,6 +227,11 @@ static inline bool mt7621_pcie_port_is_linkup(struct mt7621_pcie_port *port) return (pcie_port_read(port, RALINK_PCI_STATUS) & PCIE_PORT_LINKUP) != 0; } +static inline void mt7621_pcie_port_clk_enable(struct mt7621_pcie_port *port) +{ + rt_sysc_m32(0, PCIE_PORT_CLK_EN(port->slot), RALINK_CLKCFG1); +} + static inline void mt7621_pcie_port_clk_disable(struct mt7621_pcie_port *port) { rt_sysc_m32(PCIE_PORT_CLK_EN(port->slot), 0, RALINK_CLKCFG1); @@ -249,13 +257,6 @@ static inline void mt7621_control_deassert(struct mt7621_pcie_port *port) reset_control_assert(port->pcie_rst); } -static void mt7621_reset_port(struct mt7621_pcie_port *port) -{ - mt7621_control_assert(port); - msleep(100); - mt7621_control_deassert(port); -} - static void setup_cm_memory_region(struct mt7621_pcie *pcie) { struct resource *mem_resource = &pcie->mem; @@ -292,22 +293,21 @@ static int mt7621_pci_parse_request_of_pci_ranges(struct mt7621_pcie *pcie) } for_each_of_pci_range(&parser, &range) { - struct resource *res = NULL; - switch (range.flags & IORESOURCE_TYPE_BITS) { case IORESOURCE_IO: - ioremap(range.cpu_addr, range.size); - res = &pcie->io; + pcie->io_map_base = + (unsigned long)ioremap(range.cpu_addr, + range.size); + of_pci_range_to_resource(&range, node, &pcie->io); + pcie->io.start = range.cpu_addr; + pcie->io.end = range.cpu_addr + range.size - 1; pcie->offset.io = 0x00000000UL; break; case IORESOURCE_MEM: - res = &pcie->mem; + of_pci_range_to_resource(&range, node, &pcie->mem); pcie->offset.mem = 0x00000000UL; break; } - - if (res) - of_pci_range_to_resource(&range, node, res); } err = of_pci_parse_bus_range(node, &pcie->busn); @@ -319,6 +319,8 @@ static int mt7621_pci_parse_request_of_pci_ranges(struct mt7621_pcie *pcie) pcie->busn.flags = IORESOURCE_BUS; } + set_io_port_base(pcie->io_map_base); + 
return 0; } @@ -356,9 +358,16 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie, snprintf(name, sizeof(name), "pcie-phy%d", slot); port->phy = devm_phy_get(dev, name); - if (IS_ERR(port->phy)) + if (IS_ERR(port->phy) && slot != 1) return PTR_ERR(port->phy); + port->gpio_rst = devm_gpiod_get_index_optional(dev, "reset", slot, + GPIOD_OUT_LOW); + if (IS_ERR(port->gpio_rst)) { + dev_err(dev, "Failed to get GPIO for PCIe%d\n", slot); + return PTR_ERR(port->gpio_rst); + } + port->slot = slot; port->pcie = pcie; @@ -375,12 +384,6 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie) struct resource regs; int err; - pcie->perst = devm_gpiod_get(dev, "perst", GPIOD_OUT_HIGH); - if (IS_ERR(pcie->perst)) { - dev_err(dev, "failed to get gpio perst\n"); - return PTR_ERR(pcie->perst); - } - err = of_address_to_resource(node, 0, ®s); if (err) { dev_err(dev, "missing \"reg\" property\n"); @@ -391,12 +394,6 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie) if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); - pcie->rst = devm_reset_control_get_exclusive(dev, "pcie"); - if (PTR_ERR(pcie->rst) == -EPROBE_DEFER) { - dev_err(dev, "failed to get pcie reset control\n"); - return PTR_ERR(pcie->rst); - } - for_each_available_child_of_node(node, child) { int slot; @@ -426,12 +423,6 @@ static int mt7621_pcie_init_port(struct mt7621_pcie_port *port) u32 slot = port->slot; int err; - /* - * Any MT7621 Ralink pcie controller that doesn't have 0x0101 at - * the end of the chip_id has inverted PCI resets. - */ - mt7621_reset_port(port); - err = phy_init(port->phy); if (err) { dev_err(dev, "failed to initialize port%d phy\n", slot); @@ -450,34 +441,66 @@ static int mt7621_pcie_init_port(struct mt7621_pcie_port *port) return 0; } +static void mt7621_pcie_reset_assert(struct mt7621_pcie *pcie) +{ + struct mt7621_pcie_port *port; + + list_for_each_entry(port, &pcie->ports, list) { + /* PCIe RC reset assert */ + mt7621_control_assert(port); + + /* PCIe EP reset assert */ + mt7621_rst_gpio_pcie_assert(port); + } + + mdelay(PERST_DELAY_MS); +} + +static void mt7621_pcie_reset_rc_deassert(struct mt7621_pcie *pcie) +{ + struct mt7621_pcie_port *port; + + list_for_each_entry(port, &pcie->ports, list) + mt7621_control_deassert(port); +} + +static void mt7621_pcie_reset_ep_deassert(struct mt7621_pcie *pcie) +{ + struct mt7621_pcie_port *port; + + list_for_each_entry(port, &pcie->ports, list) + mt7621_rst_gpio_pcie_deassert(port); + + mdelay(PERST_DELAY_MS); +} + static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie) { struct device *dev = pcie->dev; struct mt7621_pcie_port *port, *tmp; - u32 val = 0; int err; rt_sysc_m32(PERST_MODE_MASK, PERST_MODE_GPIO, MT7621_GPIO_MODE); - mt7621_perst_gpio_pcie_assert(pcie); + mt7621_pcie_reset_assert(pcie); + mt7621_pcie_reset_rc_deassert(pcie); list_for_each_entry_safe(port, tmp, &pcie->ports, list) { u32 slot = port->slot; + if (slot == 1) { + port->enabled = true; + continue; + } + err = mt7621_pcie_init_port(port); if (err) { dev_err(dev, "Initiating port %d failed\n", slot); list_del(&port->list); - } else { - val = read_config(pcie, slot, PCIE_FTS_NUM); - dev_info(dev, "Port %d N_FTS = %x\n", slot, - (unsigned int)val); } } - reset_control_assert(pcie->rst); - - mt7621_perst_gpio_pcie_deassert(pcie); + mt7621_pcie_reset_ep_deassert(pcie); list_for_each_entry(port, &pcie->ports, list) { u32 slot = port->slot; @@ -485,19 +508,13 @@ static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie) if (!mt7621_pcie_port_is_linkup(port)) { dev_err(dev, "pcie%d no 
card, disable it (RST & CLK)\n", slot); - phy_power_off(port->phy); + if (slot != 1) + phy_power_off(port->phy); mt7621_control_assert(port); mt7621_pcie_port_clk_disable(port); port->enabled = false; } } - - rt_sysc_m32(0x30, 2 << 4, SYSC_REG_SYSTEM_CONFIG1); - rt_sysc_m32(PCIE_CLK_GEN_EN, PCIE_CLK_GEN_DIS, RALINK_PCIE_CLK_GEN); - rt_sysc_m32(PCIE_CLK_GEN1_DIS, PCIE_CLK_GEN1_EN, RALINK_PCIE_CLK_GEN1); - rt_sysc_m32(PCIE_CLK_GEN_DIS, PCIE_CLK_GEN_EN, RALINK_PCIE_CLK_GEN); - msleep(50); - reset_control_deassert(pcie->rst); } static void mt7621_pcie_enable_port(struct mt7621_pcie_port *port) @@ -531,10 +548,15 @@ static void mt7621_pcie_enable_ports(struct mt7621_pcie *pcie) u32 slot; u32 val; + /* Setup MEMWIN and IOWIN */ + pcie_write(pcie, 0xffffffff, RALINK_PCI_MEMBASE); + pcie_write(pcie, pcie->io.start, RALINK_PCI_IOBASE); + list_for_each_entry(port, &pcie->ports, list) { if (port->enabled) { + mt7621_pcie_port_clk_enable(port); mt7621_pcie_enable_port(port); - dev_info(dev, "PCIE%d enabled\n", num_slots_enabled); + dev_info(dev, "PCIE%d enabled\n", port->slot); num_slots_enabled++; } } @@ -554,7 +576,9 @@ static void mt7621_pcie_enable_ports(struct mt7621_pcie *pcie) static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie) { u32 pcie_link_status = 0; - u32 val = 0; + u32 n; + int i; + u32 p2p_br_devnum[PCIE_P2P_MAX]; struct mt7621_pcie_port *port; list_for_each_entry(port, &pcie->ports, list) { @@ -567,50 +591,20 @@ static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie) if (pcie_link_status == 0) return -1; - /* - * pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num - * 3'b000 x x x - * 3'b001 x x 0 - * 3'b010 x 0 x - * 3'b011 x 1 0 - * 3'b100 0 x x - * 3'b101 1 x 0 - * 3'b110 1 0 x - * 3'b111 2 1 0 - */ - switch (pcie_link_status) { - case 2: - val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR); - val &= ~(MT7621_BR0_MASK | MT7621_BR1_MASK); - val |= 0x1 << MT7621_BR0_SHIFT; - val |= 0x0 << MT7621_BR1_SHIFT; - pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR); - break; - case 4: - val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR); - val &= ~MT7621_BR_ALL_MASK; - val |= 0x1 << MT7621_BR0_SHIFT; - val |= 0x2 << MT7621_BR1_SHIFT; - val |= 0x0 << MT7621_BR2_SHIFT; - pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR); - break; - case 5: - val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR); - val &= ~MT7621_BR_ALL_MASK; - val |= 0x0 << MT7621_BR0_SHIFT; - val |= 0x2 << MT7621_BR1_SHIFT; - val |= 0x1 << MT7621_BR2_SHIFT; - pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR); - break; - case 6: - val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR); - val &= ~MT7621_BR_ALL_MASK; - val |= 0x2 << MT7621_BR0_SHIFT; - val |= 0x0 << MT7621_BR1_SHIFT; - val |= 0x1 << MT7621_BR2_SHIFT; - pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR); - break; - } + n = 0; + for (i = 0; i < PCIE_P2P_MAX; i++) + if (pcie_link_status & BIT(i)) + p2p_br_devnum[i] = n++; + + for (i = 0; i < PCIE_P2P_MAX; i++) + if ((pcie_link_status & BIT(i)) == 0) + p2p_br_devnum[i] = n++; + + pcie_rmw(pcie, RALINK_PCI_PCICFG_ADDR, + PCIE_P2P_BR_DEVNUM_MASK_FULL, + (p2p_br_devnum[0] << PCIE_P2P_BR_DEVNUM0_SHIFT) | + (p2p_br_devnum[1] << PCIE_P2P_BR_DEVNUM1_SHIFT) | + (p2p_br_devnum[2] << PCIE_P2P_BR_DEVNUM2_SHIFT)); return 0; } @@ -678,11 +672,15 @@ static int mt7621_pci_probe(struct platform_device *pdev) return err; } + err = mt7621_pci_parse_request_of_pci_ranges(pcie); + if (err) { + dev_err(dev, "Error requesting pci resources from ranges"); + return err; + } + /* set resources limits */ - iomem_resource.start = 0; - iomem_resource.end 
= ~0UL; /* no limit */ - ioport_resource.start = 0; - ioport_resource.end = ~0UL; /* no limit */ + ioport_resource.start = pcie->io.start; + ioport_resource.end = pcie->io.end; mt7621_pcie_init_ports(pcie); @@ -694,12 +692,6 @@ static int mt7621_pci_probe(struct platform_device *pdev) mt7621_pcie_enable_ports(pcie); - err = mt7621_pci_parse_request_of_pci_ranges(pcie); - if (err) { - dev_err(dev, "Error requesting pci resources from ranges"); - return err; - } - setup_cm_memory_region(pcie); err = mt7621_pcie_request_resources(pcie, &res); @@ -731,9 +723,4 @@ static struct platform_driver mt7621_pci_driver = { }, }; -static int __init mt7621_pci_init(void) -{ - return platform_driver_register(&mt7621_pci_driver); -} - -module_init(mt7621_pci_init); +builtin_platform_driver(mt7621_pci_driver); diff --git a/drivers/staging/netlogic/platform_net.h b/drivers/staging/netlogic/platform_net.h index f152d84099a2..c8d4c13424c6 100644 --- a/drivers/staging/netlogic/platform_net.h +++ b/drivers/staging/netlogic/platform_net.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) - * +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ +/* * Copyright (c) 2003-2012 Broadcom Corporation * All Rights Reserved */ diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h index 518ea809b8fa..8365b744f9b3 100644 --- a/drivers/staging/netlogic/xlr_net.h +++ b/drivers/staging/netlogic/xlr_net.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) - * +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ +/* * Copyright (c) 2003-2012 Broadcom Corporation * All Rights Reserved */ diff --git a/drivers/staging/octeon-usb/Kconfig b/drivers/staging/octeon-usb/Kconfig new file mode 100644 index 000000000000..6a5d842ee0f2 --- /dev/null +++ b/drivers/staging/octeon-usb/Kconfig @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +config OCTEON_USB + tristate "Cavium Networks Octeon USB support" + depends on CAVIUM_OCTEON_SOC && USB + help + This driver supports USB host controller on some Cavium + Networks' products in the Octeon family. + + To compile this driver as a module, choose M here. The module + will be called octeon-hcd. + diff --git a/drivers/staging/octeon-usb/Makefile b/drivers/staging/octeon-usb/Makefile new file mode 100644 index 000000000000..9873a0130ad5 --- /dev/null +++ b/drivers/staging/octeon-usb/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-${CONFIG_OCTEON_USB} := octeon-hcd.o diff --git a/drivers/staging/octeon-usb/TODO b/drivers/staging/octeon-usb/TODO new file mode 100644 index 000000000000..2b29acca5caa --- /dev/null +++ b/drivers/staging/octeon-usb/TODO @@ -0,0 +1,8 @@ +This driver is functional and has been tested on EdgeRouter Lite, +D-Link DSR-1000N and EBH5600 evaluation board with USB mass storage. + +TODO: + - kernel coding style + - checkpatch warnings + +Contact: Aaro Koskinen <aaro.koskinen@iki.fi> diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c new file mode 100644 index 000000000000..61471a19d4e6 --- /dev/null +++ b/drivers/staging/octeon-usb/octeon-hcd.c @@ -0,0 +1,3737 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2008 Cavium Networks + * + * Some parts of the code were originally released under BSD license: + * + * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights + * reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of Cavium Networks nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * This Software, including technical data, may be subject to U.S. export + * control laws, including the U.S. Export Administration Act and its associated + * regulations, and may be subject to export or import regulations in other + * countries. + * + * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" + * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR + * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO + * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION + * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM + * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, + * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF + * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR + * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR + * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 
+ */ + +#include <linux/usb.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/usb/hcd.h> +#include <linux/prefetch.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> + +#include <asm/octeon/octeon.h> + +#include "octeon-hcd.h" + +/** + * enum cvmx_usb_speed - the possible USB device speeds + * + * @CVMX_USB_SPEED_HIGH: Device is operation at 480Mbps + * @CVMX_USB_SPEED_FULL: Device is operation at 12Mbps + * @CVMX_USB_SPEED_LOW: Device is operation at 1.5Mbps + */ +enum cvmx_usb_speed { + CVMX_USB_SPEED_HIGH = 0, + CVMX_USB_SPEED_FULL = 1, + CVMX_USB_SPEED_LOW = 2, +}; + +/** + * enum cvmx_usb_transfer - the possible USB transfer types + * + * @CVMX_USB_TRANSFER_CONTROL: USB transfer type control for hub and status + * transfers + * @CVMX_USB_TRANSFER_ISOCHRONOUS: USB transfer type isochronous for low + * priority periodic transfers + * @CVMX_USB_TRANSFER_BULK: USB transfer type bulk for large low priority + * transfers + * @CVMX_USB_TRANSFER_INTERRUPT: USB transfer type interrupt for high priority + * periodic transfers + */ +enum cvmx_usb_transfer { + CVMX_USB_TRANSFER_CONTROL = 0, + CVMX_USB_TRANSFER_ISOCHRONOUS = 1, + CVMX_USB_TRANSFER_BULK = 2, + CVMX_USB_TRANSFER_INTERRUPT = 3, +}; + +/** + * enum cvmx_usb_direction - the transfer directions + * + * @CVMX_USB_DIRECTION_OUT: Data is transferring from Octeon to the device/host + * @CVMX_USB_DIRECTION_IN: Data is transferring from the device/host to Octeon + */ +enum cvmx_usb_direction { + CVMX_USB_DIRECTION_OUT, + CVMX_USB_DIRECTION_IN, +}; + +/** + * enum cvmx_usb_status - possible callback function status codes + * + * @CVMX_USB_STATUS_OK: The transaction / operation finished without + * any errors + * @CVMX_USB_STATUS_SHORT: FIXME: This is currently not implemented + * @CVMX_USB_STATUS_CANCEL: The transaction was canceled while in flight + * by a user call to cvmx_usb_cancel + * @CVMX_USB_STATUS_ERROR: The transaction aborted with an unexpected + * error status + * @CVMX_USB_STATUS_STALL: The transaction received a USB STALL response + * from the device + * @CVMX_USB_STATUS_XACTERR: The transaction failed with an error from the + * device even after a number of retries + * @CVMX_USB_STATUS_DATATGLERR: The transaction failed with a data toggle + * error even after a number of retries + * @CVMX_USB_STATUS_BABBLEERR: The transaction failed with a babble error + * @CVMX_USB_STATUS_FRAMEERR: The transaction failed with a frame error + * even after a number of retries + */ +enum cvmx_usb_status { + CVMX_USB_STATUS_OK, + CVMX_USB_STATUS_SHORT, + CVMX_USB_STATUS_CANCEL, + CVMX_USB_STATUS_ERROR, + CVMX_USB_STATUS_STALL, + CVMX_USB_STATUS_XACTERR, + CVMX_USB_STATUS_DATATGLERR, + CVMX_USB_STATUS_BABBLEERR, + CVMX_USB_STATUS_FRAMEERR, +}; + +/** + * struct cvmx_usb_port_status - the USB port status information + * + * @port_enabled: 1 = Usb port is enabled, 0 = disabled + * @port_over_current: 1 = Over current detected, 0 = Over current not + * detected. Octeon doesn't support over current detection. + * @port_powered: 1 = Port power is being supplied to the device, 0 = + * power is off. Octeon doesn't support turning port power + * off. + * @port_speed: Current port speed. + * @connected: 1 = A device is connected to the port, 0 = No device is + * connected. + * @connect_change: 1 = Device connected state changed since the last set + * status call. 
+ */ +struct cvmx_usb_port_status { + u32 reserved : 25; + u32 port_enabled : 1; + u32 port_over_current : 1; + u32 port_powered : 1; + enum cvmx_usb_speed port_speed : 2; + u32 connected : 1; + u32 connect_change : 1; +}; + +/** + * struct cvmx_usb_iso_packet - descriptor for Isochronous packets + * + * @offset: This is the offset in bytes into the main buffer where this data + * is stored. + * @length: This is the length in bytes of the data. + * @status: This is the status of this individual packet transfer. + */ +struct cvmx_usb_iso_packet { + int offset; + int length; + enum cvmx_usb_status status; +}; + +/** + * enum cvmx_usb_initialize_flags - flags used by the initialization function + * + * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI: The USB port uses a 12MHz crystal + * as clock source at USB_XO and + * USB_XI. + * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND: The USB port uses 12/24/48MHz 2.5V + * board clock source at USB_XO. + * USB_XI should be tied to GND. + * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK: Mask for clock speed field + * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: Speed of reference clock or + * crystal + * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: Speed of reference clock + * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: Speed of reference clock + * @CVMX_USB_INITIALIZE_FLAGS_NO_DMA: Disable DMA and used polled IO for + * data transfer use for the USB + */ +enum cvmx_usb_initialize_flags { + CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1 << 0, + CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1 << 1, + CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3 << 3, + CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1 << 3, + CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2 << 3, + CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3 << 3, + /* Bits 3-4 used to encode the clock frequency */ + CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1 << 5, +}; + +/** + * enum cvmx_usb_pipe_flags - internal flags for a pipe. + * + * @CVMX_USB_PIPE_FLAGS_SCHEDULED: Used internally to determine if a pipe is + * actively using hardware. + * @CVMX_USB_PIPE_FLAGS_NEED_PING: Used internally to determine if a high speed + * pipe is in the ping state. + */ +enum cvmx_usb_pipe_flags { + CVMX_USB_PIPE_FLAGS_SCHEDULED = 1 << 17, + CVMX_USB_PIPE_FLAGS_NEED_PING = 1 << 18, +}; + +/* Maximum number of times to retry failed transactions */ +#define MAX_RETRIES 3 + +/* Maximum number of hardware channels supported by the USB block */ +#define MAX_CHANNELS 8 + +/* + * The low level hardware can transfer a maximum of this number of bytes in each + * transfer. The field is 19 bits wide + */ +#define MAX_TRANSFER_BYTES ((1 << 19) - 1) + +/* + * The low level hardware can transfer a maximum of this number of packets in + * each transfer. The field is 10 bits wide + */ +#define MAX_TRANSFER_PACKETS ((1 << 10) - 1) + +/** + * Logical transactions may take numerous low level + * transactions, especially when splits are concerned. This + * enum represents all of the possible stages a transaction can + * be in. Note that split completes are always even. This is so + * the NAK handler can backup to the previous low level + * transaction with a simple clearing of bit 0. + */ +enum cvmx_usb_stage { + CVMX_USB_STAGE_NON_CONTROL, + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE, + CVMX_USB_STAGE_SETUP, + CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE, + CVMX_USB_STAGE_DATA, + CVMX_USB_STAGE_DATA_SPLIT_COMPLETE, + CVMX_USB_STAGE_STATUS, + CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE, +}; + +/** + * struct cvmx_usb_transaction - describes each pending USB transaction + * regardless of type. 
These are linked together + * to form a list of pending requests for a pipe. + * + * @node: List node for transactions in the pipe. + * @type: Type of transaction, duplicated of the pipe. + * @flags: State flags for this transaction. + * @buffer: User's physical buffer address to read/write. + * @buffer_length: Size of the user's buffer in bytes. + * @control_header: For control transactions, physical address of the 8 + * byte standard header. + * @iso_start_frame: For ISO transactions, the starting frame number. + * @iso_number_packets: For ISO transactions, the number of packets in the + * request. + * @iso_packets: For ISO transactions, the sub packets in the request. + * @actual_bytes: Actual bytes transfer for this transaction. + * @stage: For control transactions, the current stage. + * @urb: URB. + */ +struct cvmx_usb_transaction { + struct list_head node; + enum cvmx_usb_transfer type; + u64 buffer; + int buffer_length; + u64 control_header; + int iso_start_frame; + int iso_number_packets; + struct cvmx_usb_iso_packet *iso_packets; + int xfersize; + int pktcnt; + int retries; + int actual_bytes; + enum cvmx_usb_stage stage; + struct urb *urb; +}; + +/** + * struct cvmx_usb_pipe - a pipe represents a virtual connection between Octeon + * and some USB device. It contains a list of pending + * request to the device. + * + * @node: List node for pipe list + * @next: Pipe after this one in the list + * @transactions: List of pending transactions + * @interval: For periodic pipes, the interval between packets in + * frames + * @next_tx_frame: The next frame this pipe is allowed to transmit on + * @flags: State flags for this pipe + * @device_speed: Speed of device connected to this pipe + * @transfer_type: Type of transaction supported by this pipe + * @transfer_dir: IN or OUT. Ignored for Control + * @multi_count: Max packet in a row for the device + * @max_packet: The device's maximum packet size in bytes + * @device_addr: USB device address at other end of pipe + * @endpoint_num: USB endpoint number at other end of pipe + * @hub_device_addr: Hub address this device is connected to + * @hub_port: Hub port this device is connected to + * @pid_toggle: This toggles between 0/1 on every packet send to track + * the data pid needed + * @channel: Hardware DMA channel for this pipe + * @split_sc_frame: The low order bits of the frame number the split + * complete should be sent on + */ +struct cvmx_usb_pipe { + struct list_head node; + struct list_head transactions; + u64 interval; + u64 next_tx_frame; + enum cvmx_usb_pipe_flags flags; + enum cvmx_usb_speed device_speed; + enum cvmx_usb_transfer transfer_type; + enum cvmx_usb_direction transfer_dir; + int multi_count; + u16 max_packet; + u8 device_addr; + u8 endpoint_num; + u8 hub_device_addr; + u8 hub_port; + u8 pid_toggle; + u8 channel; + s8 split_sc_frame; +}; + +struct cvmx_usb_tx_fifo { + struct { + int channel; + int size; + u64 address; + } entry[MAX_CHANNELS + 1]; + int head; + int tail; +}; + +/** + * struct octeon_hcd - the state of the USB block + * + * lock: Serialization lock. + * init_flags: Flags passed to initialize. + * index: Which USB block this is for. + * idle_hardware_channels: Bit set for every idle hardware channel. + * usbcx_hprt: Stored port status so we don't need to read a CSR to + * determine splits. + * pipe_for_channel: Map channels to pipes. + * pipe: Storage for pipes. + * indent: Used by debug output to indent functions. + * port_status: Last port status used for change notification. 
+ * idle_pipes: List of open pipes that have no transactions. + * active_pipes: Active pipes indexed by transfer type. + * frame_number: Increments every SOF interrupt for time keeping. + * active_split: Points to the current active split, or NULL. + */ +struct octeon_hcd { + spinlock_t lock; /* serialization lock */ + int init_flags; + int index; + int idle_hardware_channels; + union cvmx_usbcx_hprt usbcx_hprt; + struct cvmx_usb_pipe *pipe_for_channel[MAX_CHANNELS]; + int indent; + struct cvmx_usb_port_status port_status; + struct list_head idle_pipes; + struct list_head active_pipes[4]; + u64 frame_number; + struct cvmx_usb_transaction *active_split; + struct cvmx_usb_tx_fifo periodic; + struct cvmx_usb_tx_fifo nonperiodic; +}; + +/* + * This macro logically sets a single field in a CSR. It does the sequence + * read, modify, and write + */ +#define USB_SET_FIELD32(address, _union, field, value) \ + do { \ + union _union c; \ + \ + c.u32 = cvmx_usb_read_csr32(usb, address); \ + c.s.field = value; \ + cvmx_usb_write_csr32(usb, address, c.u32); \ + } while (0) + +/* Returns the IO address to push/pop stuff data from the FIFOs */ +#define USB_FIFO_ADDRESS(channel, usb_index) \ + (CVMX_USBCX_GOTGCTL(usb_index) + ((channel) + 1) * 0x1000) + +/** + * struct octeon_temp_buffer - a bounce buffer for USB transfers + * @orig_buffer: the original buffer passed by the USB stack + * @data: the newly allocated temporary buffer (excluding meta-data) + * + * Both the DMA engine and FIFO mode will always transfer full 32-bit words. If + * the buffer is too short, we need to allocate a temporary one, and this struct + * represents it. + */ +struct octeon_temp_buffer { + void *orig_buffer; + u8 data[]; +}; + +static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p) +{ + return container_of((void *)p, struct usb_hcd, hcd_priv); +} + +/** + * octeon_alloc_temp_buffer - allocate a temporary buffer for USB transfer + * (if needed) + * @urb: URB. + * @mem_flags: Memory allocation flags. + * + * This function allocates a temporary bounce buffer whenever it's needed + * due to HW limitations. + */ +static int octeon_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags) +{ + struct octeon_temp_buffer *temp; + + if (urb->num_sgs || urb->sg || + (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) || + !(urb->transfer_buffer_length % sizeof(u32))) + return 0; + + temp = kmalloc(ALIGN(urb->transfer_buffer_length, sizeof(u32)) + + sizeof(*temp), mem_flags); + if (!temp) + return -ENOMEM; + + temp->orig_buffer = urb->transfer_buffer; + if (usb_urb_dir_out(urb)) + memcpy(temp->data, urb->transfer_buffer, + urb->transfer_buffer_length); + urb->transfer_buffer = temp->data; + urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; + + return 0; +} + +/** + * octeon_free_temp_buffer - free a temporary buffer used by USB transfers. + * @urb: URB. + * + * Frees a buffer allocated by octeon_alloc_temp_buffer(). 
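A side note on the bounce-buffer rule above: octeon_alloc_temp_buffer() only steps in when the URB length is not a whole number of 32-bit words, because the hardware always moves full words. A minimal sketch of that check, using hypothetical needs_bounce()/padded_len() helpers rather than the driver's own code:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helpers; the driver itself uses ALIGN() and kmalloc(). */
static int needs_bounce(size_t transfer_len)
{
	return (transfer_len % sizeof(uint32_t)) != 0;	/* e.g. a 6-byte transfer */
}

static size_t padded_len(size_t transfer_len)
{
	size_t word = sizeof(uint32_t);

	return (transfer_len + word - 1) / word * word;	/* 6 -> 8 bytes */
}

For an OUT transfer the original data is copied into the padded buffer before submission; for an IN transfer it is copied back out once the URB completes.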
+ */ +static void octeon_free_temp_buffer(struct urb *urb) +{ + struct octeon_temp_buffer *temp; + size_t length; + + if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) + return; + + temp = container_of(urb->transfer_buffer, struct octeon_temp_buffer, + data); + if (usb_urb_dir_in(urb)) { + if (usb_pipeisoc(urb->pipe)) + length = urb->transfer_buffer_length; + else + length = urb->actual_length; + + memcpy(temp->orig_buffer, urb->transfer_buffer, length); + } + urb->transfer_buffer = temp->orig_buffer; + urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; + kfree(temp); +} + +/** + * octeon_map_urb_for_dma - Octeon-specific map_urb_for_dma(). + * @hcd: USB HCD structure. + * @urb: URB. + * @mem_flags: Memory allocation flags. + */ +static int octeon_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, + gfp_t mem_flags) +{ + int ret; + + ret = octeon_alloc_temp_buffer(urb, mem_flags); + if (ret) + return ret; + + ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); + if (ret) + octeon_free_temp_buffer(urb); + + return ret; +} + +/** + * octeon_unmap_urb_for_dma - Octeon-specific unmap_urb_for_dma() + * @hcd: USB HCD structure. + * @urb: URB. + */ +static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) +{ + usb_hcd_unmap_urb_for_dma(hcd, urb); + octeon_free_temp_buffer(urb); +} + +/** + * Read a USB 32bit CSR. It performs the necessary address swizzle + * for 32bit CSRs and logs the value in a readable format if + * debugging is on. + * + * @usb: USB block this access is for + * @address: 64bit address to read + * + * Returns: Result of the read + */ +static inline u32 cvmx_usb_read_csr32(struct octeon_hcd *usb, u64 address) +{ + return cvmx_read64_uint32(address ^ 4); +} + +/** + * Write a USB 32bit CSR. It performs the necessary address + * swizzle for 32bit CSRs and logs the value in a readable format + * if debugging is on. + * + * @usb: USB block this access is for + * @address: 64bit address to write + * @value: Value to write + */ +static inline void cvmx_usb_write_csr32(struct octeon_hcd *usb, + u64 address, u32 value) +{ + cvmx_write64_uint32(address ^ 4, value); + cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index)); +} + +/** + * Return non zero if this pipe connects to a non HIGH speed + * device through a high speed hub. 
+ * + * @usb: USB block this access is for + * @pipe: Pipe to check + * + * Returns: Non zero if we need to do split transactions + */ +static inline int cvmx_usb_pipe_needs_split(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe) +{ + return pipe->device_speed != CVMX_USB_SPEED_HIGH && + usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH; +} + +/** + * Trivial utility function to return the correct PID for a pipe + * + * @pipe: pipe to check + * + * Returns: PID for pipe + */ +static inline int cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe) +{ + if (pipe->pid_toggle) + return 2; /* Data1 */ + return 0; /* Data0 */ +} + +/* Loops through register until txfflsh or rxfflsh become zero.*/ +static int cvmx_wait_tx_rx(struct octeon_hcd *usb, int fflsh_type) +{ + int result; + u64 address = CVMX_USBCX_GRSTCTL(usb->index); + u64 done = cvmx_get_cycle() + 100 * + (u64)octeon_get_clock_rate / 1000000; + union cvmx_usbcx_grstctl c; + + while (1) { + c.u32 = cvmx_usb_read_csr32(usb, address); + if (fflsh_type == 0 && c.s.txfflsh == 0) { + result = 0; + break; + } else if (fflsh_type == 1 && c.s.rxfflsh == 0) { + result = 0; + break; + } else if (cvmx_get_cycle() > done) { + result = -1; + break; + } + + __delay(100); + } + return result; +} + +static void cvmx_fifo_setup(struct octeon_hcd *usb) +{ + union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3; + union cvmx_usbcx_gnptxfsiz npsiz; + union cvmx_usbcx_hptxfsiz psiz; + + usbcx_ghwcfg3.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_GHWCFG3(usb->index)); + + /* + * Program the USBC_GRXFSIZ register to select the size of the receive + * FIFO (25%). + */ + USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), cvmx_usbcx_grxfsiz, + rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4); + + /* + * Program the USBC_GNPTXFSIZ register to select the size and the start + * address of the non-periodic transmit FIFO for nonperiodic + * transactions (50%). + */ + npsiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index)); + npsiz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2; + npsiz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4; + cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), npsiz.u32); + + /* + * Program the USBC_HPTXFSIZ register to select the size and start + * address of the periodic transmit FIFO for periodic transactions + * (25%). + */ + psiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index)); + psiz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4; + psiz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4; + cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), psiz.u32); + + /* Flush all FIFOs */ + USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), + cvmx_usbcx_grstctl, txfnum, 0x10); + USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), + cvmx_usbcx_grstctl, txfflsh, 1); + cvmx_wait_tx_rx(usb, 0); + USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), + cvmx_usbcx_grstctl, rxfflsh, 1); + cvmx_wait_tx_rx(usb, 1); +} + +/** + * Shutdown a USB port after a call to cvmx_usb_initialize(). + * The port should be disabled with all pipes closed when this + * function is called. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * + * Returns: 0 or a negative error code. 
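The FIFO carve-up done in cvmx_fifo_setup() above is a fixed 25/50/25 split of the shared data FIFO: the first quarter for receive, the next half for non-periodic transmit, and the last quarter for periodic transmit. A worked sketch of those register values, assuming a hypothetical FIFO depth of 1024 words:

#include <stdio.h>

int main(void)
{
	unsigned int dfifodepth = 1024;			/* assumed USBC_GHWCFG3[DFIFODEPTH] */
	unsigned int rx_depth   = dfifodepth / 4;	/* GRXFSIZ.rxfdep        = 256 */
	unsigned int nptx_depth = dfifodepth / 2;	/* GNPTXFSIZ.nptxfdep    = 512 */
	unsigned int nptx_start = dfifodepth / 4;	/* GNPTXFSIZ.nptxfstaddr = 256 */
	unsigned int ptx_depth  = dfifodepth / 4;	/* HPTXFSIZ.ptxfsize     = 256 */
	unsigned int ptx_start  = 3 * dfifodepth / 4;	/* HPTXFSIZ.ptxfstaddr   = 768 */

	printf("rx=%u nptx=%u@%u ptx=%u@%u\n",
	       rx_depth, nptx_depth, nptx_start, ptx_depth, ptx_start);
	return 0;
}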
+ */ +static int cvmx_usb_shutdown(struct octeon_hcd *usb) +{ + union cvmx_usbnx_clk_ctl usbn_clk_ctl; + + /* Make sure all pipes are closed */ + if (!list_empty(&usb->idle_pipes) || + !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS]) || + !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT]) || + !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_CONTROL]) || + !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_BULK])) + return -EBUSY; + + /* Disable the clocks and put them in power on reset */ + usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index)); + usbn_clk_ctl.s.enable = 1; + usbn_clk_ctl.s.por = 1; + usbn_clk_ctl.s.hclk_rst = 1; + usbn_clk_ctl.s.prst = 0; + usbn_clk_ctl.s.hrst = 0; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + return 0; +} + +/** + * Initialize a USB port for use. This must be called before any + * other access to the Octeon USB port is made. The port starts + * off in the disabled state. + * + * @dev: Pointer to struct device for logging purposes. + * @usb: Pointer to struct octeon_hcd. + * + * Returns: 0 or a negative error code. + */ +static int cvmx_usb_initialize(struct device *dev, + struct octeon_hcd *usb) +{ + int channel; + int divisor; + int retries = 0; + union cvmx_usbcx_hcfg usbcx_hcfg; + union cvmx_usbnx_clk_ctl usbn_clk_ctl; + union cvmx_usbcx_gintsts usbc_gintsts; + union cvmx_usbcx_gahbcfg usbcx_gahbcfg; + union cvmx_usbcx_gintmsk usbcx_gintmsk; + union cvmx_usbcx_gusbcfg usbcx_gusbcfg; + union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status; + +retry: + /* + * Power On Reset and PHY Initialization + * + * 1. Wait for DCOK to assert (nothing to do) + * + * 2a. Write USBN0/1_CLK_CTL[POR] = 1 and + * USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0 + */ + usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index)); + usbn_clk_ctl.s.por = 1; + usbn_clk_ctl.s.hrst = 0; + usbn_clk_ctl.s.prst = 0; + usbn_clk_ctl.s.hclk_rst = 0; + usbn_clk_ctl.s.enable = 0; + /* + * 2b. Select the USB reference clock/crystal parameters by writing + * appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON] + */ + if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) { + /* + * The USB port uses 12/24/48MHz 2.5V board clock + * source at USB_XO. USB_XI should be tied to GND. + * Most Octeon evaluation boards require this setting + */ + if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || + OCTEON_IS_MODEL(OCTEON_CN56XX) || + OCTEON_IS_MODEL(OCTEON_CN50XX)) + /* From CN56XX,CN50XX,CN31XX,CN30XX manuals */ + usbn_clk_ctl.s.p_rtype = 2; /* p_rclk=1 & p_xenbn=0 */ + else + /* From CN52XX manual */ + usbn_clk_ctl.s.p_rtype = 1; + + switch (usb->init_flags & + CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) { + case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: + usbn_clk_ctl.s.p_c_sel = 0; + break; + case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: + usbn_clk_ctl.s.p_c_sel = 1; + break; + case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: + usbn_clk_ctl.s.p_c_sel = 2; + break; + } + } else { + /* + * The USB port uses a 12MHz crystal as clock source + * at USB_XO and USB_XI + */ + if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) + /* From CN31XX,CN30XX manual */ + usbn_clk_ctl.s.p_rtype = 3; /* p_rclk=1 & p_xenbn=1 */ + else + /* From CN56XX,CN52XX,CN50XX manuals. */ + usbn_clk_ctl.s.p_rtype = 0; + + usbn_clk_ctl.s.p_c_sel = 0; + } + /* + * 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and + * setting USBN0/1_CLK_CTL[ENABLE] = 1. 
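Step 2c, carried out just below, picks the HCLK divisor by rounding the core clock up against a 125 MHz target and never letting the result drop under 4. A small worked sketch of that arithmetic, assuming a hypothetical 800 MHz core clock:

#include <stdint.h>

/* Illustrative only; octeon_get_clock_rate() supplies the real rate in the driver. */
static uint64_t usb_hclk_divisor(uint64_t core_hz)
{
	uint64_t div = (core_hz + 125000000 - 1) / 125000000;	/* DIV_ROUND_UP */

	return div < 4 ? 4 : div;	/* 800 MHz -> 7, so HCLK is roughly 114 MHz */
}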
Divide the core clock down + * such that USB is as close as possible to 125Mhz + */ + divisor = DIV_ROUND_UP(octeon_get_clock_rate(), 125000000); + /* Lower than 4 doesn't seem to work properly */ + if (divisor < 4) + divisor = 4; + usbn_clk_ctl.s.divide = divisor; + usbn_clk_ctl.s.divide2 = 0; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + + /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */ + usbn_clk_ctl.s.hclk_rst = 1; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */ + __delay(64); + /* + * 3. Program the power-on reset field in the USBN clock-control + * register: + * USBN_CLK_CTL[POR] = 0 + */ + usbn_clk_ctl.s.por = 0; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + /* 4. Wait 1 ms for PHY clock to start */ + mdelay(1); + /* + * 5. Program the Reset input from automatic test equipment field in the + * USBP control and status register: + * USBN_USBP_CTL_STATUS[ATE_RESET] = 1 + */ + usbn_usbp_ctl_status.u64 = + cvmx_read64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index)); + usbn_usbp_ctl_status.s.ate_reset = 1; + cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index), + usbn_usbp_ctl_status.u64); + /* 6. Wait 10 cycles */ + __delay(10); + /* + * 7. Clear ATE_RESET field in the USBN clock-control register: + * USBN_USBP_CTL_STATUS[ATE_RESET] = 0 + */ + usbn_usbp_ctl_status.s.ate_reset = 0; + cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index), + usbn_usbp_ctl_status.u64); + /* + * 8. Program the PHY reset field in the USBN clock-control register: + * USBN_CLK_CTL[PRST] = 1 + */ + usbn_clk_ctl.s.prst = 1; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + /* + * 9. Program the USBP control and status register to select host or + * device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for + * device + */ + usbn_usbp_ctl_status.s.hst_mode = 0; + cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index), + usbn_usbp_ctl_status.u64); + /* 10. Wait 1 us */ + udelay(1); + /* + * 11. Program the hreset_n field in the USBN clock-control register: + * USBN_CLK_CTL[HRST] = 1 + */ + usbn_clk_ctl.s.hrst = 1; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + /* 12. Proceed to USB core initialization */ + usbn_clk_ctl.s.enable = 1; + cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); + udelay(1); + + /* + * USB Core Initialization + * + * 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to + * determine USB core configuration parameters. + * + * Nothing needed + * + * 2. Program the following fields in the global AHB configuration + * register (USBC_GAHBCFG) + * DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode + * Burst length, USBC_GAHBCFG[HBSTLEN] = 0 + * Nonperiodic TxFIFO empty level (slave mode only), + * USBC_GAHBCFG[NPTXFEMPLVL] + * Periodic TxFIFO empty level (slave mode only), + * USBC_GAHBCFG[PTXFEMPLVL] + * Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1 + */ + usbcx_gahbcfg.u32 = 0; + usbcx_gahbcfg.s.dmaen = !(usb->init_flags & + CVMX_USB_INITIALIZE_FLAGS_NO_DMA); + usbcx_gahbcfg.s.hbstlen = 0; + usbcx_gahbcfg.s.nptxfemplvl = 1; + usbcx_gahbcfg.s.ptxfemplvl = 1; + usbcx_gahbcfg.s.glblintrmsk = 1; + cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index), + usbcx_gahbcfg.u32); + + /* + * 3. Program the following fields in USBC_GUSBCFG register. 
+ * HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0 + * ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0 + * USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5 + * PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0 + */ + usbcx_gusbcfg.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_GUSBCFG(usb->index)); + usbcx_gusbcfg.s.toutcal = 0; + usbcx_gusbcfg.s.ddrsel = 0; + usbcx_gusbcfg.s.usbtrdtim = 0x5; + usbcx_gusbcfg.s.phylpwrclksel = 0; + cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index), + usbcx_gusbcfg.u32); + + /* + * 4. The software must unmask the following bits in the USBC_GINTMSK + * register. + * OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1 + * Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1 + */ + usbcx_gintmsk.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_GINTMSK(usb->index)); + usbcx_gintmsk.s.otgintmsk = 1; + usbcx_gintmsk.s.modemismsk = 1; + usbcx_gintmsk.s.hchintmsk = 1; + usbcx_gintmsk.s.sofmsk = 0; + /* We need RX FIFO interrupts if we don't have DMA */ + if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) + usbcx_gintmsk.s.rxflvlmsk = 1; + cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index), + usbcx_gintmsk.u32); + + /* + * Disable all channel interrupts. We'll enable them per channel later. + */ + for (channel = 0; channel < 8; channel++) + cvmx_usb_write_csr32(usb, + CVMX_USBCX_HCINTMSKX(channel, usb->index), + 0); + + /* + * Host Port Initialization + * + * 1. Program the host-port interrupt-mask field to unmask, + * USBC_GINTMSK[PRTINT] = 1 + */ + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, prtintmsk, 1); + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, disconnintmsk, 1); + + /* + * 2. Program the USBC_HCFG register to select full-speed host + * or high-speed host. + */ + usbcx_hcfg.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index)); + usbcx_hcfg.s.fslssupp = 0; + usbcx_hcfg.s.fslspclksel = 0; + cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32); + + cvmx_fifo_setup(usb); + + /* + * If the controller is getting port events right after the reset, it + * means the initialization failed. Try resetting the controller again + * in such case. This is seen to happen after cold boot on DSR-1000N. + */ + usbc_gintsts.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_GINTSTS(usb->index)); + cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), + usbc_gintsts.u32); + dev_dbg(dev, "gintsts after reset: 0x%x\n", (int)usbc_gintsts.u32); + if (!usbc_gintsts.s.disconnint && !usbc_gintsts.s.prtint) + return 0; + if (retries++ >= 5) + return -EAGAIN; + dev_info(dev, "controller reset failed (gintsts=0x%x) - retrying\n", + (int)usbc_gintsts.u32); + msleep(50); + cvmx_usb_shutdown(usb); + msleep(50); + goto retry; +} + +/** + * Reset a USB port. After this call succeeds, the USB port is + * online and servicing requests. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + */ +static void cvmx_usb_reset_port(struct octeon_hcd *usb) +{ + usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HPRT(usb->index)); + + /* Program the port reset bit to start the reset process */ + USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt, + prtrst, 1); + + /* + * Wait at least 50ms (high speed), or 10ms (full speed) for the reset + * process to complete. 
+ */ + mdelay(50); + + /* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */ + USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt, + prtrst, 0); + + /* + * Read the port speed field to get the enumerated speed, + * USBC_HPRT[PRTSPD]. + */ + usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HPRT(usb->index)); +} + +/** + * Disable a USB port. After this call the USB port will not + * generate data transfers and will not generate events. + * Transactions in process will fail and call their + * associated callbacks. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * + * Returns: 0 or a negative error code. + */ +static int cvmx_usb_disable(struct octeon_hcd *usb) +{ + /* Disable the port */ + USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt, + prtena, 1); + return 0; +} + +/** + * Get the current state of the USB port. Use this call to + * determine if the usb port has anything connected, is enabled, + * or has some sort of error condition. The return value of this + * call has "changed" bits to signal of the value of some fields + * have changed between calls. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * + * Returns: Port status information + */ +static struct cvmx_usb_port_status cvmx_usb_get_status(struct octeon_hcd *usb) +{ + union cvmx_usbcx_hprt usbc_hprt; + struct cvmx_usb_port_status result; + + memset(&result, 0, sizeof(result)); + + usbc_hprt.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index)); + result.port_enabled = usbc_hprt.s.prtena; + result.port_over_current = usbc_hprt.s.prtovrcurract; + result.port_powered = usbc_hprt.s.prtpwr; + result.port_speed = usbc_hprt.s.prtspd; + result.connected = usbc_hprt.s.prtconnsts; + result.connect_change = + result.connected != usb->port_status.connected; + + return result; +} + +/** + * Open a virtual pipe between the host and a USB device. A pipe + * must be opened before data can be transferred between a device + * and Octeon. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @device_addr: + * USB device address to open the pipe to + * (0-127). + * @endpoint_num: + * USB endpoint number to open the pipe to + * (0-15). + * @device_speed: + * The speed of the device the pipe is going + * to. This must match the device's speed, + * which may be different than the port speed. + * @max_packet: The maximum packet length the device can + * transmit/receive (low speed=0-8, full + * speed=0-1023, high speed=0-1024). This value + * comes from the standard endpoint descriptor + * field wMaxPacketSize bits <10:0>. + * @transfer_type: + * The type of transfer this pipe is for. + * @transfer_dir: + * The direction the pipe is in. This is not + * used for control pipes. + * @interval: For ISOCHRONOUS and INTERRUPT transfers, + * this is how often the transfer is scheduled + * for. All other transfers should specify + * zero. The units are in frames (8000/sec at + * high speed, 1000/sec for full speed). + * @multi_count: + * For high speed devices, this is the maximum + * allowed number of packet per microframe. + * Specify zero for non high speed devices. This + * value comes from the standard endpoint descriptor + * field wMaxPacketSize bits <12:11>. + * @hub_device_addr: + * Hub device address this device is connected + * to. Devices connected directly to Octeon + * use zero. This is only used when the device + * is full/low speed behind a high speed hub. + * The address will be of the high speed hub, + * not and full speed hubs after it. 
+ * @hub_port: Which port on the hub the device is + * connected. Use zero for devices connected + * directly to Octeon. Like hub_device_addr, + * this is only used for full/low speed + * devices behind a high speed hub. + * + * Returns: A non-NULL value is a pipe. NULL means an error. + */ +static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct octeon_hcd *usb, + int device_addr, + int endpoint_num, + enum cvmx_usb_speed + device_speed, + int max_packet, + enum cvmx_usb_transfer + transfer_type, + enum cvmx_usb_direction + transfer_dir, + int interval, int multi_count, + int hub_device_addr, + int hub_port) +{ + struct cvmx_usb_pipe *pipe; + + pipe = kzalloc(sizeof(*pipe), GFP_ATOMIC); + if (!pipe) + return NULL; + if ((device_speed == CVMX_USB_SPEED_HIGH) && + (transfer_dir == CVMX_USB_DIRECTION_OUT) && + (transfer_type == CVMX_USB_TRANSFER_BULK)) + pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING; + pipe->device_addr = device_addr; + pipe->endpoint_num = endpoint_num; + pipe->device_speed = device_speed; + pipe->max_packet = max_packet; + pipe->transfer_type = transfer_type; + pipe->transfer_dir = transfer_dir; + INIT_LIST_HEAD(&pipe->transactions); + + /* + * All pipes use interval to rate limit NAK processing. Force an + * interval if one wasn't supplied + */ + if (!interval) + interval = 1; + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + pipe->interval = interval * 8; + /* Force start splits to be schedule on uFrame 0 */ + pipe->next_tx_frame = ((usb->frame_number + 7) & ~7) + + pipe->interval; + } else { + pipe->interval = interval; + pipe->next_tx_frame = usb->frame_number + pipe->interval; + } + pipe->multi_count = multi_count; + pipe->hub_device_addr = hub_device_addr; + pipe->hub_port = hub_port; + pipe->pid_toggle = 0; + pipe->split_sc_frame = -1; + list_add_tail(&pipe->node, &usb->idle_pipes); + + /* + * We don't need to tell the hardware about this pipe yet since + * it doesn't have any submitted requests + */ + + return pipe; +} + +/** + * Poll the RX FIFOs and remove data as needed. This function is only used + * in non DMA mode. It is very important that this function be called quickly + * enough to prevent FIFO overflow. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + */ +static void cvmx_usb_poll_rx_fifo(struct octeon_hcd *usb) +{ + union cvmx_usbcx_grxstsph rx_status; + int channel; + int bytes; + u64 address; + u32 *ptr; + + rx_status.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_GRXSTSPH(usb->index)); + /* Only read data if IN data is there */ + if (rx_status.s.pktsts != 2) + return; + /* Check if no data is available */ + if (!rx_status.s.bcnt) + return; + + channel = rx_status.s.chnum; + bytes = rx_status.s.bcnt; + if (!bytes) + return; + + /* Get where the DMA engine would have written this data */ + address = cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) + + channel * 8); + + ptr = cvmx_phys_to_ptr(address); + cvmx_write64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel * 8, + address + bytes); + + /* Loop writing the FIFO data for this packet into memory */ + while (bytes > 0) { + *ptr++ = cvmx_usb_read_csr32(usb, + USB_FIFO_ADDRESS(channel, usb->index)); + bytes -= 4; + } + CVMX_SYNCW; +} + +/** + * Fill the TX hardware fifo with data out of the software + * fifos + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @fifo: Software fifo to use + * @available: Amount of space in the hardware fifo + * + * Returns: Non zero if the hardware fifo was too small and needs + * to be serviced again. 
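Looking back at cvmx_usb_open_pipe() above: periodic scheduling is kept in microframes, so a pipe that needs split transactions has its interval scaled by eight and its first start split pushed onto microframe 0. A rough sketch of that rule, with made-up names:

#include <stdint.h>

/* Illustrative only: mirrors the interval handling in cvmx_usb_open_pipe(). */
static void schedule_pipe(uint64_t frame_number, uint64_t interval_frames,
			  int needs_split, uint64_t *interval,
			  uint64_t *next_tx_frame)
{
	if (!interval_frames)
		interval_frames = 1;	/* every pipe gets an interval for NAK rate limiting */

	if (needs_split) {
		*interval = interval_frames * 8;	/* frames -> microframes */
		*next_tx_frame = ((frame_number + 7) & ~7ULL) + *interval;
	} else {
		*interval = interval_frames;
		*next_tx_frame = frame_number + *interval;
	}
}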
+ */ +static int cvmx_usb_fill_tx_hw(struct octeon_hcd *usb, + struct cvmx_usb_tx_fifo *fifo, int available) +{ + /* + * We're done either when there isn't anymore space or the software FIFO + * is empty + */ + while (available && (fifo->head != fifo->tail)) { + int i = fifo->tail; + const u32 *ptr = cvmx_phys_to_ptr(fifo->entry[i].address); + u64 csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel, + usb->index) ^ 4; + int words = available; + + /* Limit the amount of data to what the SW fifo has */ + if (fifo->entry[i].size <= available) { + words = fifo->entry[i].size; + fifo->tail++; + if (fifo->tail > MAX_CHANNELS) + fifo->tail = 0; + } + + /* Update the next locations and counts */ + available -= words; + fifo->entry[i].address += words * 4; + fifo->entry[i].size -= words; + + /* + * Write the HW fifo data. The read every three writes is due + * to an errata on CN3XXX chips + */ + while (words > 3) { + cvmx_write64_uint32(csr_address, *ptr++); + cvmx_write64_uint32(csr_address, *ptr++); + cvmx_write64_uint32(csr_address, *ptr++); + cvmx_read64_uint64( + CVMX_USBNX_DMA0_INB_CHN0(usb->index)); + words -= 3; + } + cvmx_write64_uint32(csr_address, *ptr++); + if (--words) { + cvmx_write64_uint32(csr_address, *ptr++); + if (--words) + cvmx_write64_uint32(csr_address, *ptr++); + } + cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index)); + } + return fifo->head != fifo->tail; +} + +/** + * Check the hardware FIFOs and fill them as needed + * + * @usb: USB device state populated by cvmx_usb_initialize(). + */ +static void cvmx_usb_poll_tx_fifo(struct octeon_hcd *usb) +{ + if (usb->periodic.head != usb->periodic.tail) { + union cvmx_usbcx_hptxsts tx_status; + + tx_status.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HPTXSTS(usb->index)); + if (cvmx_usb_fill_tx_hw(usb, &usb->periodic, + tx_status.s.ptxfspcavail)) + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, ptxfempmsk, 1); + else + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, ptxfempmsk, 0); + } + + if (usb->nonperiodic.head != usb->nonperiodic.tail) { + union cvmx_usbcx_gnptxsts tx_status; + + tx_status.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_GNPTXSTS(usb->index)); + if (cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic, + tx_status.s.nptxfspcavail)) + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, nptxfempmsk, 1); + else + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, nptxfempmsk, 0); + } +} + +/** + * Fill the TX FIFO with an outgoing packet + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @channel: Channel number to get packet from + */ +static void cvmx_usb_fill_tx_fifo(struct octeon_hcd *usb, int channel) +{ + union cvmx_usbcx_hccharx hcchar; + union cvmx_usbcx_hcspltx usbc_hcsplt; + union cvmx_usbcx_hctsizx usbc_hctsiz; + struct cvmx_usb_tx_fifo *fifo; + + /* We only need to fill data on outbound channels */ + hcchar.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCCHARX(channel, usb->index)); + if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT) + return; + + /* OUT Splits only have data on the start and not the complete */ + usbc_hcsplt.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCSPLTX(channel, usb->index)); + if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt) + return; + + /* + * Find out how many bytes we need to fill and convert it into 32bit + * words. 
+ */ + usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCTSIZX(channel, usb->index)); + if (!usbc_hctsiz.s.xfersize) + return; + + if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) || + (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS)) + fifo = &usb->periodic; + else + fifo = &usb->nonperiodic; + + fifo->entry[fifo->head].channel = channel; + fifo->entry[fifo->head].address = + cvmx_read64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + + channel * 8); + fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize + 3) >> 2; + fifo->head++; + if (fifo->head > MAX_CHANNELS) + fifo->head = 0; + + cvmx_usb_poll_tx_fifo(usb); +} + +/** + * Perform channel specific setup for Control transactions. All + * the generic stuff will already have been done in cvmx_usb_start_channel(). + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @channel: Channel to setup + * @pipe: Pipe for control transaction + */ +static void cvmx_usb_start_channel_control(struct octeon_hcd *usb, + int channel, + struct cvmx_usb_pipe *pipe) +{ + struct usb_hcd *hcd = octeon_to_hcd(usb); + struct device *dev = hcd->self.controller; + struct cvmx_usb_transaction *transaction = + list_first_entry(&pipe->transactions, typeof(*transaction), + node); + struct usb_ctrlrequest *header = + cvmx_phys_to_ptr(transaction->control_header); + int bytes_to_transfer = transaction->buffer_length - + transaction->actual_bytes; + int packets_to_transfer; + union cvmx_usbcx_hctsizx usbc_hctsiz; + + usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCTSIZX(channel, usb->index)); + + switch (transaction->stage) { + case CVMX_USB_STAGE_NON_CONTROL: + case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE: + dev_err(dev, "%s: ERROR - Non control stage\n", __func__); + break; + case CVMX_USB_STAGE_SETUP: + usbc_hctsiz.s.pid = 3; /* Setup */ + bytes_to_transfer = sizeof(*header); + /* All Control operations start with a setup going OUT */ + USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), + cvmx_usbcx_hccharx, epdir, + CVMX_USB_DIRECTION_OUT); + /* + * Setup send the control header instead of the buffer data. The + * buffer data will be used in the next stage + */ + cvmx_write64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + + channel * 8, + transaction->control_header); + break; + case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE: + usbc_hctsiz.s.pid = 3; /* Setup */ + bytes_to_transfer = 0; + /* All Control operations start with a setup going OUT */ + USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), + cvmx_usbcx_hccharx, epdir, + CVMX_USB_DIRECTION_OUT); + + USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), + cvmx_usbcx_hcspltx, compsplt, 1); + break; + case CVMX_USB_STAGE_DATA: + usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + if (header->bRequestType & USB_DIR_IN) + bytes_to_transfer = 0; + else if (bytes_to_transfer > pipe->max_packet) + bytes_to_transfer = pipe->max_packet; + } + USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), + cvmx_usbcx_hccharx, epdir, + ((header->bRequestType & USB_DIR_IN) ? + CVMX_USB_DIRECTION_IN : + CVMX_USB_DIRECTION_OUT)); + break; + case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE: + usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); + if (!(header->bRequestType & USB_DIR_IN)) + bytes_to_transfer = 0; + USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), + cvmx_usbcx_hccharx, epdir, + ((header->bRequestType & USB_DIR_IN) ? 
+ CVMX_USB_DIRECTION_IN : + CVMX_USB_DIRECTION_OUT)); + USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), + cvmx_usbcx_hcspltx, compsplt, 1); + break; + case CVMX_USB_STAGE_STATUS: + usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); + bytes_to_transfer = 0; + USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), + cvmx_usbcx_hccharx, epdir, + ((header->bRequestType & USB_DIR_IN) ? + CVMX_USB_DIRECTION_OUT : + CVMX_USB_DIRECTION_IN)); + break; + case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE: + usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); + bytes_to_transfer = 0; + USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), + cvmx_usbcx_hccharx, epdir, + ((header->bRequestType & USB_DIR_IN) ? + CVMX_USB_DIRECTION_OUT : + CVMX_USB_DIRECTION_IN)); + USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), + cvmx_usbcx_hcspltx, compsplt, 1); + break; + } + + /* + * Make sure the transfer never exceeds the byte limit of the hardware. + * Further bytes will be sent as continued transactions + */ + if (bytes_to_transfer > MAX_TRANSFER_BYTES) { + /* Round MAX_TRANSFER_BYTES to a multiple of out packet size */ + bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet; + bytes_to_transfer *= pipe->max_packet; + } + + /* + * Calculate the number of packets to transfer. If the length is zero + * we still need to transfer one packet + */ + packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer, + pipe->max_packet); + if (packets_to_transfer == 0) { + packets_to_transfer = 1; + } else if ((packets_to_transfer > 1) && + (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) { + /* + * Limit to one packet when not using DMA. Channels must be + * restarted between every packet for IN transactions, so there + * is no reason to do multiple packets in a row + */ + packets_to_transfer = 1; + bytes_to_transfer = packets_to_transfer * pipe->max_packet; + } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) { + /* + * Limit the number of packet and data transferred to what the + * hardware can handle + */ + packets_to_transfer = MAX_TRANSFER_PACKETS; + bytes_to_transfer = packets_to_transfer * pipe->max_packet; + } + + usbc_hctsiz.s.xfersize = bytes_to_transfer; + usbc_hctsiz.s.pktcnt = packets_to_transfer; + + cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), + usbc_hctsiz.u32); +} + +/** + * Start a channel to perform the pipe's head transaction + * + * @usb: USB device state populated by cvmx_usb_initialize(). 
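The clamp at the end of cvmx_usb_start_channel_control() above (and repeated for the general case in cvmx_usb_start_channel() below) keeps a single hardware transfer inside the 19-bit byte count and 10-bit packet count fields, rounding the byte limit down to a whole number of packets. A worked sketch, assuming a hypothetical 512-byte bulk endpoint asked to move more than 512 KiB:

/* Same field limits the driver defines. */
#define MAX_TRANSFER_BYTES	((1 << 19) - 1)		/* 524287 */
#define MAX_TRANSFER_PACKETS	((1 << 10) - 1)		/* 1023   */

/* Illustrative only: DMA case, ignoring the single-packet non-DMA path. */
static void clamp_transfer(int *bytes, int *packets, int max_packet)
{
	if (*bytes > MAX_TRANSFER_BYTES)	/* e.g. 600000 -> 523776 for max_packet 512 */
		*bytes = MAX_TRANSFER_BYTES / max_packet * max_packet;

	*packets = (*bytes + max_packet - 1) / max_packet;	/* DIV_ROUND_UP -> 1023 here */
	if (*packets == 0) {
		*packets = 1;	/* a zero-length transfer still sends one packet */
	} else if (*packets > MAX_TRANSFER_PACKETS) {
		*packets = MAX_TRANSFER_PACKETS;
		*bytes = *packets * max_packet;
	}
}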
+ * @channel: Channel to setup + * @pipe: Pipe to start + */ +static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel, + struct cvmx_usb_pipe *pipe) +{ + struct cvmx_usb_transaction *transaction = + list_first_entry(&pipe->transactions, typeof(*transaction), + node); + + /* Make sure all writes to the DMA region get flushed */ + CVMX_SYNCW; + + /* Attach the channel to the pipe */ + usb->pipe_for_channel[channel] = pipe; + pipe->channel = channel; + pipe->flags |= CVMX_USB_PIPE_FLAGS_SCHEDULED; + + /* Mark this channel as in use */ + usb->idle_hardware_channels &= ~(1 << channel); + + /* Enable the channel interrupt bits */ + { + union cvmx_usbcx_hcintx usbc_hcint; + union cvmx_usbcx_hcintmskx usbc_hcintmsk; + union cvmx_usbcx_haintmsk usbc_haintmsk; + + /* Clear all channel status bits */ + usbc_hcint.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCINTX(channel, usb->index)); + + cvmx_usb_write_csr32(usb, + CVMX_USBCX_HCINTX(channel, usb->index), + usbc_hcint.u32); + + usbc_hcintmsk.u32 = 0; + usbc_hcintmsk.s.chhltdmsk = 1; + if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) { + /* + * Channels need these extra interrupts when we aren't + * in DMA mode. + */ + usbc_hcintmsk.s.datatglerrmsk = 1; + usbc_hcintmsk.s.frmovrunmsk = 1; + usbc_hcintmsk.s.bblerrmsk = 1; + usbc_hcintmsk.s.xacterrmsk = 1; + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + /* + * Splits don't generate xfercompl, so we need + * ACK and NYET. + */ + usbc_hcintmsk.s.nyetmsk = 1; + usbc_hcintmsk.s.ackmsk = 1; + } + usbc_hcintmsk.s.nakmsk = 1; + usbc_hcintmsk.s.stallmsk = 1; + usbc_hcintmsk.s.xfercomplmsk = 1; + } + cvmx_usb_write_csr32(usb, + CVMX_USBCX_HCINTMSKX(channel, usb->index), + usbc_hcintmsk.u32); + + /* Enable the channel interrupt to propagate */ + usbc_haintmsk.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HAINTMSK(usb->index)); + usbc_haintmsk.s.haintmsk |= 1 << channel; + cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index), + usbc_haintmsk.u32); + } + + /* Setup the location the DMA engine uses. */ + { + u64 reg; + u64 dma_address = transaction->buffer + + transaction->actual_bytes; + + if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS) + dma_address = transaction->buffer + + transaction->iso_packets[0].offset + + transaction->actual_bytes; + + if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) + reg = CVMX_USBNX_DMA0_OUTB_CHN0(usb->index); + else + reg = CVMX_USBNX_DMA0_INB_CHN0(usb->index); + cvmx_write64_uint64(reg + channel * 8, dma_address); + } + + /* Setup both the size of the transfer and the SPLIT characteristics */ + { + union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0}; + union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0}; + int packets_to_transfer; + int bytes_to_transfer = transaction->buffer_length - + transaction->actual_bytes; + + /* + * ISOCHRONOUS transactions store each individual transfer size + * in the packet structure, not the global buffer_length + */ + if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS) + bytes_to_transfer = + transaction->iso_packets[0].length - + transaction->actual_bytes; + + /* + * We need to do split transactions when we are talking to non + * high speed devices that are behind a high speed hub + */ + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + /* + * On the start split phase (stage is even) record the + * frame number we will need to send the split complete. 
+ * We only store the lower two bits since the time ahead + * can only be two frames + */ + if ((transaction->stage & 1) == 0) { + if (transaction->type == CVMX_USB_TRANSFER_BULK) + pipe->split_sc_frame = + (usb->frame_number + 1) & 0x7f; + else + pipe->split_sc_frame = + (usb->frame_number + 2) & 0x7f; + } else { + pipe->split_sc_frame = -1; + } + + usbc_hcsplt.s.spltena = 1; + usbc_hcsplt.s.hubaddr = pipe->hub_device_addr; + usbc_hcsplt.s.prtaddr = pipe->hub_port; + usbc_hcsplt.s.compsplt = (transaction->stage == + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE); + + /* + * SPLIT transactions can only ever transmit one data + * packet so limit the transfer size to the max packet + * size + */ + if (bytes_to_transfer > pipe->max_packet) + bytes_to_transfer = pipe->max_packet; + + /* + * ISOCHRONOUS OUT splits are unique in that they limit + * data transfers to 188 byte chunks representing the + * begin/middle/end of the data or all + */ + if (!usbc_hcsplt.s.compsplt && + (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) && + (pipe->transfer_type == + CVMX_USB_TRANSFER_ISOCHRONOUS)) { + /* + * Clear the split complete frame number as + * there isn't going to be a split complete + */ + pipe->split_sc_frame = -1; + /* + * See if we've started this transfer and sent + * data + */ + if (transaction->actual_bytes == 0) { + /* + * Nothing sent yet, this is either a + * begin or the entire payload + */ + if (bytes_to_transfer <= 188) + /* Entire payload in one go */ + usbc_hcsplt.s.xactpos = 3; + else + /* First part of payload */ + usbc_hcsplt.s.xactpos = 2; + } else { + /* + * Continuing the previous data, we must + * either be in the middle or at the end + */ + if (bytes_to_transfer <= 188) + /* End of payload */ + usbc_hcsplt.s.xactpos = 1; + else + /* Middle of payload */ + usbc_hcsplt.s.xactpos = 0; + } + /* + * Again, the transfer size is limited to 188 + * bytes + */ + if (bytes_to_transfer > 188) + bytes_to_transfer = 188; + } + } + + /* + * Make sure the transfer never exceeds the byte limit of the + * hardware. Further bytes will be sent as continued + * transactions + */ + if (bytes_to_transfer > MAX_TRANSFER_BYTES) { + /* + * Round MAX_TRANSFER_BYTES to a multiple of out packet + * size + */ + bytes_to_transfer = MAX_TRANSFER_BYTES / + pipe->max_packet; + bytes_to_transfer *= pipe->max_packet; + } + + /* + * Calculate the number of packets to transfer. If the length is + * zero we still need to transfer one packet + */ + packets_to_transfer = + DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet); + if (packets_to_transfer == 0) { + packets_to_transfer = 1; + } else if ((packets_to_transfer > 1) && + (usb->init_flags & + CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) { + /* + * Limit to one packet when not using DMA. 
Channels must + * be restarted between every packet for IN + * transactions, so there is no reason to do multiple + * packets in a row + */ + packets_to_transfer = 1; + bytes_to_transfer = packets_to_transfer * + pipe->max_packet; + } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) { + /* + * Limit the number of packet and data transferred to + * what the hardware can handle + */ + packets_to_transfer = MAX_TRANSFER_PACKETS; + bytes_to_transfer = packets_to_transfer * + pipe->max_packet; + } + + usbc_hctsiz.s.xfersize = bytes_to_transfer; + usbc_hctsiz.s.pktcnt = packets_to_transfer; + + /* Update the DATA0/DATA1 toggle */ + usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); + /* + * High speed pipes may need a hardware ping before they start + */ + if (pipe->flags & CVMX_USB_PIPE_FLAGS_NEED_PING) + usbc_hctsiz.s.dopng = 1; + + cvmx_usb_write_csr32(usb, + CVMX_USBCX_HCSPLTX(channel, usb->index), + usbc_hcsplt.u32); + cvmx_usb_write_csr32(usb, + CVMX_USBCX_HCTSIZX(channel, usb->index), + usbc_hctsiz.u32); + } + + /* Setup the Host Channel Characteristics Register */ + { + union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0}; + + /* + * Set the startframe odd/even properly. This is only used for + * periodic + */ + usbc_hcchar.s.oddfrm = usb->frame_number & 1; + + /* + * Set the number of back to back packets allowed by this + * endpoint. Split transactions interpret "ec" as the number of + * immediate retries of failure. These retries happen too + * quickly, so we disable these entirely for splits + */ + if (cvmx_usb_pipe_needs_split(usb, pipe)) + usbc_hcchar.s.ec = 1; + else if (pipe->multi_count < 1) + usbc_hcchar.s.ec = 1; + else if (pipe->multi_count > 3) + usbc_hcchar.s.ec = 3; + else + usbc_hcchar.s.ec = pipe->multi_count; + + /* Set the rest of the endpoint specific settings */ + usbc_hcchar.s.devaddr = pipe->device_addr; + usbc_hcchar.s.eptype = transaction->type; + usbc_hcchar.s.lspddev = + (pipe->device_speed == CVMX_USB_SPEED_LOW); + usbc_hcchar.s.epdir = pipe->transfer_dir; + usbc_hcchar.s.epnum = pipe->endpoint_num; + usbc_hcchar.s.mps = pipe->max_packet; + cvmx_usb_write_csr32(usb, + CVMX_USBCX_HCCHARX(channel, usb->index), + usbc_hcchar.u32); + } + + /* Do transaction type specific fixups as needed */ + switch (transaction->type) { + case CVMX_USB_TRANSFER_CONTROL: + cvmx_usb_start_channel_control(usb, channel, pipe); + break; + case CVMX_USB_TRANSFER_BULK: + case CVMX_USB_TRANSFER_INTERRUPT: + break; + case CVMX_USB_TRANSFER_ISOCHRONOUS: + if (!cvmx_usb_pipe_needs_split(usb, pipe)) { + /* + * ISO transactions require different PIDs depending on + * direction and how many packets are needed + */ + if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) { + if (pipe->multi_count < 2) /* Need DATA0 */ + USB_SET_FIELD32( + CVMX_USBCX_HCTSIZX(channel, + usb->index), + cvmx_usbcx_hctsizx, pid, 0); + else /* Need MDATA */ + USB_SET_FIELD32( + CVMX_USBCX_HCTSIZX(channel, + usb->index), + cvmx_usbcx_hctsizx, pid, 3); + } + } + break; + } + { + union cvmx_usbcx_hctsizx usbc_hctsiz = { .u32 = + cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCTSIZX(channel, + usb->index)) + }; + transaction->xfersize = usbc_hctsiz.s.xfersize; + transaction->pktcnt = usbc_hctsiz.s.pktcnt; + } + /* Remember when we start a split transaction */ + if (cvmx_usb_pipe_needs_split(usb, pipe)) + usb->active_split = transaction; + USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), + cvmx_usbcx_hccharx, chena, 1); + if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) + cvmx_usb_fill_tx_fifo(usb, channel); +} + +/** + 
* Find a pipe that is ready to be scheduled to hardware. + * @usb: USB device state populated by cvmx_usb_initialize(). + * @xfer_type: Transfer type + * + * Returns: Pipe or NULL if none are ready + */ +static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(struct octeon_hcd *usb, + enum cvmx_usb_transfer xfer_type) +{ + struct list_head *list = usb->active_pipes + xfer_type; + u64 current_frame = usb->frame_number; + struct cvmx_usb_pipe *pipe; + + list_for_each_entry(pipe, list, node) { + struct cvmx_usb_transaction *t = + list_first_entry(&pipe->transactions, typeof(*t), + node); + if (!(pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED) && t && + (pipe->next_tx_frame <= current_frame) && + ((pipe->split_sc_frame == -1) || + ((((int)current_frame - pipe->split_sc_frame) & 0x7f) < + 0x40)) && + (!usb->active_split || (usb->active_split == t))) { + prefetch(t); + return pipe; + } + } + return NULL; +} + +static struct cvmx_usb_pipe *cvmx_usb_next_pipe(struct octeon_hcd *usb, + int is_sof) +{ + struct cvmx_usb_pipe *pipe; + + /* Find a pipe needing service. */ + if (is_sof) { + /* + * Only process periodic pipes on SOF interrupts. This way we + * are sure that the periodic data is sent in the beginning of + * the frame. + */ + pipe = cvmx_usb_find_ready_pipe(usb, + CVMX_USB_TRANSFER_ISOCHRONOUS); + if (pipe) + return pipe; + pipe = cvmx_usb_find_ready_pipe(usb, + CVMX_USB_TRANSFER_INTERRUPT); + if (pipe) + return pipe; + } + pipe = cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_CONTROL); + if (pipe) + return pipe; + return cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_BULK); +} + +/** + * Called whenever a pipe might need to be scheduled to the + * hardware. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @is_sof: True if this schedule was called on a SOF interrupt. + */ +static void cvmx_usb_schedule(struct octeon_hcd *usb, int is_sof) +{ + int channel; + struct cvmx_usb_pipe *pipe; + int need_sof; + enum cvmx_usb_transfer ttype; + + if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) { + /* + * Without DMA we need to be careful to not schedule something + * at the end of a frame and cause an overrun. 
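+ * The check below compares the time remaining in the current
+ * (micro)frame (HFNUM.FrRem) against a quarter of the frame interval
+ * (HFIR.FrInt) and skips starting new channels when the frame is
+ * nearly over.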
+ */ + union cvmx_usbcx_hfnum hfnum = { + .u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HFNUM(usb->index)) + }; + + union cvmx_usbcx_hfir hfir = { + .u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HFIR(usb->index)) + }; + + if (hfnum.s.frrem < hfir.s.frint / 4) + goto done; + } + + while (usb->idle_hardware_channels) { + /* Find an idle channel */ + channel = __fls(usb->idle_hardware_channels); + if (unlikely(channel > 7)) + break; + + pipe = cvmx_usb_next_pipe(usb, is_sof); + if (!pipe) + break; + + cvmx_usb_start_channel(usb, channel, pipe); + } + +done: + /* + * Only enable SOF interrupts when we have transactions pending in the + * future that might need to be scheduled + */ + need_sof = 0; + for (ttype = CVMX_USB_TRANSFER_CONTROL; + ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) { + list_for_each_entry(pipe, &usb->active_pipes[ttype], node) { + if (pipe->next_tx_frame > usb->frame_number) { + need_sof = 1; + break; + } + } + } + USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), + cvmx_usbcx_gintmsk, sofmsk, need_sof); +} + +static void octeon_usb_urb_complete_callback(struct octeon_hcd *usb, + enum cvmx_usb_status status, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction + *transaction, + int bytes_transferred, + struct urb *urb) +{ + struct usb_hcd *hcd = octeon_to_hcd(usb); + struct device *dev = hcd->self.controller; + + if (likely(status == CVMX_USB_STATUS_OK)) + urb->actual_length = bytes_transferred; + else + urb->actual_length = 0; + + urb->hcpriv = NULL; + + /* For Isochronous transactions we need to update the URB packet status + * list from data in our private copy + */ + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + int i; + /* + * The pointer to the private list is stored in the setup_packet + * field. + */ + struct cvmx_usb_iso_packet *iso_packet = + (struct cvmx_usb_iso_packet *)urb->setup_packet; + /* Recalculate the transfer size by adding up each packet */ + urb->actual_length = 0; + for (i = 0; i < urb->number_of_packets; i++) { + if (iso_packet[i].status == CVMX_USB_STATUS_OK) { + urb->iso_frame_desc[i].status = 0; + urb->iso_frame_desc[i].actual_length = + iso_packet[i].length; + urb->actual_length += + urb->iso_frame_desc[i].actual_length; + } else { + dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%p transaction=%p size=%d\n", + i, urb->number_of_packets, + iso_packet[i].status, pipe, + transaction, iso_packet[i].length); + urb->iso_frame_desc[i].status = -EREMOTEIO; + } + } + /* Free the private list now that we don't need it anymore */ + kfree(iso_packet); + urb->setup_packet = NULL; + } + + switch (status) { + case CVMX_USB_STATUS_OK: + urb->status = 0; + break; + case CVMX_USB_STATUS_CANCEL: + if (urb->status == 0) + urb->status = -ENOENT; + break; + case CVMX_USB_STATUS_STALL: + dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d\n", + pipe, transaction, bytes_transferred); + urb->status = -EPIPE; + break; + case CVMX_USB_STATUS_BABBLEERR: + dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d\n", + pipe, transaction, bytes_transferred); + urb->status = -EPIPE; + break; + case CVMX_USB_STATUS_SHORT: + dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d\n", + pipe, transaction, bytes_transferred); + urb->status = -EREMOTEIO; + break; + case CVMX_USB_STATUS_ERROR: + case CVMX_USB_STATUS_XACTERR: + case CVMX_USB_STATUS_DATATGLERR: + case CVMX_USB_STATUS_FRAMEERR: + dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d\n", + status, pipe, transaction, bytes_transferred); + urb->status = -EPROTO; + break; + } + 
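+	/*
+	 * Give the URB back to the USB core with our lock dropped:
+	 * usb_hcd_giveback_urb() invokes the URB's completion handler,
+	 * which may re-enter this driver and enqueue new URBs.
+	 */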
usb_hcd_unlink_urb_from_ep(octeon_to_hcd(usb), urb); + spin_unlock(&usb->lock); + usb_hcd_giveback_urb(octeon_to_hcd(usb), urb, urb->status); + spin_lock(&usb->lock); +} + +/** + * Signal the completion of a transaction and free it. The + * transaction will be removed from the pipe transaction list. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @pipe: Pipe the transaction is on + * @transaction: + * Transaction that completed + * @complete_code: + * Completion code + */ +static void cvmx_usb_complete(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + enum cvmx_usb_status complete_code) +{ + /* If this was a split then clear our split in progress marker */ + if (usb->active_split == transaction) + usb->active_split = NULL; + + /* + * Isochronous transactions need extra processing as they might not be + * done after a single data transfer + */ + if (unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) { + /* Update the number of bytes transferred in this ISO packet */ + transaction->iso_packets[0].length = transaction->actual_bytes; + transaction->iso_packets[0].status = complete_code; + + /* + * If there are more ISOs pending and we succeeded, schedule the + * next one + */ + if ((transaction->iso_number_packets > 1) && + (complete_code == CVMX_USB_STATUS_OK)) { + /* No bytes transferred for this packet as of yet */ + transaction->actual_bytes = 0; + /* One less ISO waiting to transfer */ + transaction->iso_number_packets--; + /* Increment to the next location in our packet array */ + transaction->iso_packets++; + transaction->stage = CVMX_USB_STAGE_NON_CONTROL; + return; + } + } + + /* Remove the transaction from the pipe list */ + list_del(&transaction->node); + if (list_empty(&pipe->transactions)) + list_move_tail(&pipe->node, &usb->idle_pipes); + octeon_usb_urb_complete_callback(usb, complete_code, pipe, + transaction, + transaction->actual_bytes, + transaction->urb); + kfree(transaction); +} + +/** + * Submit a usb transaction to a pipe. Called for all types + * of transactions. + * + * @usb: + * @pipe: Which pipe to submit to. + * @type: Transaction type + * @buffer: User buffer for the transaction + * @buffer_length: + * User buffer's length in bytes + * @control_header: + * For control transactions, the 8 byte standard header + * @iso_start_frame: + * For ISO transactions, the start frame + * @iso_number_packets: + * For ISO, the number of packet in the transaction. + * @iso_packets: + * A description of each ISO packet + * @urb: URB for the callback + * + * Returns: Transaction or NULL on failure. + */ +static struct cvmx_usb_transaction *cvmx_usb_submit_transaction( + struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + enum cvmx_usb_transfer type, + u64 buffer, + int buffer_length, + u64 control_header, + int iso_start_frame, + int iso_number_packets, + struct cvmx_usb_iso_packet *iso_packets, + struct urb *urb) +{ + struct cvmx_usb_transaction *transaction; + + if (unlikely(pipe->transfer_type != type)) + return NULL; + + transaction = kzalloc(sizeof(*transaction), GFP_ATOMIC); + if (unlikely(!transaction)) + return NULL; + + transaction->type = type; + transaction->buffer = buffer; + transaction->buffer_length = buffer_length; + transaction->control_header = control_header; + /* FIXME: This is not used, implement it. 
*/ + transaction->iso_start_frame = iso_start_frame; + transaction->iso_number_packets = iso_number_packets; + transaction->iso_packets = iso_packets; + transaction->urb = urb; + if (transaction->type == CVMX_USB_TRANSFER_CONTROL) + transaction->stage = CVMX_USB_STAGE_SETUP; + else + transaction->stage = CVMX_USB_STAGE_NON_CONTROL; + + if (!list_empty(&pipe->transactions)) { + list_add_tail(&transaction->node, &pipe->transactions); + } else { + list_add_tail(&transaction->node, &pipe->transactions); + list_move_tail(&pipe->node, + &usb->active_pipes[pipe->transfer_type]); + + /* + * We may need to schedule the pipe if this was the head of the + * pipe. + */ + cvmx_usb_schedule(usb, 0); + } + + return transaction; +} + +/** + * Call to submit a USB Bulk transfer to a pipe. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @pipe: Handle to the pipe for the transfer. + * @urb: URB. + * + * Returns: A submitted transaction or NULL on failure. + */ +static struct cvmx_usb_transaction *cvmx_usb_submit_bulk( + struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct urb *urb) +{ + return cvmx_usb_submit_transaction(usb, pipe, CVMX_USB_TRANSFER_BULK, + urb->transfer_dma, + urb->transfer_buffer_length, + 0, /* control_header */ + 0, /* iso_start_frame */ + 0, /* iso_number_packets */ + NULL, /* iso_packets */ + urb); +} + +/** + * Call to submit a USB Interrupt transfer to a pipe. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @pipe: Handle to the pipe for the transfer. + * @urb: URB returned when the callback is called. + * + * Returns: A submitted transaction or NULL on failure. + */ +static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt( + struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct urb *urb) +{ + return cvmx_usb_submit_transaction(usb, pipe, + CVMX_USB_TRANSFER_INTERRUPT, + urb->transfer_dma, + urb->transfer_buffer_length, + 0, /* control_header */ + 0, /* iso_start_frame */ + 0, /* iso_number_packets */ + NULL, /* iso_packets */ + urb); +} + +/** + * Call to submit a USB Control transfer to a pipe. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @pipe: Handle to the pipe for the transfer. + * @urb: URB. + * + * Returns: A submitted transaction or NULL on failure. + */ +static struct cvmx_usb_transaction *cvmx_usb_submit_control( + struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct urb *urb) +{ + int buffer_length = urb->transfer_buffer_length; + u64 control_header = urb->setup_dma; + struct usb_ctrlrequest *header = cvmx_phys_to_ptr(control_header); + + if ((header->bRequestType & USB_DIR_IN) == 0) + buffer_length = le16_to_cpu(header->wLength); + + return cvmx_usb_submit_transaction(usb, pipe, + CVMX_USB_TRANSFER_CONTROL, + urb->transfer_dma, buffer_length, + control_header, + 0, /* iso_start_frame */ + 0, /* iso_number_packets */ + NULL, /* iso_packets */ + urb); +} + +/** + * Call to submit a USB Isochronous transfer to a pipe. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @pipe: Handle to the pipe for the transfer. + * @urb: URB returned when the callback is called. + * + * Returns: A submitted transaction or NULL on failure. 
+ */ +static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous( + struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct urb *urb) +{ + struct cvmx_usb_iso_packet *packets; + + packets = (struct cvmx_usb_iso_packet *)urb->setup_packet; + return cvmx_usb_submit_transaction(usb, pipe, + CVMX_USB_TRANSFER_ISOCHRONOUS, + urb->transfer_dma, + urb->transfer_buffer_length, + 0, /* control_header */ + urb->start_frame, + urb->number_of_packets, + packets, urb); +} + +/** + * Cancel one outstanding request in a pipe. Canceling a request + * can fail if the transaction has already completed before cancel + * is called. Even after a successful cancel call, it may take + * a frame or two for the cvmx_usb_poll() function to call the + * associated callback. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @pipe: Pipe to cancel requests in. + * @transaction: Transaction to cancel, returned by the submit function. + * + * Returns: 0 or a negative error code. + */ +static int cvmx_usb_cancel(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction) +{ + /* + * If the transaction is the HEAD of the queue and scheduled. We need to + * treat it special + */ + if (list_first_entry(&pipe->transactions, typeof(*transaction), node) == + transaction && (pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED)) { + union cvmx_usbcx_hccharx usbc_hcchar; + + usb->pipe_for_channel[pipe->channel] = NULL; + pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED; + + CVMX_SYNCW; + + usbc_hcchar.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCCHARX(pipe->channel, usb->index)); + /* + * If the channel isn't enabled then the transaction already + * completed. + */ + if (usbc_hcchar.s.chena) { + usbc_hcchar.s.chdis = 1; + cvmx_usb_write_csr32(usb, + CVMX_USBCX_HCCHARX(pipe->channel, + usb->index), + usbc_hcchar.u32); + } + } + cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_CANCEL); + return 0; +} + +/** + * Cancel all outstanding requests in a pipe. Logically all this + * does is call cvmx_usb_cancel() in a loop. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @pipe: Pipe to cancel requests in. + * + * Returns: 0 or a negative error code. + */ +static int cvmx_usb_cancel_all(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe) +{ + struct cvmx_usb_transaction *transaction, *next; + + /* Simply loop through and attempt to cancel each transaction */ + list_for_each_entry_safe(transaction, next, &pipe->transactions, node) { + int result = cvmx_usb_cancel(usb, pipe, transaction); + + if (unlikely(result != 0)) + return result; + } + return 0; +} + +/** + * Close a pipe created with cvmx_usb_open_pipe(). + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * @pipe: Pipe to close. + * + * Returns: 0 or a negative error code. EBUSY is returned if the pipe has + * outstanding transfers. + */ +static int cvmx_usb_close_pipe(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe) +{ + /* Fail if the pipe has pending transactions */ + if (!list_empty(&pipe->transactions)) + return -EBUSY; + + list_del(&pipe->node); + kfree(pipe); + + return 0; +} + +/** + * Get the current USB protocol level frame number. The frame + * number is always in the range of 0-0x7ff. + * + * @usb: USB device state populated by cvmx_usb_initialize(). 
+ * + * Returns: USB frame number + */ +static int cvmx_usb_get_frame_number(struct octeon_hcd *usb) +{ + union cvmx_usbcx_hfnum usbc_hfnum; + + usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index)); + + return usbc_hfnum.s.frnum; +} + +static void cvmx_usb_transfer_control(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + union cvmx_usbcx_hccharx usbc_hcchar, + int buffer_space_left, + int bytes_in_last_packet) +{ + switch (transaction->stage) { + case CVMX_USB_STAGE_NON_CONTROL: + case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE: + /* This should be impossible */ + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_ERROR); + break; + case CVMX_USB_STAGE_SETUP: + pipe->pid_toggle = 1; + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + transaction->stage = + CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE; + } else { + struct usb_ctrlrequest *header = + cvmx_phys_to_ptr(transaction->control_header); + if (header->wLength) + transaction->stage = CVMX_USB_STAGE_DATA; + else + transaction->stage = CVMX_USB_STAGE_STATUS; + } + break; + case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE: + { + struct usb_ctrlrequest *header = + cvmx_phys_to_ptr(transaction->control_header); + if (header->wLength) + transaction->stage = CVMX_USB_STAGE_DATA; + else + transaction->stage = CVMX_USB_STAGE_STATUS; + } + break; + case CVMX_USB_STAGE_DATA: + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE; + /* + * For setup OUT data that are splits, + * the hardware doesn't appear to count + * transferred data. Here we manually + * update the data transferred + */ + if (!usbc_hcchar.s.epdir) { + if (buffer_space_left < pipe->max_packet) + transaction->actual_bytes += + buffer_space_left; + else + transaction->actual_bytes += + pipe->max_packet; + } + } else if ((buffer_space_left == 0) || + (bytes_in_last_packet < pipe->max_packet)) { + pipe->pid_toggle = 1; + transaction->stage = CVMX_USB_STAGE_STATUS; + } + break; + case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE: + if ((buffer_space_left == 0) || + (bytes_in_last_packet < pipe->max_packet)) { + pipe->pid_toggle = 1; + transaction->stage = CVMX_USB_STAGE_STATUS; + } else { + transaction->stage = CVMX_USB_STAGE_DATA; + } + break; + case CVMX_USB_STAGE_STATUS: + if (cvmx_usb_pipe_needs_split(usb, pipe)) + transaction->stage = + CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE; + else + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + break; + case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE: + cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK); + break; + } +} + +static void cvmx_usb_transfer_bulk(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + union cvmx_usbcx_hcintx usbc_hcint, + int buffer_space_left, + int bytes_in_last_packet) +{ + /* + * The only time a bulk transfer isn't complete when it finishes with + * an ACK is during a split transaction. For splits we need to continue + * the transfer if more data is needed. 
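+ * A start split just advances to the complete-split stage; a complete
+ * split either rewinds to another start split (buffer space left and a
+ * full-size last packet) or finishes the transaction.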
+ */ + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) + transaction->stage = + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE; + else if (buffer_space_left && + (bytes_in_last_packet == pipe->max_packet)) + transaction->stage = CVMX_USB_STAGE_NON_CONTROL; + else + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } else { + if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) && + (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) && + (usbc_hcint.s.nak)) + pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING; + if (!buffer_space_left || + (bytes_in_last_packet < pipe->max_packet)) + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } +} + +static void cvmx_usb_transfer_intr(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + int buffer_space_left, + int bytes_in_last_packet) +{ + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) { + transaction->stage = + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE; + } else if (buffer_space_left && + (bytes_in_last_packet == pipe->max_packet)) { + transaction->stage = CVMX_USB_STAGE_NON_CONTROL; + } else { + pipe->next_tx_frame += pipe->interval; + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } + } else if (!buffer_space_left || + (bytes_in_last_packet < pipe->max_packet)) { + pipe->next_tx_frame += pipe->interval; + cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK); + } +} + +static void cvmx_usb_transfer_isoc(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + int buffer_space_left, + int bytes_in_last_packet, + int bytes_this_transfer) +{ + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + /* + * ISOCHRONOUS OUT splits don't require a complete split stage. + * Instead they use a sequence of begin OUT splits to transfer + * the data 188 bytes at a time. Once the transfer is complete, + * the pipe sleeps until the next schedule interval. + */ + if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) { + /* + * If no space left or this wasn't a max size packet + * then this transfer is complete. Otherwise start it + * again to send the next 188 bytes + */ + if (!buffer_space_left || (bytes_this_transfer < 188)) { + pipe->next_tx_frame += pipe->interval; + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } + return; + } + if (transaction->stage == + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) { + /* + * We are in the incoming data phase. 
Keep getting data + * until we run out of space or get a small packet + */ + if ((buffer_space_left == 0) || + (bytes_in_last_packet < pipe->max_packet)) { + pipe->next_tx_frame += pipe->interval; + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } + } else { + transaction->stage = + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE; + } + } else { + pipe->next_tx_frame += pipe->interval; + cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK); + } +} + +/** + * Poll a channel for status + * + * @usb: USB device + * @channel: Channel to poll + * + * Returns: Zero on success + */ +static int cvmx_usb_poll_channel(struct octeon_hcd *usb, int channel) +{ + struct usb_hcd *hcd = octeon_to_hcd(usb); + struct device *dev = hcd->self.controller; + union cvmx_usbcx_hcintx usbc_hcint; + union cvmx_usbcx_hctsizx usbc_hctsiz; + union cvmx_usbcx_hccharx usbc_hcchar; + struct cvmx_usb_pipe *pipe; + struct cvmx_usb_transaction *transaction; + int bytes_this_transfer; + int bytes_in_last_packet; + int packets_processed; + int buffer_space_left; + + /* Read the interrupt status bits for the channel */ + usbc_hcint.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCINTX(channel, usb->index)); + + if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) { + usbc_hcchar.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCCHARX(channel, usb->index)); + + if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) { + /* + * There seems to be a bug in CN31XX which can cause + * interrupt IN transfers to get stuck until we do a + * write of HCCHARX without changing things + */ + cvmx_usb_write_csr32(usb, + CVMX_USBCX_HCCHARX(channel, + usb->index), + usbc_hcchar.u32); + return 0; + } + + /* + * In non DMA mode the channels don't halt themselves. We need + * to manually disable channels that are left running + */ + if (!usbc_hcint.s.chhltd) { + if (usbc_hcchar.s.chena) { + union cvmx_usbcx_hcintmskx hcintmsk; + /* Disable all interrupts except CHHLTD */ + hcintmsk.u32 = 0; + hcintmsk.s.chhltdmsk = 1; + cvmx_usb_write_csr32(usb, + CVMX_USBCX_HCINTMSKX(channel, usb->index), + hcintmsk.u32); + usbc_hcchar.s.chdis = 1; + cvmx_usb_write_csr32(usb, + CVMX_USBCX_HCCHARX(channel, usb->index), + usbc_hcchar.u32); + return 0; + } else if (usbc_hcint.s.xfercompl) { + /* + * Successful IN/OUT with transfer complete. + * Channel halt isn't needed. + */ + } else { + dev_err(dev, "USB%d: Channel %d interrupt without halt\n", + usb->index, channel); + return 0; + } + } + } else { + /* + * There is are no interrupts that we need to process when the + * channel is still running + */ + if (!usbc_hcint.s.chhltd) + return 0; + } + + /* Disable the channel interrupts now that it is done */ + cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0); + usb->idle_hardware_channels |= (1 << channel); + + /* Make sure this channel is tied to a valid pipe */ + pipe = usb->pipe_for_channel[channel]; + prefetch(pipe); + if (!pipe) + return 0; + transaction = list_first_entry(&pipe->transactions, + typeof(*transaction), + node); + prefetch(transaction); + + /* + * Disconnect this pipe from the HW channel. 
Later the schedule + * function will figure out which pipe needs to go + */ + usb->pipe_for_channel[channel] = NULL; + pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED; + + /* + * Read the channel config info so we can figure out how much data + * transferred + */ + usbc_hcchar.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCCHARX(channel, usb->index)); + usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HCTSIZX(channel, usb->index)); + + /* + * Calculating the number of bytes successfully transferred is dependent + * on the transfer direction + */ + packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt; + if (usbc_hcchar.s.epdir) { + /* + * IN transactions are easy. For every byte received the + * hardware decrements xfersize. All we need to do is subtract + * the current value of xfersize from its starting value and we + * know how many bytes were written to the buffer + */ + bytes_this_transfer = transaction->xfersize - + usbc_hctsiz.s.xfersize; + } else { + /* + * OUT transaction don't decrement xfersize. Instead pktcnt is + * decremented on every successful packet send. The hardware + * does this when it receives an ACK, or NYET. If it doesn't + * receive one of these responses pktcnt doesn't change + */ + bytes_this_transfer = packets_processed * usbc_hcchar.s.mps; + /* + * The last packet may not be a full transfer if we didn't have + * enough data + */ + if (bytes_this_transfer > transaction->xfersize) + bytes_this_transfer = transaction->xfersize; + } + /* Figure out how many bytes were in the last packet of the transfer */ + if (packets_processed) + bytes_in_last_packet = bytes_this_transfer - + (packets_processed - 1) * usbc_hcchar.s.mps; + else + bytes_in_last_packet = bytes_this_transfer; + + /* + * As a special case, setup transactions output the setup header, not + * the user's data. For this reason we don't count setup data as bytes + * transferred + */ + if ((transaction->stage == CVMX_USB_STAGE_SETUP) || + (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE)) + bytes_this_transfer = 0; + + /* + * Add the bytes transferred to the running total. It is important that + * bytes_this_transfer doesn't count any data that needs to be + * retransmitted + */ + transaction->actual_bytes += bytes_this_transfer; + if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS) + buffer_space_left = transaction->iso_packets[0].length - + transaction->actual_bytes; + else + buffer_space_left = transaction->buffer_length - + transaction->actual_bytes; + + /* + * We need to remember the PID toggle state for the next transaction. + * The hardware already updated it for the next transaction + */ + pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0); + + /* + * For high speed bulk out, assume the next transaction will need to do + * a ping before proceeding. If this isn't true the ACK processing below + * will clear this flag + */ + if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) && + (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) && + (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT)) + pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING; + + if (WARN_ON_ONCE(bytes_this_transfer < 0)) { + /* + * In some rare cases the DMA engine seems to get stuck and + * keeps substracting same byte count over and over again. In + * such case we just need to fail every transaction. 
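+ * A negative bytes_this_transfer is the symptom tested for above; the
+ * transaction is completed with CVMX_USB_STATUS_ERROR.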
+ */ + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_ERROR); + return 0; + } + + if (usbc_hcint.s.stall) { + /* + * STALL as a response means this transaction cannot be + * completed because the device can't process transactions. Tell + * the user. Any data that was transferred will be counted on + * the actual bytes transferred + */ + pipe->pid_toggle = 0; + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_STALL); + } else if (usbc_hcint.s.xacterr) { + /* + * XactErr as a response means the device signaled + * something wrong with the transfer. For example, PID + * toggle errors cause these. + */ + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_XACTERR); + } else if (usbc_hcint.s.bblerr) { + /* Babble Error (BblErr) */ + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_BABBLEERR); + } else if (usbc_hcint.s.datatglerr) { + /* Data toggle error */ + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_DATATGLERR); + } else if (usbc_hcint.s.nyet) { + /* + * NYET as a response is only allowed in three cases: as a + * response to a ping, as a response to a split transaction, and + * as a response to a bulk out. The ping case is handled by + * hardware, so we only have splits and bulk out + */ + if (!cvmx_usb_pipe_needs_split(usb, pipe)) { + transaction->retries = 0; + /* + * If there is more data to go then we need to try + * again. Otherwise this transaction is complete + */ + if ((buffer_space_left == 0) || + (bytes_in_last_packet < pipe->max_packet)) + cvmx_usb_complete(usb, pipe, + transaction, + CVMX_USB_STATUS_OK); + } else { + /* + * Split transactions retry the split complete 4 times + * then rewind to the start split and do the entire + * transactions again + */ + transaction->retries++; + if ((transaction->retries & 0x3) == 0) { + /* + * Rewind to the beginning of the transaction by + * anding off the split complete bit + */ + transaction->stage &= ~1; + pipe->split_sc_frame = -1; + } + } + } else if (usbc_hcint.s.ack) { + transaction->retries = 0; + /* + * The ACK bit can only be checked after the other error bits. + * This is because a multi packet transfer may succeed in a + * number of packets and then get a different response on the + * last packet. In this case both ACK and the last response bit + * will be set. If none of the other response bits is set, then + * the last packet must have been an ACK + * + * Since we got an ACK, we know we don't need to do a ping on + * this pipe + */ + pipe->flags &= ~CVMX_USB_PIPE_FLAGS_NEED_PING; + + switch (transaction->type) { + case CVMX_USB_TRANSFER_CONTROL: + cvmx_usb_transfer_control(usb, pipe, transaction, + usbc_hcchar, + buffer_space_left, + bytes_in_last_packet); + break; + case CVMX_USB_TRANSFER_BULK: + cvmx_usb_transfer_bulk(usb, pipe, transaction, + usbc_hcint, buffer_space_left, + bytes_in_last_packet); + break; + case CVMX_USB_TRANSFER_INTERRUPT: + cvmx_usb_transfer_intr(usb, pipe, transaction, + buffer_space_left, + bytes_in_last_packet); + break; + case CVMX_USB_TRANSFER_ISOCHRONOUS: + cvmx_usb_transfer_isoc(usb, pipe, transaction, + buffer_space_left, + bytes_in_last_packet, + bytes_this_transfer); + break; + } + } else if (usbc_hcint.s.nak) { + /* + * If this was a split then clear our split in progress marker. + */ + if (usb->active_split == transaction) + usb->active_split = NULL; + /* + * NAK as a response means the device couldn't accept the + * transaction, but it should be retried in the future. 
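+ * A NAK is not an error, so nothing is reported back for the URB; the
+ * transfer is simply rescheduled.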
Rewind + * to the beginning of the transaction by anding off the split + * complete bit. Retry in the next interval + */ + transaction->retries = 0; + transaction->stage &= ~1; + pipe->next_tx_frame += pipe->interval; + if (pipe->next_tx_frame < usb->frame_number) + pipe->next_tx_frame = usb->frame_number + + pipe->interval - + (usb->frame_number - pipe->next_tx_frame) % + pipe->interval; + } else { + struct cvmx_usb_port_status port; + + port = cvmx_usb_get_status(usb); + if (port.port_enabled) { + /* We'll retry the exact same transaction again */ + transaction->retries++; + } else { + /* + * We get channel halted interrupts with no result bits + * sets when the cable is unplugged + */ + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_ERROR); + } + } + return 0; +} + +static void octeon_usb_port_callback(struct octeon_hcd *usb) +{ + spin_unlock(&usb->lock); + usb_hcd_poll_rh_status(octeon_to_hcd(usb)); + spin_lock(&usb->lock); +} + +/** + * Poll the USB block for status and call all needed callback + * handlers. This function is meant to be called in the interrupt + * handler for the USB controller. It can also be called + * periodically in a loop for non-interrupt based operation. + * + * @usb: USB device state populated by cvmx_usb_initialize(). + * + * Returns: 0 or a negative error code. + */ +static int cvmx_usb_poll(struct octeon_hcd *usb) +{ + union cvmx_usbcx_hfnum usbc_hfnum; + union cvmx_usbcx_gintsts usbc_gintsts; + + prefetch_range(usb, sizeof(*usb)); + + /* Update the frame counter */ + usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index)); + if ((usb->frame_number & 0x3fff) > usbc_hfnum.s.frnum) + usb->frame_number += 0x4000; + usb->frame_number &= ~0x3fffull; + usb->frame_number |= usbc_hfnum.s.frnum; + + /* Read the pending interrupts */ + usbc_gintsts.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_GINTSTS(usb->index)); + + /* Clear the interrupts now that we know about them */ + cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), + usbc_gintsts.u32); + + if (usbc_gintsts.s.rxflvl) { + /* + * RxFIFO Non-Empty (RxFLvl) + * Indicates that there is at least one packet pending to be + * read from the RxFIFO. + * + * In DMA mode this is handled by hardware + */ + if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) + cvmx_usb_poll_rx_fifo(usb); + } + if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) { + /* Fill the Tx FIFOs when not in DMA mode */ + if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) + cvmx_usb_poll_tx_fifo(usb); + } + if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) { + union cvmx_usbcx_hprt usbc_hprt; + /* + * Disconnect Detected Interrupt (DisconnInt) + * Asserted when a device disconnect is detected. + * + * Host Port Interrupt (PrtInt) + * The core sets this bit to indicate a change in port status of + * one of the O2P USB core ports in Host mode. The application + * must read the Host Port Control and Status (HPRT) register to + * determine the exact event that caused this interrupt. The + * application must clear the appropriate status bit in the Host + * Port Control and Status register to clear this bit. 
+ * + * Call the user's port callback + */ + octeon_usb_port_callback(usb); + /* Clear the port change bits */ + usbc_hprt.u32 = + cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index)); + usbc_hprt.s.prtena = 0; + cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), + usbc_hprt.u32); + } + if (usbc_gintsts.s.hchint) { + /* + * Host Channels Interrupt (HChInt) + * The core sets this bit to indicate that an interrupt is + * pending on one of the channels of the core (in Host mode). + * The application must read the Host All Channels Interrupt + * (HAINT) register to determine the exact number of the channel + * on which the interrupt occurred, and then read the + * corresponding Host Channel-n Interrupt (HCINTn) register to + * determine the exact cause of the interrupt. The application + * must clear the appropriate status bit in the HCINTn register + * to clear this bit. + */ + union cvmx_usbcx_haint usbc_haint; + + usbc_haint.u32 = cvmx_usb_read_csr32(usb, + CVMX_USBCX_HAINT(usb->index)); + while (usbc_haint.u32) { + int channel; + + channel = __fls(usbc_haint.u32); + cvmx_usb_poll_channel(usb, channel); + usbc_haint.u32 ^= 1 << channel; + } + } + + cvmx_usb_schedule(usb, usbc_gintsts.s.sof); + + return 0; +} + +/* convert between an HCD pointer and the corresponding struct octeon_hcd */ +static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd) +{ + return (struct octeon_hcd *)(hcd->hcd_priv); +} + +static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd) +{ + struct octeon_hcd *usb = hcd_to_octeon(hcd); + unsigned long flags; + + spin_lock_irqsave(&usb->lock, flags); + cvmx_usb_poll(usb); + spin_unlock_irqrestore(&usb->lock, flags); + return IRQ_HANDLED; +} + +static int octeon_usb_start(struct usb_hcd *hcd) +{ + hcd->state = HC_STATE_RUNNING; + return 0; +} + +static void octeon_usb_stop(struct usb_hcd *hcd) +{ + hcd->state = HC_STATE_HALT; +} + +static int octeon_usb_get_frame_number(struct usb_hcd *hcd) +{ + struct octeon_hcd *usb = hcd_to_octeon(hcd); + + return cvmx_usb_get_frame_number(usb); +} + +static int octeon_usb_urb_enqueue(struct usb_hcd *hcd, + struct urb *urb, + gfp_t mem_flags) +{ + struct octeon_hcd *usb = hcd_to_octeon(hcd); + struct device *dev = hcd->self.controller; + struct cvmx_usb_transaction *transaction = NULL; + struct cvmx_usb_pipe *pipe; + unsigned long flags; + struct cvmx_usb_iso_packet *iso_packet; + struct usb_host_endpoint *ep = urb->ep; + int rc; + + urb->status = 0; + spin_lock_irqsave(&usb->lock, flags); + + rc = usb_hcd_link_urb_to_ep(hcd, urb); + if (rc) { + spin_unlock_irqrestore(&usb->lock, flags); + return rc; + } + + if (!ep->hcpriv) { + enum cvmx_usb_transfer transfer_type; + enum cvmx_usb_speed speed; + int split_device = 0; + int split_port = 0; + + switch (usb_pipetype(urb->pipe)) { + case PIPE_ISOCHRONOUS: + transfer_type = CVMX_USB_TRANSFER_ISOCHRONOUS; + break; + case PIPE_INTERRUPT: + transfer_type = CVMX_USB_TRANSFER_INTERRUPT; + break; + case PIPE_CONTROL: + transfer_type = CVMX_USB_TRANSFER_CONTROL; + break; + default: + transfer_type = CVMX_USB_TRANSFER_BULK; + break; + } + switch (urb->dev->speed) { + case USB_SPEED_LOW: + speed = CVMX_USB_SPEED_LOW; + break; + case USB_SPEED_FULL: + speed = CVMX_USB_SPEED_FULL; + break; + default: + speed = CVMX_USB_SPEED_HIGH; + break; + } + /* + * For slow devices on high speed ports we need to find the hub + * that does the speed translation so we know where to send the + * split transactions. 
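+ * The loop below walks up the device tree until it finds a parent hub
+ * running at high speed; that hub's device number and the port below it
+ * become the split_device/split_port passed to cvmx_usb_open_pipe().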
+ */ + if (speed != CVMX_USB_SPEED_HIGH) { + /* + * Start at this device and work our way up the usb + * tree. + */ + struct usb_device *dev = urb->dev; + + while (dev->parent) { + /* + * If our parent is high speed then he'll + * receive the splits. + */ + if (dev->parent->speed == USB_SPEED_HIGH) { + split_device = dev->parent->devnum; + split_port = dev->portnum; + break; + } + /* + * Move up the tree one level. If we make it all + * the way up the tree, then the port must not + * be in high speed mode and we don't need a + * split. + */ + dev = dev->parent; + } + } + pipe = cvmx_usb_open_pipe(usb, usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe), speed, + le16_to_cpu(ep->desc.wMaxPacketSize) + & 0x7ff, + transfer_type, + usb_pipein(urb->pipe) ? + CVMX_USB_DIRECTION_IN : + CVMX_USB_DIRECTION_OUT, + urb->interval, + (le16_to_cpu(ep->desc.wMaxPacketSize) + >> 11) & 0x3, + split_device, split_port); + if (!pipe) { + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&usb->lock, flags); + dev_dbg(dev, "Failed to create pipe\n"); + return -ENOMEM; + } + ep->hcpriv = pipe; + } else { + pipe = ep->hcpriv; + } + + switch (usb_pipetype(urb->pipe)) { + case PIPE_ISOCHRONOUS: + dev_dbg(dev, "Submit isochronous to %d.%d\n", + usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe)); + /* + * Allocate a structure to use for our private list of + * isochronous packets. + */ + iso_packet = kmalloc_array(urb->number_of_packets, + sizeof(struct cvmx_usb_iso_packet), + GFP_ATOMIC); + if (iso_packet) { + int i; + /* Fill the list with the data from the URB */ + for (i = 0; i < urb->number_of_packets; i++) { + iso_packet[i].offset = + urb->iso_frame_desc[i].offset; + iso_packet[i].length = + urb->iso_frame_desc[i].length; + iso_packet[i].status = CVMX_USB_STATUS_ERROR; + } + /* + * Store a pointer to the list in the URB setup_packet + * field. We know this currently isn't being used and + * this saves us a bunch of logic. + */ + urb->setup_packet = (char *)iso_packet; + transaction = cvmx_usb_submit_isochronous(usb, + pipe, urb); + /* + * If submit failed we need to free our private packet + * list. 
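+ * urb->setup_packet is cleared too, since it was only borrowed to carry
+ * the list to the completion path.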
+ */ + if (!transaction) { + urb->setup_packet = NULL; + kfree(iso_packet); + } + } + break; + case PIPE_INTERRUPT: + dev_dbg(dev, "Submit interrupt to %d.%d\n", + usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe)); + transaction = cvmx_usb_submit_interrupt(usb, pipe, urb); + break; + case PIPE_CONTROL: + dev_dbg(dev, "Submit control to %d.%d\n", + usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe)); + transaction = cvmx_usb_submit_control(usb, pipe, urb); + break; + case PIPE_BULK: + dev_dbg(dev, "Submit bulk to %d.%d\n", + usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe)); + transaction = cvmx_usb_submit_bulk(usb, pipe, urb); + break; + } + if (!transaction) { + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&usb->lock, flags); + dev_dbg(dev, "Failed to submit\n"); + return -ENOMEM; + } + urb->hcpriv = transaction; + spin_unlock_irqrestore(&usb->lock, flags); + return 0; +} + +static int octeon_usb_urb_dequeue(struct usb_hcd *hcd, + struct urb *urb, + int status) +{ + struct octeon_hcd *usb = hcd_to_octeon(hcd); + unsigned long flags; + int rc; + + if (!urb->dev) + return -EINVAL; + + spin_lock_irqsave(&usb->lock, flags); + + rc = usb_hcd_check_unlink_urb(hcd, urb, status); + if (rc) + goto out; + + urb->status = status; + cvmx_usb_cancel(usb, urb->ep->hcpriv, urb->hcpriv); + +out: + spin_unlock_irqrestore(&usb->lock, flags); + + return rc; +} + +static void octeon_usb_endpoint_disable(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ + struct device *dev = hcd->self.controller; + + if (ep->hcpriv) { + struct octeon_hcd *usb = hcd_to_octeon(hcd); + struct cvmx_usb_pipe *pipe = ep->hcpriv; + unsigned long flags; + + spin_lock_irqsave(&usb->lock, flags); + cvmx_usb_cancel_all(usb, pipe); + if (cvmx_usb_close_pipe(usb, pipe)) + dev_dbg(dev, "Closing pipe %p failed\n", pipe); + spin_unlock_irqrestore(&usb->lock, flags); + ep->hcpriv = NULL; + } +} + +static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf) +{ + struct octeon_hcd *usb = hcd_to_octeon(hcd); + struct cvmx_usb_port_status port_status; + unsigned long flags; + + spin_lock_irqsave(&usb->lock, flags); + port_status = cvmx_usb_get_status(usb); + spin_unlock_irqrestore(&usb->lock, flags); + buf[0] = port_status.connect_change << 1; + + return buf[0] != 0; +} + +static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, + u16 wIndex, char *buf, u16 wLength) +{ + struct octeon_hcd *usb = hcd_to_octeon(hcd); + struct device *dev = hcd->self.controller; + struct cvmx_usb_port_status usb_port_status; + int port_status; + struct usb_hub_descriptor *desc; + unsigned long flags; + + switch (typeReq) { + case ClearHubFeature: + dev_dbg(dev, "ClearHubFeature\n"); + switch (wValue) { + case C_HUB_LOCAL_POWER: + case C_HUB_OVER_CURRENT: + /* Nothing required here */ + break; + default: + return -EINVAL; + } + break; + case ClearPortFeature: + dev_dbg(dev, "ClearPortFeature\n"); + if (wIndex != 1) { + dev_dbg(dev, " INVALID\n"); + return -EINVAL; + } + + switch (wValue) { + case USB_PORT_FEAT_ENABLE: + dev_dbg(dev, " ENABLE\n"); + spin_lock_irqsave(&usb->lock, flags); + cvmx_usb_disable(usb); + spin_unlock_irqrestore(&usb->lock, flags); + break; + case USB_PORT_FEAT_SUSPEND: + dev_dbg(dev, " SUSPEND\n"); + /* Not supported on Octeon */ + break; + case USB_PORT_FEAT_POWER: + dev_dbg(dev, " POWER\n"); + /* Not supported on Octeon */ + break; + case USB_PORT_FEAT_INDICATOR: + dev_dbg(dev, " INDICATOR\n"); + /* Port inidicator not supported */ + break; + case 
USB_PORT_FEAT_C_CONNECTION: + dev_dbg(dev, " C_CONNECTION\n"); + /* Clears drivers internal connect status change flag */ + spin_lock_irqsave(&usb->lock, flags); + usb->port_status = cvmx_usb_get_status(usb); + spin_unlock_irqrestore(&usb->lock, flags); + break; + case USB_PORT_FEAT_C_RESET: + dev_dbg(dev, " C_RESET\n"); + /* + * Clears the driver's internal Port Reset Change flag. + */ + spin_lock_irqsave(&usb->lock, flags); + usb->port_status = cvmx_usb_get_status(usb); + spin_unlock_irqrestore(&usb->lock, flags); + break; + case USB_PORT_FEAT_C_ENABLE: + dev_dbg(dev, " C_ENABLE\n"); + /* + * Clears the driver's internal Port Enable/Disable + * Change flag. + */ + spin_lock_irqsave(&usb->lock, flags); + usb->port_status = cvmx_usb_get_status(usb); + spin_unlock_irqrestore(&usb->lock, flags); + break; + case USB_PORT_FEAT_C_SUSPEND: + dev_dbg(dev, " C_SUSPEND\n"); + /* + * Clears the driver's internal Port Suspend Change + * flag, which is set when resume signaling on the host + * port is complete. + */ + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + dev_dbg(dev, " C_OVER_CURRENT\n"); + /* Clears the driver's overcurrent Change flag */ + spin_lock_irqsave(&usb->lock, flags); + usb->port_status = cvmx_usb_get_status(usb); + spin_unlock_irqrestore(&usb->lock, flags); + break; + default: + dev_dbg(dev, " UNKNOWN\n"); + return -EINVAL; + } + break; + case GetHubDescriptor: + dev_dbg(dev, "GetHubDescriptor\n"); + desc = (struct usb_hub_descriptor *)buf; + desc->bDescLength = 9; + desc->bDescriptorType = 0x29; + desc->bNbrPorts = 1; + desc->wHubCharacteristics = cpu_to_le16(0x08); + desc->bPwrOn2PwrGood = 1; + desc->bHubContrCurrent = 0; + desc->u.hs.DeviceRemovable[0] = 0; + desc->u.hs.DeviceRemovable[1] = 0xff; + break; + case GetHubStatus: + dev_dbg(dev, "GetHubStatus\n"); + *(__le32 *)buf = 0; + break; + case GetPortStatus: + dev_dbg(dev, "GetPortStatus\n"); + if (wIndex != 1) { + dev_dbg(dev, " INVALID\n"); + return -EINVAL; + } + + spin_lock_irqsave(&usb->lock, flags); + usb_port_status = cvmx_usb_get_status(usb); + spin_unlock_irqrestore(&usb->lock, flags); + port_status = 0; + + if (usb_port_status.connect_change) { + port_status |= (1 << USB_PORT_FEAT_C_CONNECTION); + dev_dbg(dev, " C_CONNECTION\n"); + } + + if (usb_port_status.port_enabled) { + port_status |= (1 << USB_PORT_FEAT_C_ENABLE); + dev_dbg(dev, " C_ENABLE\n"); + } + + if (usb_port_status.connected) { + port_status |= (1 << USB_PORT_FEAT_CONNECTION); + dev_dbg(dev, " CONNECTION\n"); + } + + if (usb_port_status.port_enabled) { + port_status |= (1 << USB_PORT_FEAT_ENABLE); + dev_dbg(dev, " ENABLE\n"); + } + + if (usb_port_status.port_over_current) { + port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT); + dev_dbg(dev, " OVER_CURRENT\n"); + } + + if (usb_port_status.port_powered) { + port_status |= (1 << USB_PORT_FEAT_POWER); + dev_dbg(dev, " POWER\n"); + } + + if (usb_port_status.port_speed == CVMX_USB_SPEED_HIGH) { + port_status |= USB_PORT_STAT_HIGH_SPEED; + dev_dbg(dev, " HIGHSPEED\n"); + } else if (usb_port_status.port_speed == CVMX_USB_SPEED_LOW) { + port_status |= (1 << USB_PORT_FEAT_LOWSPEED); + dev_dbg(dev, " LOWSPEED\n"); + } + + *((__le32 *)buf) = cpu_to_le32(port_status); + break; + case SetHubFeature: + dev_dbg(dev, "SetHubFeature\n"); + /* No HUB features supported */ + break; + case SetPortFeature: + dev_dbg(dev, "SetPortFeature\n"); + if (wIndex != 1) { + dev_dbg(dev, " INVALID\n"); + return -EINVAL; + } + + switch (wValue) { + case USB_PORT_FEAT_SUSPEND: + dev_dbg(dev, " SUSPEND\n"); + return -EINVAL; + case 
USB_PORT_FEAT_POWER: + dev_dbg(dev, " POWER\n"); + /* + * Program the port power bit to drive VBUS on the USB. + */ + spin_lock_irqsave(&usb->lock, flags); + USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), + cvmx_usbcx_hprt, prtpwr, 1); + spin_unlock_irqrestore(&usb->lock, flags); + return 0; + case USB_PORT_FEAT_RESET: + dev_dbg(dev, " RESET\n"); + spin_lock_irqsave(&usb->lock, flags); + cvmx_usb_reset_port(usb); + spin_unlock_irqrestore(&usb->lock, flags); + return 0; + case USB_PORT_FEAT_INDICATOR: + dev_dbg(dev, " INDICATOR\n"); + /* Not supported */ + break; + default: + dev_dbg(dev, " UNKNOWN\n"); + return -EINVAL; + } + break; + default: + dev_dbg(dev, "Unknown root hub request\n"); + return -EINVAL; + } + return 0; +} + +static const struct hc_driver octeon_hc_driver = { + .description = "Octeon USB", + .product_desc = "Octeon Host Controller", + .hcd_priv_size = sizeof(struct octeon_hcd), + .irq = octeon_usb_irq, + .flags = HCD_MEMORY | HCD_DMA | HCD_USB2, + .start = octeon_usb_start, + .stop = octeon_usb_stop, + .urb_enqueue = octeon_usb_urb_enqueue, + .urb_dequeue = octeon_usb_urb_dequeue, + .endpoint_disable = octeon_usb_endpoint_disable, + .get_frame_number = octeon_usb_get_frame_number, + .hub_status_data = octeon_usb_hub_status_data, + .hub_control = octeon_usb_hub_control, + .map_urb_for_dma = octeon_map_urb_for_dma, + .unmap_urb_for_dma = octeon_unmap_urb_for_dma, +}; + +static int octeon_usb_probe(struct platform_device *pdev) +{ + int status; + int initialize_flags; + int usb_num; + struct resource *res_mem; + struct device_node *usbn_node; + int irq = platform_get_irq(pdev, 0); + struct device *dev = &pdev->dev; + struct octeon_hcd *usb; + struct usb_hcd *hcd; + u32 clock_rate = 48000000; + bool is_crystal_clock = false; + const char *clock_type; + int i; + + if (!dev->of_node) { + dev_err(dev, "Error: empty of_node\n"); + return -ENXIO; + } + usbn_node = dev->of_node->parent; + + i = of_property_read_u32(usbn_node, + "clock-frequency", &clock_rate); + if (i) + i = of_property_read_u32(usbn_node, + "refclk-frequency", &clock_rate); + if (i) { + dev_err(dev, "No USBN \"clock-frequency\"\n"); + return -ENXIO; + } + switch (clock_rate) { + case 12000000: + initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ; + break; + case 24000000: + initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ; + break; + case 48000000: + initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ; + break; + default: + dev_err(dev, "Illegal USBN \"clock-frequency\" %u\n", + clock_rate); + return -ENXIO; + } + + i = of_property_read_string(usbn_node, + "cavium,refclk-type", &clock_type); + if (i) + i = of_property_read_string(usbn_node, + "refclk-type", &clock_type); + + if (!i && strcmp("crystal", clock_type) == 0) + is_crystal_clock = true; + + if (is_crystal_clock) + initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; + else + initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res_mem) { + dev_err(dev, "found no memory resource\n"); + return -ENXIO; + } + usb_num = (res_mem->start >> 44) & 1; + + if (irq < 0) { + /* Defective device tree, but we know how to fix it. */ + irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56; + + irq = irq_create_mapping(NULL, hwirq); + } + + /* + * Set the DMA mask to 64bits so we get buffers already translated for + * DMA. 
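+ * The hardware DMA channels are programmed with these addresses
+ * directly in cvmx_usb_start_channel(), so the buffers must be
+ * reachable without bounce buffering.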
+ */ + i = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (i) + return i; + + /* + * Only cn52XX and cn56XX have DWC_OTG USB hardware and the + * IOB priority registers. Under heavy network load USB + * hardware can be starved by the IOB causing a crash. Give + * it a priority boost if it has been waiting more than 400 + * cycles to avoid this situation. + * + * Testing indicates that a cnt_val of 8192 is not sufficient, + * but no failures are seen with 4096. We choose a value of + * 400 to give a safety factor of 10. + */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) { + union cvmx_iob_n2c_l2c_pri_cnt pri_cnt; + + pri_cnt.u64 = 0; + pri_cnt.s.cnt_enb = 1; + pri_cnt.s.cnt_val = 400; + cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64); + } + + hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev)); + if (!hcd) { + dev_dbg(dev, "Failed to allocate memory for HCD\n"); + return -1; + } + hcd->uses_new_polling = 1; + usb = (struct octeon_hcd *)hcd->hcd_priv; + + spin_lock_init(&usb->lock); + + usb->init_flags = initialize_flags; + + /* Initialize the USB state structure */ + usb->index = usb_num; + INIT_LIST_HEAD(&usb->idle_pipes); + for (i = 0; i < ARRAY_SIZE(usb->active_pipes); i++) + INIT_LIST_HEAD(&usb->active_pipes[i]); + + /* Due to an errata, CN31XX doesn't support DMA */ + if (OCTEON_IS_MODEL(OCTEON_CN31XX)) { + usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA; + /* Only use one channel with non DMA */ + usb->idle_hardware_channels = 0x1; + } else if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) { + /* CN5XXX have an errata with channel 3 */ + usb->idle_hardware_channels = 0xf7; + } else { + usb->idle_hardware_channels = 0xff; + } + + status = cvmx_usb_initialize(dev, usb); + if (status) { + dev_dbg(dev, "USB initialization failed with %d\n", status); + usb_put_hcd(hcd); + return -1; + } + + status = usb_add_hcd(hcd, irq, 0); + if (status) { + dev_dbg(dev, "USB add HCD failed with %d\n", status); + usb_put_hcd(hcd); + return -1; + } + device_wakeup_enable(hcd->self.controller); + + dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq); + + return 0; +} + +static int octeon_usb_remove(struct platform_device *pdev) +{ + int status; + struct device *dev = &pdev->dev; + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct octeon_hcd *usb = hcd_to_octeon(hcd); + unsigned long flags; + + usb_remove_hcd(hcd); + spin_lock_irqsave(&usb->lock, flags); + status = cvmx_usb_shutdown(usb); + spin_unlock_irqrestore(&usb->lock, flags); + if (status) + dev_dbg(dev, "USB shutdown failed with %d\n", status); + + usb_put_hcd(hcd); + + return 0; +} + +static const struct of_device_id octeon_usb_match[] = { + { + .compatible = "cavium,octeon-5750-usbc", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, octeon_usb_match); + +static struct platform_driver octeon_usb_driver = { + .driver = { + .name = "octeon-hcd", + .of_match_table = octeon_usb_match, + }, + .probe = octeon_usb_probe, + .remove = octeon_usb_remove, +}; + +static int __init octeon_usb_driver_init(void) +{ + if (usb_disabled()) + return 0; + + return platform_driver_register(&octeon_usb_driver); +} +module_init(octeon_usb_driver_init); + +static void __exit octeon_usb_driver_exit(void) +{ + if (usb_disabled()) + return; + + platform_driver_unregister(&octeon_usb_driver); +} +module_exit(octeon_usb_driver_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>"); +MODULE_DESCRIPTION("Cavium Inc. 
OCTEON USB Host driver."); diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h new file mode 100644 index 000000000000..9ed619c93a4e --- /dev/null +++ b/drivers/staging/octeon-usb/octeon-hcd.h @@ -0,0 +1,1847 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Octeon HCD hardware register definitions. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Some parts of the code were originally released under BSD license: + * + * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights + * reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of Cavium Networks nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * This Software, including technical data, may be subject to U.S. export + * control laws, including the U.S. Export Administration Act and its associated + * regulations, and may be subject to export or import regulations in other + * countries. + * + * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" + * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR + * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO + * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION + * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM + * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, + * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF + * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR + * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR + * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 
+ */ + +#ifndef __OCTEON_HCD_H__ +#define __OCTEON_HCD_H__ + +#include <asm/bitfield.h> + +#define CVMX_USBCXBASE 0x00016F0010000000ull +#define CVMX_USBCXREG1(reg, bid) \ + (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \ + ((bid) & 1) * 0x100000000000ull) +#define CVMX_USBCXREG2(reg, bid, off) \ + (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \ + (((off) & 7) + ((bid) & 1) * 0x8000000000ull) * 32) + +#define CVMX_USBCX_GAHBCFG(bid) CVMX_USBCXREG1(0x008, bid) +#define CVMX_USBCX_GHWCFG3(bid) CVMX_USBCXREG1(0x04c, bid) +#define CVMX_USBCX_GINTMSK(bid) CVMX_USBCXREG1(0x018, bid) +#define CVMX_USBCX_GINTSTS(bid) CVMX_USBCXREG1(0x014, bid) +#define CVMX_USBCX_GNPTXFSIZ(bid) CVMX_USBCXREG1(0x028, bid) +#define CVMX_USBCX_GNPTXSTS(bid) CVMX_USBCXREG1(0x02c, bid) +#define CVMX_USBCX_GOTGCTL(bid) CVMX_USBCXREG1(0x000, bid) +#define CVMX_USBCX_GRSTCTL(bid) CVMX_USBCXREG1(0x010, bid) +#define CVMX_USBCX_GRXFSIZ(bid) CVMX_USBCXREG1(0x024, bid) +#define CVMX_USBCX_GRXSTSPH(bid) CVMX_USBCXREG1(0x020, bid) +#define CVMX_USBCX_GUSBCFG(bid) CVMX_USBCXREG1(0x00c, bid) +#define CVMX_USBCX_HAINT(bid) CVMX_USBCXREG1(0x414, bid) +#define CVMX_USBCX_HAINTMSK(bid) CVMX_USBCXREG1(0x418, bid) +#define CVMX_USBCX_HCCHARX(off, bid) CVMX_USBCXREG2(0x500, bid, off) +#define CVMX_USBCX_HCFG(bid) CVMX_USBCXREG1(0x400, bid) +#define CVMX_USBCX_HCINTMSKX(off, bid) CVMX_USBCXREG2(0x50c, bid, off) +#define CVMX_USBCX_HCINTX(off, bid) CVMX_USBCXREG2(0x508, bid, off) +#define CVMX_USBCX_HCSPLTX(off, bid) CVMX_USBCXREG2(0x504, bid, off) +#define CVMX_USBCX_HCTSIZX(off, bid) CVMX_USBCXREG2(0x510, bid, off) +#define CVMX_USBCX_HFIR(bid) CVMX_USBCXREG1(0x404, bid) +#define CVMX_USBCX_HFNUM(bid) CVMX_USBCXREG1(0x408, bid) +#define CVMX_USBCX_HPRT(bid) CVMX_USBCXREG1(0x440, bid) +#define CVMX_USBCX_HPTXFSIZ(bid) CVMX_USBCXREG1(0x100, bid) +#define CVMX_USBCX_HPTXSTS(bid) CVMX_USBCXREG1(0x410, bid) + +#define CVMX_USBNXBID1(bid) (((bid) & 1) * 0x10000000ull) +#define CVMX_USBNXBID2(bid) (((bid) & 1) * 0x100000000000ull) + +#define CVMX_USBNXREG1(reg, bid) \ + (CVMX_ADD_IO_SEG(0x0001180068000000ull | reg) + CVMX_USBNXBID1(bid)) +#define CVMX_USBNXREG2(reg, bid) \ + (CVMX_ADD_IO_SEG(0x00016F0000000000ull | reg) + CVMX_USBNXBID2(bid)) + +#define CVMX_USBNX_CLK_CTL(bid) CVMX_USBNXREG1(0x10, bid) +#define CVMX_USBNX_DMA0_INB_CHN0(bid) CVMX_USBNXREG2(0x818, bid) +#define CVMX_USBNX_DMA0_OUTB_CHN0(bid) CVMX_USBNXREG2(0x858, bid) +#define CVMX_USBNX_USBP_CTL_STATUS(bid) CVMX_USBNXREG1(0x18, bid) + +/** + * cvmx_usbc#_gahbcfg + * + * Core AHB Configuration Register (GAHBCFG) + * + * This register can be used to configure the core after power-on or a change in + * mode of operation. This register mainly contains AHB system-related + * configuration parameters. The AHB is the processor interface to the O2P USB + * core. In general, software need not know about this interface except to + * program the values as specified. + * + * The application must program this register as part of the O2P USB core + * initialization. Do not change this register after the initial programming. + */ +union cvmx_usbcx_gahbcfg { + u32 u32; + /** + * struct cvmx_usbcx_gahbcfg_s + * @ptxfemplvl: Periodic TxFIFO Empty Level (PTxFEmpLvl) + * Software should set this bit to 0x1. + * Indicates when the Periodic TxFIFO Empty Interrupt bit in the + * Core Interrupt register (GINTSTS.PTxFEmp) is triggered. This + * bit is used only in Slave mode. 
+ * * 1'b0: GINTSTS.PTxFEmp interrupt indicates that the Periodic + * TxFIFO is half empty + * * 1'b1: GINTSTS.PTxFEmp interrupt indicates that the Periodic + * TxFIFO is completely empty + * @nptxfemplvl: Non-Periodic TxFIFO Empty Level (NPTxFEmpLvl) + * Software should set this bit to 0x1. + * Indicates when the Non-Periodic TxFIFO Empty Interrupt bit in + * the Core Interrupt register (GINTSTS.NPTxFEmp) is triggered. + * This bit is used only in Slave mode. + * * 1'b0: GINTSTS.NPTxFEmp interrupt indicates that the Non- + * Periodic TxFIFO is half empty + * * 1'b1: GINTSTS.NPTxFEmp interrupt indicates that the Non- + * Periodic TxFIFO is completely empty + * @dmaen: DMA Enable (DMAEn) + * * 1'b0: Core operates in Slave mode + * * 1'b1: Core operates in a DMA mode + * @hbstlen: Burst Length/Type (HBstLen) + * This field has not effect and should be left as 0x0. + * @glblintrmsk: Global Interrupt Mask (GlblIntrMsk) + * Software should set this field to 0x1. + * The application uses this bit to mask or unmask the interrupt + * line assertion to itself. Irrespective of this bit's setting, + * the interrupt status registers are updated by the core. + * * 1'b0: Mask the interrupt assertion to the application. + * * 1'b1: Unmask the interrupt assertion to the application. + */ + struct cvmx_usbcx_gahbcfg_s { + __BITFIELD_FIELD(u32 reserved_9_31 : 23, + __BITFIELD_FIELD(u32 ptxfemplvl : 1, + __BITFIELD_FIELD(u32 nptxfemplvl : 1, + __BITFIELD_FIELD(u32 reserved_6_6 : 1, + __BITFIELD_FIELD(u32 dmaen : 1, + __BITFIELD_FIELD(u32 hbstlen : 4, + __BITFIELD_FIELD(u32 glblintrmsk : 1, + ;))))))) + } s; +}; + +/** + * cvmx_usbc#_ghwcfg3 + * + * User HW Config3 Register (GHWCFG3) + * + * This register contains the configuration options of the O2P USB core. + */ +union cvmx_usbcx_ghwcfg3 { + u32 u32; + /** + * struct cvmx_usbcx_ghwcfg3_s + * @dfifodepth: DFIFO Depth (DfifoDepth) + * This value is in terms of 32-bit words. + * * Minimum value is 32 + * * Maximum value is 32768 + * @ahbphysync: AHB and PHY Synchronous (AhbPhySync) + * Indicates whether AHB and PHY clocks are synchronous to + * each other. + * * 1'b0: No + * * 1'b1: Yes + * This bit is tied to 1. + * @rsttype: Reset Style for Clocked always Blocks in RTL (RstType) + * * 1'b0: Asynchronous reset is used in the core + * * 1'b1: Synchronous reset is used in the core + * @optfeature: Optional Features Removed (OptFeature) + * Indicates whether the User ID register, GPIO interface ports, + * and SOF toggle and counter ports were removed for gate count + * optimization. + * @vendor_control_interface_support: Vendor Control Interface Support + * * 1'b0: Vendor Control Interface is not available on the core. + * * 1'b1: Vendor Control Interface is available. + * @i2c_selection: I2C Selection + * * 1'b0: I2C Interface is not available on the core. + * * 1'b1: I2C Interface is available on the core. + * @otgen: OTG Function Enabled (OtgEn) + * The application uses this bit to indicate the O2P USB core's + * OTG capabilities. + * * 1'b0: Not OTG capable + * * 1'b1: OTG Capable + * @pktsizewidth: Width of Packet Size Counters (PktSizeWidth) + * * 3'b000: 4 bits + * * 3'b001: 5 bits + * * 3'b010: 6 bits + * * 3'b011: 7 bits + * * 3'b100: 8 bits + * * 3'b101: 9 bits + * * 3'b110: 10 bits + * * Others: Reserved + * @xfersizewidth: Width of Transfer Size Counters (XferSizeWidth) + * * 4'b0000: 11 bits + * * 4'b0001: 12 bits + * - ... 
+ * * 4'b1000: 19 bits + * * Others: Reserved + */ + struct cvmx_usbcx_ghwcfg3_s { + __BITFIELD_FIELD(u32 dfifodepth : 16, + __BITFIELD_FIELD(u32 reserved_13_15 : 3, + __BITFIELD_FIELD(u32 ahbphysync : 1, + __BITFIELD_FIELD(u32 rsttype : 1, + __BITFIELD_FIELD(u32 optfeature : 1, + __BITFIELD_FIELD(u32 vendor_control_interface_support : 1, + __BITFIELD_FIELD(u32 i2c_selection : 1, + __BITFIELD_FIELD(u32 otgen : 1, + __BITFIELD_FIELD(u32 pktsizewidth : 3, + __BITFIELD_FIELD(u32 xfersizewidth : 4, + ;)))))))))) + } s; +}; + +/** + * cvmx_usbc#_gintmsk + * + * Core Interrupt Mask Register (GINTMSK) + * + * This register works with the Core Interrupt register to interrupt the + * application. When an interrupt bit is masked, the interrupt associated with + * that bit will not be generated. However, the Core Interrupt (GINTSTS) + * register bit corresponding to that interrupt will still be set. + * Mask interrupt: 1'b0, Unmask interrupt: 1'b1 + */ +union cvmx_usbcx_gintmsk { + u32 u32; + /** + * struct cvmx_usbcx_gintmsk_s + * @wkupintmsk: Resume/Remote Wakeup Detected Interrupt Mask + * (WkUpIntMsk) + * @sessreqintmsk: Session Request/New Session Detected Interrupt Mask + * (SessReqIntMsk) + * @disconnintmsk: Disconnect Detected Interrupt Mask (DisconnIntMsk) + * @conidstschngmsk: Connector ID Status Change Mask (ConIDStsChngMsk) + * @ptxfempmsk: Periodic TxFIFO Empty Mask (PTxFEmpMsk) + * @hchintmsk: Host Channels Interrupt Mask (HChIntMsk) + * @prtintmsk: Host Port Interrupt Mask (PrtIntMsk) + * @fetsuspmsk: Data Fetch Suspended Mask (FetSuspMsk) + * @incomplpmsk: Incomplete Periodic Transfer Mask (incomplPMsk) + * Incomplete Isochronous OUT Transfer Mask + * (incompISOOUTMsk) + * @incompisoinmsk: Incomplete Isochronous IN Transfer Mask + * (incompISOINMsk) + * @oepintmsk: OUT Endpoints Interrupt Mask (OEPIntMsk) + * @inepintmsk: IN Endpoints Interrupt Mask (INEPIntMsk) + * @epmismsk: Endpoint Mismatch Interrupt Mask (EPMisMsk) + * @eopfmsk: End of Periodic Frame Interrupt Mask (EOPFMsk) + * @isooutdropmsk: Isochronous OUT Packet Dropped Interrupt Mask + * (ISOOutDropMsk) + * @enumdonemsk: Enumeration Done Mask (EnumDoneMsk) + * @usbrstmsk: USB Reset Mask (USBRstMsk) + * @usbsuspmsk: USB Suspend Mask (USBSuspMsk) + * @erlysuspmsk: Early Suspend Mask (ErlySuspMsk) + * @i2cint: I2C Interrupt Mask (I2CINT) + * @ulpickintmsk: ULPI Carkit Interrupt Mask (ULPICKINTMsk) + * I2C Carkit Interrupt Mask (I2CCKINTMsk) + * @goutnakeffmsk: Global OUT NAK Effective Mask (GOUTNakEffMsk) + * @ginnakeffmsk: Global Non-Periodic IN NAK Effective Mask + * (GINNakEffMsk) + * @nptxfempmsk: Non-Periodic TxFIFO Empty Mask (NPTxFEmpMsk) + * @rxflvlmsk: Receive FIFO Non-Empty Mask (RxFLvlMsk) + * @sofmsk: Start of (micro)Frame Mask (SofMsk) + * @otgintmsk: OTG Interrupt Mask (OTGIntMsk) + * @modemismsk: Mode Mismatch Interrupt Mask (ModeMisMsk) + */ + struct cvmx_usbcx_gintmsk_s { + __BITFIELD_FIELD(u32 wkupintmsk : 1, + __BITFIELD_FIELD(u32 sessreqintmsk : 1, + __BITFIELD_FIELD(u32 disconnintmsk : 1, + __BITFIELD_FIELD(u32 conidstschngmsk : 1, + __BITFIELD_FIELD(u32 reserved_27_27 : 1, + __BITFIELD_FIELD(u32 ptxfempmsk : 1, + __BITFIELD_FIELD(u32 hchintmsk : 1, + __BITFIELD_FIELD(u32 prtintmsk : 1, + __BITFIELD_FIELD(u32 reserved_23_23 : 1, + __BITFIELD_FIELD(u32 fetsuspmsk : 1, + __BITFIELD_FIELD(u32 incomplpmsk : 1, + __BITFIELD_FIELD(u32 incompisoinmsk : 1, + __BITFIELD_FIELD(u32 oepintmsk : 1, + __BITFIELD_FIELD(u32 inepintmsk : 1, + __BITFIELD_FIELD(u32 epmismsk : 1, + __BITFIELD_FIELD(u32 reserved_16_16 : 1, + 
__BITFIELD_FIELD(u32 eopfmsk : 1, + __BITFIELD_FIELD(u32 isooutdropmsk : 1, + __BITFIELD_FIELD(u32 enumdonemsk : 1, + __BITFIELD_FIELD(u32 usbrstmsk : 1, + __BITFIELD_FIELD(u32 usbsuspmsk : 1, + __BITFIELD_FIELD(u32 erlysuspmsk : 1, + __BITFIELD_FIELD(u32 i2cint : 1, + __BITFIELD_FIELD(u32 ulpickintmsk : 1, + __BITFIELD_FIELD(u32 goutnakeffmsk : 1, + __BITFIELD_FIELD(u32 ginnakeffmsk : 1, + __BITFIELD_FIELD(u32 nptxfempmsk : 1, + __BITFIELD_FIELD(u32 rxflvlmsk : 1, + __BITFIELD_FIELD(u32 sofmsk : 1, + __BITFIELD_FIELD(u32 otgintmsk : 1, + __BITFIELD_FIELD(u32 modemismsk : 1, + __BITFIELD_FIELD(u32 reserved_0_0 : 1, + ;)))))))))))))))))))))))))))))))) + } s; +}; + +/** + * cvmx_usbc#_gintsts + * + * Core Interrupt Register (GINTSTS) + * + * This register interrupts the application for system-level events in the + * current mode of operation (Device mode or Host mode). It is shown in + * Interrupt. Some of the bits in this register are valid only in Host mode, + * while others are valid in Device mode only. This register also indicates the + * current mode of operation. In order to clear the interrupt status bits of + * type R_SS_WC, the application must write 1'b1 into the bit. The FIFO status + * interrupts are read only; once software reads from or writes to the FIFO + * while servicing these interrupts, FIFO interrupt conditions are cleared + * automatically. + */ +union cvmx_usbcx_gintsts { + u32 u32; + /** + * struct cvmx_usbcx_gintsts_s + * @wkupint: Resume/Remote Wakeup Detected Interrupt (WkUpInt) + * In Device mode, this interrupt is asserted when a resume is + * detected on the USB. In Host mode, this interrupt is asserted + * when a remote wakeup is detected on the USB. + * For more information on how to use this interrupt, see "Partial + * Power-Down and Clock Gating Programming Model" on + * page 353. + * @sessreqint: Session Request/New Session Detected Interrupt + * (SessReqInt) + * In Host mode, this interrupt is asserted when a session request + * is detected from the device. In Device mode, this interrupt is + * asserted when the utmiotg_bvalid signal goes high. + * For more information on how to use this interrupt, see "Partial + * Power-Down and Clock Gating Programming Model" on + * page 353. + * @disconnint: Disconnect Detected Interrupt (DisconnInt) + * Asserted when a device disconnect is detected. + * @conidstschng: Connector ID Status Change (ConIDStsChng) + * The core sets this bit when there is a change in connector ID + * status. + * @ptxfemp: Periodic TxFIFO Empty (PTxFEmp) + * Asserted when the Periodic Transmit FIFO is either half or + * completely empty and there is space for at least one entry to be + * written in the Periodic Request Queue. The half or completely + * empty status is determined by the Periodic TxFIFO Empty Level + * bit in the Core AHB Configuration register + * (GAHBCFG.PTxFEmpLvl). + * @hchint: Host Channels Interrupt (HChInt) + * The core sets this bit to indicate that an interrupt is pending + * on one of the channels of the core (in Host mode). The + * application must read the Host All Channels Interrupt (HAINT) + * register to determine the exact number of the channel on which + * the interrupt occurred, and then read the corresponding Host + * Channel-n Interrupt (HCINTn) register to determine the exact + * cause of the interrupt. The application must clear the + * appropriate status bit in the HCINTn register to clear this bit. 
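As a quick illustration of the mask/status split described above: host-mode code normally leaves GINTSTS alone and only unmasks, in GINTMSK, the summary bits it intends to service. A minimal sketch (not the driver's actual init path), assuming a hypothetical csr_write32() accessor for the addresses built by the CVMX_USBCX_* macros:

	/*
	 * Illustrative only: unmask just the host-mode summary interrupts.
	 * csr_write32() stands in for the driver's CSR write helper, which
	 * is not shown in this hunk.
	 */
	static void example_unmask_host_irqs(int index)
	{
		union cvmx_usbcx_gintmsk gintmsk;

		gintmsk.u32 = 0;		/* everything masked by default */
		gintmsk.s.hchintmsk = 1;	/* host channels summary */
		gintmsk.s.prtintmsk = 1;	/* host port status change */
		csr_write32(CVMX_USBCX_GINTMSK(index), gintmsk.u32);
	}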
+ * @prtint: Host Port Interrupt (PrtInt)
+ * The core sets this bit to indicate a change in port status of
+ * one of the O2P USB core ports in Host mode. The application must
+ * read the Host Port Control and Status (HPRT) register to
+ * determine the exact event that caused this interrupt. The
+ * application must clear the appropriate status bit in the Host
+ * Port Control and Status register to clear this bit.
+ * @fetsusp: Data Fetch Suspended (FetSusp)
+ * This interrupt is valid only in DMA mode. This interrupt
+ * indicates that the core has stopped fetching data for IN
+ * endpoints due to the unavailability of TxFIFO space or Request
+ * Queue space. This interrupt is used by the application for an
+ * endpoint mismatch algorithm.
+ * @incomplp: Incomplete Periodic Transfer (incomplP)
+ * In Host mode, the core sets this interrupt bit when there are
+ * incomplete periodic transactions still pending which are
+ * scheduled for the current microframe.
+ * Incomplete Isochronous OUT Transfer (incompISOOUT)
+ * In Device mode, the core sets this interrupt to indicate that
+ * there is at least one isochronous OUT endpoint on which the
+ * transfer is not completed in the current microframe. This
+ * interrupt is asserted along with the End of Periodic Frame
+ * Interrupt (EOPF) bit in this register.
+ * @incompisoin: Incomplete Isochronous IN Transfer (incompISOIN)
+ * The core sets this interrupt to indicate that there is at least
+ * one isochronous IN endpoint on which the transfer is not
+ * completed in the current microframe. This interrupt is asserted
+ * along with the End of Periodic Frame Interrupt (EOPF) bit in
+ * this register.
+ * @oepint: OUT Endpoints Interrupt (OEPInt)
+ * The core sets this bit to indicate that an interrupt is pending
+ * on one of the OUT endpoints of the core (in Device mode). The
+ * application must read the Device All Endpoints Interrupt
+ * (DAINT) register to determine the exact number of the OUT
+ * endpoint on which the interrupt occurred, and then read the
+ * corresponding Device OUT Endpoint-n Interrupt (DOEPINTn)
+ * register to determine the exact cause of the interrupt. The
+ * application must clear the appropriate status bit in the
+ * corresponding DOEPINTn register to clear this bit.
+ * @iepint: IN Endpoints Interrupt (IEPInt)
+ * The core sets this bit to indicate that an interrupt is pending
+ * on one of the IN endpoints of the core (in Device mode). The
+ * application must read the Device All Endpoints Interrupt
+ * (DAINT) register to determine the exact number of the IN
+ * endpoint on which the interrupt occurred, and then read the
+ * corresponding Device IN Endpoint-n Interrupt (DIEPINTn)
+ * register to determine the exact cause of the interrupt. The
+ * application must clear the appropriate status bit in the
+ * corresponding DIEPINTn register to clear this bit.
+ * @epmis: Endpoint Mismatch Interrupt (EPMis)
+ * Indicates that an IN token has been received for a non-periodic
+ * endpoint, but the data for another endpoint is present in the
+ * top of the Non-Periodic Transmit FIFO and the IN endpoint
+ * mismatch count programmed by the application has expired.
+ * @eopf: End of Periodic Frame Interrupt (EOPF)
+ * Indicates that the period specified in the Periodic Frame
+ * Interval field of the Device Configuration register
+ * (DCFG.PerFrInt) has been reached in the current microframe.
+ * @isooutdrop: Isochronous OUT Packet Dropped Interrupt (ISOOutDrop)
+ * The core sets this bit when it fails to write an isochronous OUT
+ * packet into the RxFIFO because the RxFIFO doesn't have
+ * enough space to accommodate a maximum packet size packet
+ * for the isochronous OUT endpoint.
+ * @enumdone: Enumeration Done (EnumDone)
+ * The core sets this bit to indicate that speed enumeration is
+ * complete. The application must read the Device Status (DSTS)
+ * register to obtain the enumerated speed.
+ * @usbrst: USB Reset (USBRst)
+ * The core sets this bit to indicate that a reset is detected on
+ * the USB.
+ * @usbsusp: USB Suspend (USBSusp)
+ * The core sets this bit to indicate that a suspend was detected
+ * on the USB. The core enters the Suspended state when there
+ * is no activity on the phy_line_state_i signal for an extended
+ * period of time.
+ * @erlysusp: Early Suspend (ErlySusp)
+ * The core sets this bit to indicate that an Idle state has been
+ * detected on the USB for 3 ms.
+ * @i2cint: I2C Interrupt (I2CINT)
+ * This bit is always 0x0.
+ * @ulpickint: ULPI Carkit Interrupt (ULPICKINT)
+ * This bit is always 0x0.
+ * @goutnakeff: Global OUT NAK Effective (GOUTNakEff)
+ * Indicates that the Set Global OUT NAK bit in the Device Control
+ * register (DCTL.SGOUTNak), set by the application, has taken
+ * effect in the core. This bit can be cleared by writing the Clear
+ * Global OUT NAK bit in the Device Control register
+ * (DCTL.CGOUTNak).
+ * @ginnakeff: Global IN Non-Periodic NAK Effective (GINNakEff)
+ * Indicates that the Set Global Non-Periodic IN NAK bit in the
+ * Device Control register (DCTL.SGNPInNak), set by the
+ * application, has taken effect in the core. That is, the core has
+ * sampled the Global IN NAK bit set by the application. This bit
+ * can be cleared by clearing the Clear Global Non-Periodic IN
+ * NAK bit in the Device Control register (DCTL.CGNPInNak).
+ * This interrupt does not necessarily mean that a NAK handshake
+ * is sent out on the USB. The STALL bit takes precedence over
+ * the NAK bit.
+ * @nptxfemp: Non-Periodic TxFIFO Empty (NPTxFEmp)
+ * This interrupt is asserted when the Non-Periodic TxFIFO is
+ * either half or completely empty, and there is space for at least
+ * one entry to be written to the Non-Periodic Transmit Request
+ * Queue. The half or completely empty status is determined by
+ * the Non-Periodic TxFIFO Empty Level bit in the Core AHB
+ * Configuration register (GAHBCFG.NPTxFEmpLvl).
+ * @rxflvl: RxFIFO Non-Empty (RxFLvl)
+ * Indicates that there is at least one packet pending to be read
+ * from the RxFIFO.
+ * @sof: Start of (micro)Frame (Sof)
+ * In Host mode, the core sets this bit to indicate that an SOF
+ * (FS), micro-SOF (HS), or Keep-Alive (LS) is transmitted on the
+ * USB. The application must write a 1 to this bit to clear the
+ * interrupt.
+ * In Device mode, the core sets this bit to indicate that an
+ * SOF token has been received on the USB. The application can read
+ * the Device Status register to get the current (micro)frame
+ * number. This interrupt is seen only when the core is operating
+ * at either HS or FS.
+ * @otgint: OTG Interrupt (OTGInt)
+ * The core sets this bit to indicate an OTG protocol event. The
+ * application must read the OTG Interrupt Status (GOTGINT)
+ * register to determine the exact event that caused this
+ * interrupt. The application must clear the appropriate status bit
+ * in the GOTGINT register to clear this bit.
+ * @modemis: Mode Mismatch Interrupt (ModeMis) + * The core sets this bit when the application is trying to access: + * * A Host mode register, when the core is operating in Device + * mode + * * A Device mode register, when the core is operating in Host + * mode + * The register access is completed on the AHB with an OKAY + * response, but is ignored by the core internally and doesn't + * affect the operation of the core. + * @curmod: Current Mode of Operation (CurMod) + * Indicates the current mode of operation. + * * 1'b0: Device mode + * * 1'b1: Host mode + */ + struct cvmx_usbcx_gintsts_s { + __BITFIELD_FIELD(u32 wkupint : 1, + __BITFIELD_FIELD(u32 sessreqint : 1, + __BITFIELD_FIELD(u32 disconnint : 1, + __BITFIELD_FIELD(u32 conidstschng : 1, + __BITFIELD_FIELD(u32 reserved_27_27 : 1, + __BITFIELD_FIELD(u32 ptxfemp : 1, + __BITFIELD_FIELD(u32 hchint : 1, + __BITFIELD_FIELD(u32 prtint : 1, + __BITFIELD_FIELD(u32 reserved_23_23 : 1, + __BITFIELD_FIELD(u32 fetsusp : 1, + __BITFIELD_FIELD(u32 incomplp : 1, + __BITFIELD_FIELD(u32 incompisoin : 1, + __BITFIELD_FIELD(u32 oepint : 1, + __BITFIELD_FIELD(u32 iepint : 1, + __BITFIELD_FIELD(u32 epmis : 1, + __BITFIELD_FIELD(u32 reserved_16_16 : 1, + __BITFIELD_FIELD(u32 eopf : 1, + __BITFIELD_FIELD(u32 isooutdrop : 1, + __BITFIELD_FIELD(u32 enumdone : 1, + __BITFIELD_FIELD(u32 usbrst : 1, + __BITFIELD_FIELD(u32 usbsusp : 1, + __BITFIELD_FIELD(u32 erlysusp : 1, + __BITFIELD_FIELD(u32 i2cint : 1, + __BITFIELD_FIELD(u32 ulpickint : 1, + __BITFIELD_FIELD(u32 goutnakeff : 1, + __BITFIELD_FIELD(u32 ginnakeff : 1, + __BITFIELD_FIELD(u32 nptxfemp : 1, + __BITFIELD_FIELD(u32 rxflvl : 1, + __BITFIELD_FIELD(u32 sof : 1, + __BITFIELD_FIELD(u32 otgint : 1, + __BITFIELD_FIELD(u32 modemis : 1, + __BITFIELD_FIELD(u32 curmod : 1, + ;)))))))))))))))))))))))))))))))) + } s; +}; + +/** + * cvmx_usbc#_gnptxfsiz + * + * Non-Periodic Transmit FIFO Size Register (GNPTXFSIZ) + * + * The application can program the RAM size and the memory start address for the + * Non-Periodic TxFIFO. + */ +union cvmx_usbcx_gnptxfsiz { + u32 u32; + /** + * struct cvmx_usbcx_gnptxfsiz_s + * @nptxfdep: Non-Periodic TxFIFO Depth (NPTxFDep) + * This value is in terms of 32-bit words. + * Minimum value is 16 + * Maximum value is 32768 + * @nptxfstaddr: Non-Periodic Transmit RAM Start Address (NPTxFStAddr) + * This field contains the memory start address for Non-Periodic + * Transmit FIFO RAM. + */ + struct cvmx_usbcx_gnptxfsiz_s { + __BITFIELD_FIELD(u32 nptxfdep : 16, + __BITFIELD_FIELD(u32 nptxfstaddr : 16, + ;)) + } s; +}; + +/** + * cvmx_usbc#_gnptxsts + * + * Non-Periodic Transmit FIFO/Queue Status Register (GNPTXSTS) + * + * This read-only register contains the free space information for the + * Non-Periodic TxFIFO and the Non-Periodic Transmit Request Queue. + */ +union cvmx_usbcx_gnptxsts { + u32 u32; + /** + * struct cvmx_usbcx_gnptxsts_s + * @nptxqtop: Top of the Non-Periodic Transmit Request Queue (NPTxQTop) + * Entry in the Non-Periodic Tx Request Queue that is currently + * being processed by the MAC. + * * Bits [30:27]: Channel/endpoint number + * * Bits [26:25]: + * - 2'b00: IN/OUT token + * - 2'b01: Zero-length transmit packet (device IN/host OUT) + * - 2'b10: PING/CSPLIT token + * - 2'b11: Channel halt command + * * Bit [24]: Terminate (last entry for selected channel/endpoint) + * @nptxqspcavail: Non-Periodic Transmit Request Queue Space Available + * (NPTxQSpcAvail) + * Indicates the amount of free space available in the Non- + * Periodic Transmit Request Queue. 
This queue holds both IN + * and OUT requests in Host mode. Device mode has only IN + * requests. + * * 8'h0: Non-Periodic Transmit Request Queue is full + * * 8'h1: 1 location available + * * 8'h2: 2 locations available + * * n: n locations available (0..8) + * * Others: Reserved + * @nptxfspcavail: Non-Periodic TxFIFO Space Avail (NPTxFSpcAvail) + * Indicates the amount of free space available in the Non- + * Periodic TxFIFO. + * Values are in terms of 32-bit words. + * * 16'h0: Non-Periodic TxFIFO is full + * * 16'h1: 1 word available + * * 16'h2: 2 words available + * * 16'hn: n words available (where 0..32768) + * * 16'h8000: 32768 words available + * * Others: Reserved + */ + struct cvmx_usbcx_gnptxsts_s { + __BITFIELD_FIELD(u32 reserved_31_31 : 1, + __BITFIELD_FIELD(u32 nptxqtop : 7, + __BITFIELD_FIELD(u32 nptxqspcavail : 8, + __BITFIELD_FIELD(u32 nptxfspcavail : 16, + ;)))) + } s; +}; + +/** + * cvmx_usbc#_grstctl + * + * Core Reset Register (GRSTCTL) + * + * The application uses this register to reset various hardware features inside + * the core. + */ +union cvmx_usbcx_grstctl { + u32 u32; + /** + * struct cvmx_usbcx_grstctl_s + * @ahbidle: AHB Master Idle (AHBIdle) + * Indicates that the AHB Master State Machine is in the IDLE + * condition. + * @dmareq: DMA Request Signal (DMAReq) + * Indicates that the DMA request is in progress. Used for debug. + * @txfnum: TxFIFO Number (TxFNum) + * This is the FIFO number that must be flushed using the TxFIFO + * Flush bit. This field must not be changed until the core clears + * the TxFIFO Flush bit. + * * 5'h0: Non-Periodic TxFIFO flush + * * 5'h1: Periodic TxFIFO 1 flush in Device mode or Periodic + * TxFIFO flush in Host mode + * * 5'h2: Periodic TxFIFO 2 flush in Device mode + * - ... + * * 5'hF: Periodic TxFIFO 15 flush in Device mode + * * 5'h10: Flush all the Periodic and Non-Periodic TxFIFOs in the + * core + * @txfflsh: TxFIFO Flush (TxFFlsh) + * This bit selectively flushes a single or all transmit FIFOs, but + * cannot do so if the core is in the midst of a transaction. + * The application must only write this bit after checking that the + * core is neither writing to the TxFIFO nor reading from the + * TxFIFO. + * The application must wait until the core clears this bit before + * performing any operations. This bit takes 8 clocks (of phy_clk + * or hclk, whichever is slower) to clear. + * @rxfflsh: RxFIFO Flush (RxFFlsh) + * The application can flush the entire RxFIFO using this bit, but + * must first ensure that the core is not in the middle of a + * transaction. + * The application must only write to this bit after checking that + * the core is neither reading from the RxFIFO nor writing to the + * RxFIFO. + * The application must wait until the bit is cleared before + * performing any other operations. This bit will take 8 clocks + * (slowest of PHY or AHB clock) to clear. + * @intknqflsh: IN Token Sequence Learning Queue Flush (INTknQFlsh) + * The application writes this bit to flush the IN Token Sequence + * Learning Queue. + * @frmcntrrst: Host Frame Counter Reset (FrmCntrRst) + * The application writes this bit to reset the (micro)frame number + * counter inside the core. When the (micro)frame counter is reset, + * the subsequent SOF sent out by the core will have a + * (micro)frame number of 0. + * @hsftrst: HClk Soft Reset (HSftRst) + * The application uses this bit to flush the control logic in the + * AHB Clock domain. Only AHB Clock Domain pipelines are reset. + * * FIFOs are not flushed with this bit. 
+ * * All state machines in the AHB clock domain are reset to the
+ * Idle state after terminating the transactions on the AHB,
+ * following the protocol.
+ * * CSR control bits used by the AHB clock domain state
+ * machines are cleared.
+ * * To clear this interrupt, status mask bits that control the
+ * interrupt status and are generated by the AHB clock domain
+ * state machine are cleared.
+ * * Because interrupt status bits are not cleared, the application
+ * can get the status of any core events that occurred after it set
+ * this bit.
+ * This is a self-clearing bit that the core clears after all
+ * necessary logic is reset in the core. This may take several
+ * clocks, depending on the core's current state.
+ * @csftrst: Core Soft Reset (CSftRst)
+ * Resets the hclk and phy_clock domains as follows:
+ * * Clears the interrupts and all the CSR registers except the
+ * following register bits:
+ * - PCGCCTL.RstPdwnModule
+ * - PCGCCTL.GateHclk
+ * - PCGCCTL.PwrClmp
+ * - PCGCCTL.StopPclk
+ * - GUSBCFG.PhyLPwrClkSel
+ * - GUSBCFG.DDRSel
+ * - GUSBCFG.PHYSel
+ * - GUSBCFG.FSIntf
+ * - GUSBCFG.ULPI_UTMI_Sel
+ * - GUSBCFG.PHYIf
+ * - HCFG.FSLSPclkSel
+ * - DCFG.DevSpd
+ * * All module state machines (except the AHB Slave Unit) are
+ * reset to the IDLE state, and all the transmit FIFOs and the
+ * receive FIFO are flushed.
+ * * Any transactions on the AHB Master are terminated as soon
+ * as possible, after gracefully completing the last data phase of
+ * an AHB transfer. Any transactions on the USB are terminated
+ * immediately.
+ * The application can write to this bit any time it wants to reset
+ * the core. This is a self-clearing bit and the core clears this
+ * bit after all the necessary logic is reset in the core, which
+ * may take several clocks, depending on the current state of the
+ * core. Once this bit is cleared software should wait at least 3
+ * PHY clocks before doing any access to the PHY domain
+ * (synchronization delay). Software should also check that
+ * bit 31 of this register is 1 (AHB Master is IDLE) before
+ * starting any operation.
+ * Typically software reset is used during software development
+ * and also when you dynamically change the PHY selection bits
+ * in the USB configuration registers listed above. When you
+ * change the PHY, the corresponding clock for the PHY is
+ * selected and used in the PHY domain. Once a new clock is
+ * selected, the PHY domain has to be reset for proper operation.
+ */
+ struct cvmx_usbcx_grstctl_s {
+ __BITFIELD_FIELD(u32 ahbidle : 1,
+ __BITFIELD_FIELD(u32 dmareq : 1,
+ __BITFIELD_FIELD(u32 reserved_11_29 : 19,
+ __BITFIELD_FIELD(u32 txfnum : 5,
+ __BITFIELD_FIELD(u32 txfflsh : 1,
+ __BITFIELD_FIELD(u32 rxfflsh : 1,
+ __BITFIELD_FIELD(u32 intknqflsh : 1,
+ __BITFIELD_FIELD(u32 frmcntrrst : 1,
+ __BITFIELD_FIELD(u32 hsftrst : 1,
+ __BITFIELD_FIELD(u32 csftrst : 1,
+ ;))))))))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_grxfsiz
+ *
+ * Receive FIFO Size Register (GRXFSIZ)
+ *
+ * The application can program the RAM size that must be allocated to the
+ * RxFIFO.
+ */
+union cvmx_usbcx_grxfsiz {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_grxfsiz_s
+ * @rxfdep: RxFIFO Depth (RxFDep)
+ * This value is in terms of 32-bit words.
+ * * Minimum value is 16
+ * * Maximum value is 32768
+ */
+ struct cvmx_usbcx_grxfsiz_s {
+ __BITFIELD_FIELD(u32 reserved_16_31 : 16,
+ __BITFIELD_FIELD(u32 rxfdep : 16,
+ ;))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_grxstsph
+ *
+ * Receive Status Read and Pop Register, Host Mode (GRXSTSPH)
+ *
+ * A read to the Receive Status Read and Pop register returns and additionally
+ * pops the top data entry out of the RxFIFO.
+ * This description is only valid when the core is in Host Mode. For Device Mode
+ * use USBC_GRXSTSPD instead.
+ * NOTE: GRXSTSPH and GRXSTSPD are physically the same register and share the
+ * same offset in the O2P USB core. The offset difference shown in this
+ * document is for software clarity and is actually ignored by the
+ * hardware.
+ */
+union cvmx_usbcx_grxstsph {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_grxstsph_s
+ * @pktsts: Packet Status (PktSts)
+ * Indicates the status of the received packet
+ * * 4'b0010: IN data packet received
+ * * 4'b0011: IN transfer completed (triggers an interrupt)
+ * * 4'b0101: Data toggle error (triggers an interrupt)
+ * * 4'b0111: Channel halted (triggers an interrupt)
+ * * Others: Reserved
+ * @dpid: Data PID (DPID)
+ * * 2'b00: DATA0
+ * * 2'b10: DATA1
+ * * 2'b01: DATA2
+ * * 2'b11: MDATA
+ * @bcnt: Byte Count (BCnt)
+ * Indicates the byte count of the received IN data packet
+ * @chnum: Channel Number (ChNum)
+ * Indicates the channel number to which the current received
+ * packet belongs.
+ */
+ struct cvmx_usbcx_grxstsph_s {
+ __BITFIELD_FIELD(u32 reserved_21_31 : 11,
+ __BITFIELD_FIELD(u32 pktsts : 4,
+ __BITFIELD_FIELD(u32 dpid : 2,
+ __BITFIELD_FIELD(u32 bcnt : 11,
+ __BITFIELD_FIELD(u32 chnum : 4,
+ ;)))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_gusbcfg
+ *
+ * Core USB Configuration Register (GUSBCFG)
+ *
+ * This register can be used to configure the core after power-on or a change
+ * to Host mode or Device mode. It contains USB and USB-PHY related
+ * configuration parameters. The application must program this register before
+ * starting any transactions on either the AHB or the USB. Do not make changes
+ * to this register after the initial programming.
+ */
+union cvmx_usbcx_gusbcfg {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_gusbcfg_s
+ * @otgi2csel: UTMIFS or I2C Interface Select (OtgI2CSel)
+ * This bit is always 0x0.
+ * @phylpwrclksel: PHY Low-Power Clock Select (PhyLPwrClkSel)
+ * Software should set this bit to 0x0.
+ * Selects either 480-MHz or 48-MHz (low-power) PHY mode. In
+ * FS and LS modes, the PHY can usually operate on a 48-MHz
+ * clock to save power.
+ * * 1'b0: 480-MHz Internal PLL clock
+ * * 1'b1: 48-MHz External Clock
+ * In 480 MHz mode, the UTMI interface operates at either 60 or
+ * 30-MHz, depending upon whether 8- or 16-bit data width is
+ * selected. In 48-MHz mode, the UTMI interface operates at 48
+ * MHz in FS mode and at either 48 or 6 MHz in LS mode
+ * (depending on the PHY vendor).
+ * This bit drives the utmi_fsls_low_power core output signal, and
+ * is valid only for UTMI+ PHYs.
+ * @usbtrdtim: USB Turnaround Time (USBTrdTim)
+ * Sets the turnaround time in PHY clocks.
+ * Specifies the response time for a MAC request to the Packet
+ * FIFO Controller (PFC) to fetch data from the DFIFO (SPRAM).
+ * This must be programmed to 0x5.
+ * @hnpcap: HNP-Capable (HNPCap)
+ * This bit is always 0x0.
+ * @srpcap: SRP-Capable (SRPCap)
+ * This bit is always 0x0.
+ * @ddrsel: ULPI DDR Select (DDRSel)
+ * Software should set this bit to 0x0.
+ * @physel: USB 2.0 High-Speed PHY or USB 1.1 Full-Speed Serial + * Software should set this bit to 0x0. + * @fsintf: Full-Speed Serial Interface Select (FSIntf) + * Software should set this bit to 0x0. + * @ulpi_utmi_sel: ULPI or UTMI+ Select (ULPI_UTMI_Sel) + * This bit is always 0x0. + * @phyif: PHY Interface (PHYIf) + * This bit is always 0x1. + * @toutcal: HS/FS Timeout Calibration (TOutCal) + * The number of PHY clocks that the application programs in this + * field is added to the high-speed/full-speed interpacket timeout + * duration in the core to account for any additional delays + * introduced by the PHY. This may be required, since the delay + * introduced by the PHY in generating the linestate condition may + * vary from one PHY to another. + * The USB standard timeout value for high-speed operation is + * 736 to 816 (inclusive) bit times. The USB standard timeout + * value for full-speed operation is 16 to 18 (inclusive) bit + * times. The application must program this field based on the + * speed of enumeration. The number of bit times added per PHY + * clock are: + * High-speed operation: + * * One 30-MHz PHY clock = 16 bit times + * * One 60-MHz PHY clock = 8 bit times + * Full-speed operation: + * * One 30-MHz PHY clock = 0.4 bit times + * * One 60-MHz PHY clock = 0.2 bit times + * * One 48-MHz PHY clock = 0.25 bit times + */ + struct cvmx_usbcx_gusbcfg_s { + __BITFIELD_FIELD(u32 reserved_17_31 : 15, + __BITFIELD_FIELD(u32 otgi2csel : 1, + __BITFIELD_FIELD(u32 phylpwrclksel : 1, + __BITFIELD_FIELD(u32 reserved_14_14 : 1, + __BITFIELD_FIELD(u32 usbtrdtim : 4, + __BITFIELD_FIELD(u32 hnpcap : 1, + __BITFIELD_FIELD(u32 srpcap : 1, + __BITFIELD_FIELD(u32 ddrsel : 1, + __BITFIELD_FIELD(u32 physel : 1, + __BITFIELD_FIELD(u32 fsintf : 1, + __BITFIELD_FIELD(u32 ulpi_utmi_sel : 1, + __BITFIELD_FIELD(u32 phyif : 1, + __BITFIELD_FIELD(u32 toutcal : 3, + ;))))))))))))) + } s; +}; + +/** + * cvmx_usbc#_haint + * + * Host All Channels Interrupt Register (HAINT) + * + * When a significant event occurs on a channel, the Host All Channels Interrupt + * register interrupts the application using the Host Channels Interrupt bit of + * the Core Interrupt register (GINTSTS.HChInt). This is shown in Interrupt. + * There is one interrupt bit per channel, up to a maximum of 16 bits. Bits in + * this register are set and cleared when the application sets and clears bits + * in the corresponding Host Channel-n Interrupt register. + */ +union cvmx_usbcx_haint { + u32 u32; + /** + * struct cvmx_usbcx_haint_s + * @haint: Channel Interrupts (HAINT) + * One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15 + */ + struct cvmx_usbcx_haint_s { + __BITFIELD_FIELD(u32 reserved_16_31 : 16, + __BITFIELD_FIELD(u32 haint : 16, + ;)) + } s; +}; + +/** + * cvmx_usbc#_haintmsk + * + * Host All Channels Interrupt Mask Register (HAINTMSK) + * + * The Host All Channel Interrupt Mask register works with the Host All Channel + * Interrupt register to interrupt the application when an event occurs on a + * channel. There is one interrupt mask bit per channel, up to a maximum of 16 + * bits. 
+ * Mask interrupt: 1'b0 Unmask interrupt: 1'b1 + */ +union cvmx_usbcx_haintmsk { + u32 u32; + /** + * struct cvmx_usbcx_haintmsk_s + * @haintmsk: Channel Interrupt Mask (HAINTMsk) + * One bit per channel: Bit 0 for channel 0, bit 15 for channel 15 + */ + struct cvmx_usbcx_haintmsk_s { + __BITFIELD_FIELD(u32 reserved_16_31 : 16, + __BITFIELD_FIELD(u32 haintmsk : 16, + ;)) + } s; +}; + +/** + * cvmx_usbc#_hcchar# + * + * Host Channel-n Characteristics Register (HCCHAR) + * + */ +union cvmx_usbcx_hccharx { + u32 u32; + /** + * struct cvmx_usbcx_hccharx_s + * @chena: Channel Enable (ChEna) + * This field is set by the application and cleared by the OTG + * host. + * * 1'b0: Channel disabled + * * 1'b1: Channel enabled + * @chdis: Channel Disable (ChDis) + * The application sets this bit to stop transmitting/receiving + * data on a channel, even before the transfer for that channel is + * complete. The application must wait for the Channel Disabled + * interrupt before treating the channel as disabled. + * @oddfrm: Odd Frame (OddFrm) + * This field is set (reset) by the application to indicate that + * the OTG host must perform a transfer in an odd (micro)frame. + * This field is applicable for only periodic (isochronous and + * interrupt) transactions. + * * 1'b0: Even (micro)frame + * * 1'b1: Odd (micro)frame + * @devaddr: Device Address (DevAddr) + * This field selects the specific device serving as the data + * source or sink. + * @ec: Multi Count (MC) / Error Count (EC) + * When the Split Enable bit of the Host Channel-n Split Control + * register (HCSPLTn.SpltEna) is reset (1'b0), this field indicates + * to the host the number of transactions that should be executed + * per microframe for this endpoint. + * * 2'b00: Reserved. This field yields undefined results. + * * 2'b01: 1 transaction + * * 2'b10: 2 transactions to be issued for this endpoint per + * microframe + * * 2'b11: 3 transactions to be issued for this endpoint per + * microframe + * When HCSPLTn.SpltEna is set (1'b1), this field indicates the + * number of immediate retries to be performed for a periodic split + * transactions on transaction errors. This field must be set to at + * least 2'b01. + * @eptype: Endpoint Type (EPType) + * Indicates the transfer type selected. + * * 2'b00: Control + * * 2'b01: Isochronous + * * 2'b10: Bulk + * * 2'b11: Interrupt + * @lspddev: Low-Speed Device (LSpdDev) + * This field is set by the application to indicate that this + * channel is communicating to a low-speed device. + * @epdir: Endpoint Direction (EPDir) + * Indicates whether the transaction is IN or OUT. + * * 1'b0: OUT + * * 1'b1: IN + * @epnum: Endpoint Number (EPNum) + * Indicates the endpoint number on the device serving as the + * data source or sink. + * @mps: Maximum Packet Size (MPS) + * Indicates the maximum packet size of the associated endpoint. + */ + struct cvmx_usbcx_hccharx_s { + __BITFIELD_FIELD(u32 chena : 1, + __BITFIELD_FIELD(u32 chdis : 1, + __BITFIELD_FIELD(u32 oddfrm : 1, + __BITFIELD_FIELD(u32 devaddr : 7, + __BITFIELD_FIELD(u32 ec : 2, + __BITFIELD_FIELD(u32 eptype : 2, + __BITFIELD_FIELD(u32 lspddev : 1, + __BITFIELD_FIELD(u32 reserved_16_16 : 1, + __BITFIELD_FIELD(u32 epdir : 1, + __BITFIELD_FIELD(u32 epnum : 4, + __BITFIELD_FIELD(u32 mps : 11, + ;))))))))))) + } s; +}; + +/** + * cvmx_usbc#_hcfg + * + * Host Configuration Register (HCFG) + * + * This register configures the core after power-on. Do not make changes to this + * register after initializing the host. 
+ */ +union cvmx_usbcx_hcfg { + u32 u32; + /** + * struct cvmx_usbcx_hcfg_s + * @fslssupp: FS- and LS-Only Support (FSLSSupp) + * The application uses this bit to control the core's enumeration + * speed. Using this bit, the application can make the core + * enumerate as a FS host, even if the connected device supports + * HS traffic. Do not make changes to this field after initial + * programming. + * * 1'b0: HS/FS/LS, based on the maximum speed supported by + * the connected device + * * 1'b1: FS/LS-only, even if the connected device can support HS + * @fslspclksel: FS/LS PHY Clock Select (FSLSPclkSel) + * When the core is in FS Host mode + * * 2'b00: PHY clock is running at 30/60 MHz + * * 2'b01: PHY clock is running at 48 MHz + * * Others: Reserved + * When the core is in LS Host mode + * * 2'b00: PHY clock is running at 30/60 MHz. When the + * UTMI+/ULPI PHY Low Power mode is not selected, use + * 30/60 MHz. + * * 2'b01: PHY clock is running at 48 MHz. When the UTMI+ + * PHY Low Power mode is selected, use 48MHz if the PHY + * supplies a 48 MHz clock during LS mode. + * * 2'b10: PHY clock is running at 6 MHz. In USB 1.1 FS mode, + * use 6 MHz when the UTMI+ PHY Low Power mode is + * selected and the PHY supplies a 6 MHz clock during LS + * mode. If you select a 6 MHz clock during LS mode, you must + * do a soft reset. + * * 2'b11: Reserved + */ + struct cvmx_usbcx_hcfg_s { + __BITFIELD_FIELD(u32 reserved_3_31 : 29, + __BITFIELD_FIELD(u32 fslssupp : 1, + __BITFIELD_FIELD(u32 fslspclksel : 2, + ;))) + } s; +}; + +/** + * cvmx_usbc#_hcint# + * + * Host Channel-n Interrupt Register (HCINT) + * + * This register indicates the status of a channel with respect to USB- and + * AHB-related events. The application must read this register when the Host + * Channels Interrupt bit of the Core Interrupt register (GINTSTS.HChInt) is + * set. Before the application can read this register, it must first read + * the Host All Channels Interrupt (HAINT) register to get the exact channel + * number for the Host Channel-n Interrupt register. The application must clear + * the appropriate bit in this register to clear the corresponding bits in the + * HAINT and GINTSTS registers. + */ +union cvmx_usbcx_hcintx { + u32 u32; + /** + * struct cvmx_usbcx_hcintx_s + * @datatglerr: Data Toggle Error (DataTglErr) + * @frmovrun: Frame Overrun (FrmOvrun) + * @bblerr: Babble Error (BblErr) + * @xacterr: Transaction Error (XactErr) + * @nyet: NYET Response Received Interrupt (NYET) + * @ack: ACK Response Received Interrupt (ACK) + * @nak: NAK Response Received Interrupt (NAK) + * @stall: STALL Response Received Interrupt (STALL) + * @ahberr: This bit is always 0x0. + * @chhltd: Channel Halted (ChHltd) + * Indicates the transfer completed abnormally either because of + * any USB transaction error or in response to disable request by + * the application. + * @xfercompl: Transfer Completed (XferCompl) + * Transfer completed normally without any errors. 
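The HCINTn/HCINTMSKn pair documented here is the bottom of the interrupt hierarchy sketched earlier (GINTSTS.HChInt summarizes HAINT, which summarizes the per-channel HCINTn bits). A hedged sketch of the resulting dispatch-and-acknowledge pattern, again using a hypothetical csr_read32()/csr_write32() pair rather than the driver's real accessors:

	/* Illustrative only; the accessor names are placeholders. */
	static void example_poll_channel_irqs(int index)
	{
		union cvmx_usbcx_haint haint;
		union cvmx_usbcx_hcintx hcint;
		int ch;

		haint.u32 = csr_read32(CVMX_USBCX_HAINT(index));
		for (ch = 0; ch < 8; ch++) {	/* eight channels in use, per idle_hardware_channels */
			if (!(haint.s.haint & (1 << ch)))
				continue;
			hcint.u32 = csr_read32(CVMX_USBCX_HCINTX(ch, index));
			/* ... act on hcint.s.xfercompl, hcint.s.chhltd, ... */
			/* Write-1-to-clear: acknowledging here also drops the
			 * corresponding HAINT bit and, in turn, GINTSTS.HChInt. */
			csr_write32(CVMX_USBCX_HCINTX(ch, index), hcint.u32);
		}
	}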
+ */
+ struct cvmx_usbcx_hcintx_s {
+ __BITFIELD_FIELD(u32 reserved_11_31 : 21,
+ __BITFIELD_FIELD(u32 datatglerr : 1,
+ __BITFIELD_FIELD(u32 frmovrun : 1,
+ __BITFIELD_FIELD(u32 bblerr : 1,
+ __BITFIELD_FIELD(u32 xacterr : 1,
+ __BITFIELD_FIELD(u32 nyet : 1,
+ __BITFIELD_FIELD(u32 ack : 1,
+ __BITFIELD_FIELD(u32 nak : 1,
+ __BITFIELD_FIELD(u32 stall : 1,
+ __BITFIELD_FIELD(u32 ahberr : 1,
+ __BITFIELD_FIELD(u32 chhltd : 1,
+ __BITFIELD_FIELD(u32 xfercompl : 1,
+ ;))))))))))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hcintmsk#
+ *
+ * Host Channel-n Interrupt Mask Register (HCINTMSKn)
+ *
+ * This register reflects the mask for each channel status described in the
+ * previous section.
+ * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
+ */
+union cvmx_usbcx_hcintmskx {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hcintmskx_s
+ * @datatglerrmsk: Data Toggle Error Mask (DataTglErrMsk)
+ * @frmovrunmsk: Frame Overrun Mask (FrmOvrunMsk)
+ * @bblerrmsk: Babble Error Mask (BblErrMsk)
+ * @xacterrmsk: Transaction Error Mask (XactErrMsk)
+ * @nyetmsk: NYET Response Received Interrupt Mask (NyetMsk)
+ * @ackmsk: ACK Response Received Interrupt Mask (AckMsk)
+ * @nakmsk: NAK Response Received Interrupt Mask (NakMsk)
+ * @stallmsk: STALL Response Received Interrupt Mask (StallMsk)
+ * @ahberrmsk: AHB Error Mask (AHBErrMsk)
+ * @chhltdmsk: Channel Halted Mask (ChHltdMsk)
+ * @xfercomplmsk: Transfer Completed Mask (XferComplMsk)
+ */
+ struct cvmx_usbcx_hcintmskx_s {
+ __BITFIELD_FIELD(u32 reserved_11_31 : 21,
+ __BITFIELD_FIELD(u32 datatglerrmsk : 1,
+ __BITFIELD_FIELD(u32 frmovrunmsk : 1,
+ __BITFIELD_FIELD(u32 bblerrmsk : 1,
+ __BITFIELD_FIELD(u32 xacterrmsk : 1,
+ __BITFIELD_FIELD(u32 nyetmsk : 1,
+ __BITFIELD_FIELD(u32 ackmsk : 1,
+ __BITFIELD_FIELD(u32 nakmsk : 1,
+ __BITFIELD_FIELD(u32 stallmsk : 1,
+ __BITFIELD_FIELD(u32 ahberrmsk : 1,
+ __BITFIELD_FIELD(u32 chhltdmsk : 1,
+ __BITFIELD_FIELD(u32 xfercomplmsk : 1,
+ ;))))))))))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hcsplt#
+ *
+ * Host Channel-n Split Control Register (HCSPLT)
+ *
+ */
+union cvmx_usbcx_hcspltx {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hcspltx_s
+ * @spltena: Split Enable (SpltEna)
+ * The application sets this field to indicate that this channel is
+ * enabled to perform split transactions.
+ * @compsplt: Do Complete Split (CompSplt)
+ * The application sets this field to request the OTG host to
+ * perform a complete split transaction.
+ * @xactpos: Transaction Position (XactPos)
+ * This field is used to determine whether to send all, first,
+ * middle, or last payloads with each OUT transaction.
+ * * 2'b11: All. This is the entire data payload of this
+ * transaction (which is less than or equal to 188 bytes).
+ * * 2'b10: Begin. This is the first data payload of this
+ * transaction (which is larger than 188 bytes).
+ * * 2'b00: Mid. This is the middle payload of this transaction
+ * (which is larger than 188 bytes).
+ * * 2'b01: End. This is the last payload of this transaction
+ * (which is larger than 188 bytes).
+ * @hubaddr: Hub Address (HubAddr)
+ * This field holds the device address of the transaction
+ * translator's hub.
+ * @prtaddr: Port Address (PrtAddr)
+ * This field is the port number of the recipient transaction
+ * translator.
+ */ + struct cvmx_usbcx_hcspltx_s { + __BITFIELD_FIELD(u32 spltena : 1, + __BITFIELD_FIELD(u32 reserved_17_30 : 14, + __BITFIELD_FIELD(u32 compsplt : 1, + __BITFIELD_FIELD(u32 xactpos : 2, + __BITFIELD_FIELD(u32 hubaddr : 7, + __BITFIELD_FIELD(u32 prtaddr : 7, + ;)))))) + } s; +}; + +/** + * cvmx_usbc#_hctsiz# + * + * Host Channel-n Transfer Size Register (HCTSIZ) + * + */ +union cvmx_usbcx_hctsizx { + u32 u32; + /** + * struct cvmx_usbcx_hctsizx_s + * @dopng: Do Ping (DoPng) + * Setting this field to 1 directs the host to do PING protocol. + * @pid: PID (Pid) + * The application programs this field with the type of PID to use + * for the initial transaction. The host will maintain this field + * for the rest of the transfer. + * * 2'b00: DATA0 + * * 2'b01: DATA2 + * * 2'b10: DATA1 + * * 2'b11: MDATA (non-control)/SETUP (control) + * @pktcnt: Packet Count (PktCnt) + * This field is programmed by the application with the expected + * number of packets to be transmitted (OUT) or received (IN). + * The host decrements this count on every successful + * transmission or reception of an OUT/IN packet. Once this count + * reaches zero, the application is interrupted to indicate normal + * completion. + * @xfersize: Transfer Size (XferSize) + * For an OUT, this field is the number of data bytes the host will + * send during the transfer. + * For an IN, this field is the buffer size that the application + * has reserved for the transfer. The application is expected to + * program this field as an integer multiple of the maximum packet + * size for IN transactions (periodic and non-periodic). + */ + struct cvmx_usbcx_hctsizx_s { + __BITFIELD_FIELD(u32 dopng : 1, + __BITFIELD_FIELD(u32 pid : 2, + __BITFIELD_FIELD(u32 pktcnt : 10, + __BITFIELD_FIELD(u32 xfersize : 19, + ;)))) + } s; +}; + +/** + * cvmx_usbc#_hfir + * + * Host Frame Interval Register (HFIR) + * + * This register stores the frame interval information for the current speed to + * which the O2P USB core has enumerated. + */ +union cvmx_usbcx_hfir { + u32 u32; + /** + * struct cvmx_usbcx_hfir_s + * @frint: Frame Interval (FrInt) + * The value that the application programs to this field specifies + * the interval between two consecutive SOFs (FS) or micro- + * SOFs (HS) or Keep-Alive tokens (HS). This field contains the + * number of PHY clocks that constitute the required frame + * interval. The default value set in this field for a FS operation + * when the PHY clock frequency is 60 MHz. The application can + * write a value to this register only after the Port Enable bit of + * the Host Port Control and Status register (HPRT.PrtEnaPort) + * has been set. If no value is programmed, the core calculates + * the value based on the PHY clock specified in the FS/LS PHY + * Clock Select field of the Host Configuration register + * (HCFG.FSLSPclkSel). Do not change the value of this field + * after the initial configuration. + * * 125 us (PHY clock frequency for HS) + * * 1 ms (PHY clock frequency for FS/LS) + */ + struct cvmx_usbcx_hfir_s { + __BITFIELD_FIELD(u32 reserved_16_31 : 16, + __BITFIELD_FIELD(u32 frint : 16, + ;)) + } s; +}; + +/** + * cvmx_usbc#_hfnum + * + * Host Frame Number/Frame Time Remaining Register (HFNUM) + * + * This register indicates the current frame number. + * It also indicates the time remaining (in terms of the number of PHY clocks) + * in the current (micro)frame. 
+ */
+union cvmx_usbcx_hfnum {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hfnum_s
+ * @frrem: Frame Time Remaining (FrRem)
+ * Indicates the amount of time remaining in the current
+ * microframe (HS) or frame (FS/LS), in terms of PHY clocks.
+ * This field decrements on each PHY clock. When it reaches
+ * zero, this field is reloaded with the value in the Frame
+ * Interval register and a new SOF is transmitted on the USB.
+ * @frnum: Frame Number (FrNum)
+ * This field increments when a new SOF is transmitted on the
+ * USB, and is reset to 0 when it reaches 16'h3FFF.
+ */
+ struct cvmx_usbcx_hfnum_s {
+ __BITFIELD_FIELD(u32 frrem : 16,
+ __BITFIELD_FIELD(u32 frnum : 16,
+ ;))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hprt
+ *
+ * Host Port Control and Status Register (HPRT)
+ *
+ * This register is available in both Host and Device modes.
+ * Currently, the OTG Host supports only one port.
+ * A single register holds USB port-related information such as USB reset,
+ * enable, suspend, resume, connect status, and test mode for each port. The
+ * R_SS_WC bits in this register can trigger an interrupt to the application
+ * through the Host Port Interrupt bit of the Core Interrupt register
+ * (GINTSTS.PrtInt). On a Port Interrupt, the application must read this
+ * register and clear the bit that caused the interrupt. For the R_SS_WC bits,
+ * the application must write a 1 to the bit to clear the interrupt.
+ */
+union cvmx_usbcx_hprt {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hprt_s
+ * @prtspd: Port Speed (PrtSpd)
+ * Indicates the speed of the device attached to this port.
+ * * 2'b00: High speed
+ * * 2'b01: Full speed
+ * * 2'b10: Low speed
+ * * 2'b11: Reserved
+ * @prttstctl: Port Test Control (PrtTstCtl)
+ * The application writes a nonzero value to this field to put
+ * the port into a Test mode, and the corresponding pattern is
+ * signaled on the port.
+ * * 4'b0000: Test mode disabled
+ * * 4'b0001: Test_J mode
+ * * 4'b0010: Test_K mode
+ * * 4'b0011: Test_SE0_NAK mode
+ * * 4'b0100: Test_Packet mode
+ * * 4'b0101: Test_Force_Enable
+ * * Others: Reserved
+ * PrtSpd must be zero (i.e. the interface must be in high-speed
+ * mode) to use the PrtTstCtl test modes.
+ * @prtpwr: Port Power (PrtPwr)
+ * The application uses this field to control power to this port,
+ * and the core clears this bit on an overcurrent condition.
+ * * 1'b0: Power off
+ * * 1'b1: Power on
+ * @prtlnsts: Port Line Status (PrtLnSts)
+ * Indicates the current logic level of the USB data lines
+ * * Bit [10]: Logic level of D-
+ * * Bit [11]: Logic level of D+
+ * @prtrst: Port Reset (PrtRst)
+ * When the application sets this bit, a reset sequence is
+ * started on this port. The application must time the reset
+ * period and clear this bit after the reset sequence is
+ * complete.
+ * * 1'b0: Port not in reset
+ * * 1'b1: Port in reset
+ * The application must leave this bit set for at least a
+ * minimum duration mentioned below to start a reset on the
+ * port. The application can leave it set for another 10 ms in
+ * addition to the required minimum duration, before clearing
+ * the bit, even though there is no maximum limit set by the
+ * USB standard.
+ * * High speed: 50 ms
+ * * Full speed/Low speed: 10 ms
+ * @prtsusp: Port Suspend (PrtSusp)
+ * The application sets this bit to put this port in Suspend
+ * mode. The core only stops sending SOFs when this is set.
+ * To stop the PHY clock, the application must set the Port
+ * Clock Stop bit, which will assert the suspend input pin of
+ * the PHY.
+ * The read value of this bit reflects the current suspend + * status of the port. This bit is cleared by the core after a + * remote wakeup signal is detected or the application sets + * the Port Reset bit or Port Resume bit in this register or the + * Resume/Remote Wakeup Detected Interrupt bit or + * Disconnect Detected Interrupt bit in the Core Interrupt + * register (GINTSTS.WkUpInt or GINTSTS.DisconnInt, + * respectively). + * * 1'b0: Port not in Suspend mode + * * 1'b1: Port in Suspend mode + * @prtres: Port Resume (PrtRes) + * The application sets this bit to drive resume signaling on + * the port. The core continues to drive the resume signal + * until the application clears this bit. + * If the core detects a USB remote wakeup sequence, as + * indicated by the Port Resume/Remote Wakeup Detected + * Interrupt bit of the Core Interrupt register + * (GINTSTS.WkUpInt), the core starts driving resume + * signaling without application intervention and clears this bit + * when it detects a disconnect condition. The read value of + * this bit indicates whether the core is currently driving + * resume signaling. + * * 1'b0: No resume driven + * * 1'b1: Resume driven + * @prtovrcurrchng: Port Overcurrent Change (PrtOvrCurrChng) + * The core sets this bit when the status of the Port + * Overcurrent Active bit (bit 4) in this register changes. + * @prtovrcurract: Port Overcurrent Active (PrtOvrCurrAct) + * Indicates the overcurrent condition of the port. + * * 1'b0: No overcurrent condition + * * 1'b1: Overcurrent condition + * @prtenchng: Port Enable/Disable Change (PrtEnChng) + * The core sets this bit when the status of the Port Enable bit + * [2] of this register changes. + * @prtena: Port Enable (PrtEna) + * A port is enabled only by the core after a reset sequence, + * and is disabled by an overcurrent condition, a disconnect + * condition, or by the application clearing this bit. The + * application cannot set this bit by a register write. It can only + * clear it to disable the port. This bit does not trigger any + * interrupt to the application. + * * 1'b0: Port disabled + * * 1'b1: Port enabled + * @prtconndet: Port Connect Detected (PrtConnDet) + * The core sets this bit when a device connection is detected + * to trigger an interrupt to the application using the Host Port + * Interrupt bit of the Core Interrupt register (GINTSTS.PrtInt). + * The application must write a 1 to this bit to clear the + * interrupt. + * @prtconnsts: Port Connect Status (PrtConnSts) + * * 0: No device is attached to the port. + * * 1: A device is attached to the port. + */ + struct cvmx_usbcx_hprt_s { + __BITFIELD_FIELD(u32 reserved_19_31 : 13, + __BITFIELD_FIELD(u32 prtspd : 2, + __BITFIELD_FIELD(u32 prttstctl : 4, + __BITFIELD_FIELD(u32 prtpwr : 1, + __BITFIELD_FIELD(u32 prtlnsts : 2, + __BITFIELD_FIELD(u32 reserved_9_9 : 1, + __BITFIELD_FIELD(u32 prtrst : 1, + __BITFIELD_FIELD(u32 prtsusp : 1, + __BITFIELD_FIELD(u32 prtres : 1, + __BITFIELD_FIELD(u32 prtovrcurrchng : 1, + __BITFIELD_FIELD(u32 prtovrcurract : 1, + __BITFIELD_FIELD(u32 prtenchng : 1, + __BITFIELD_FIELD(u32 prtena : 1, + __BITFIELD_FIELD(u32 prtconndet : 1, + __BITFIELD_FIELD(u32 prtconnsts : 1, + ;))))))))))))))) + } s; +}; + +/** + * cvmx_usbc#_hptxfsiz + * + * Host Periodic Transmit FIFO Size Register (HPTXFSIZ) + * + * This register holds the size and the memory start address of the Periodic + * TxFIFO, as shown in Figures 310 and 311. 
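Because several HPRT bits (PrtEna, PrtConnDet, PrtEnChng, PrtOvrCurrChng) are cleared by writing a 1, a naive read-modify-write of this register can acknowledge a pending change or disable the port as a side effect. A hedged sketch of the usual defensive pattern (the helper name is illustrative only):

static inline u32 octeon_usbc_hprt_for_modify(u32 hprt_raw)
{
	union cvmx_usbcx_hprt hprt;

	hprt.u32 = hprt_raw;
	/* Zero the write-1-to-clear bits so a later write-back cannot ack them. */
	hprt.s.prtena = 0;		/* writing 1 here would disable the port */
	hprt.s.prtconndet = 0;
	hprt.s.prtenchng = 0;
	hprt.s.prtovrcurrchng = 0;
	return hprt.u32;
}

The caller sets the bit it actually wants (for example prtrst) in the returned value and writes that back, and only writes the change/detect bits when it really intends to acknowledge them.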
+ */ +union cvmx_usbcx_hptxfsiz { + u32 u32; + /** + * struct cvmx_usbcx_hptxfsiz_s + * @ptxfsize: Host Periodic TxFIFO Depth (PTxFSize) + * This value is in terms of 32-bit words. + * * Minimum value is 16 + * * Maximum value is 32768 + * @ptxfstaddr: Host Periodic TxFIFO Start Address (PTxFStAddr) + */ + struct cvmx_usbcx_hptxfsiz_s { + __BITFIELD_FIELD(u32 ptxfsize : 16, + __BITFIELD_FIELD(u32 ptxfstaddr : 16, + ;)) + } s; +}; + +/** + * cvmx_usbc#_hptxsts + * + * Host Periodic Transmit FIFO/Queue Status Register (HPTXSTS) + * + * This read-only register contains the free space information for the Periodic + * TxFIFO and the Periodic Transmit Request Queue + */ +union cvmx_usbcx_hptxsts { + u32 u32; + /** + * struct cvmx_usbcx_hptxsts_s + * @ptxqtop: Top of the Periodic Transmit Request Queue (PTxQTop) + * This indicates the entry in the Periodic Tx Request Queue that + * is currently being processes by the MAC. + * This register is used for debugging. + * * Bit [31]: Odd/Even (micro)frame + * - 1'b0: send in even (micro)frame + * - 1'b1: send in odd (micro)frame + * * Bits [30:27]: Channel/endpoint number + * * Bits [26:25]: Type + * - 2'b00: IN/OUT + * - 2'b01: Zero-length packet + * - 2'b10: CSPLIT + * - 2'b11: Disable channel command + * * Bit [24]: Terminate (last entry for the selected + * channel/endpoint) + * @ptxqspcavail: Periodic Transmit Request Queue Space Available + * (PTxQSpcAvail) + * Indicates the number of free locations available to be written + * in the Periodic Transmit Request Queue. This queue holds both + * IN and OUT requests. + * * 8'h0: Periodic Transmit Request Queue is full + * * 8'h1: 1 location available + * * 8'h2: 2 locations available + * * n: n locations available (0..8) + * * Others: Reserved + * @ptxfspcavail: Periodic Transmit Data FIFO Space Available + * (PTxFSpcAvail) + * Indicates the number of free locations available to be written + * to in the Periodic TxFIFO. + * Values are in terms of 32-bit words + * * 16'h0: Periodic TxFIFO is full + * * 16'h1: 1 word available + * * 16'h2: 2 words available + * * 16'hn: n words available (where 0..32768) + * * 16'h8000: 32768 words available + * * Others: Reserved + */ + struct cvmx_usbcx_hptxsts_s { + __BITFIELD_FIELD(u32 ptxqtop : 8, + __BITFIELD_FIELD(u32 ptxqspcavail : 8, + __BITFIELD_FIELD(u32 ptxfspcavail : 16, + ;))) + } s; +}; + +/** + * cvmx_usbn#_clk_ctl + * + * USBN_CLK_CTL = USBN's Clock Control + * + * This register is used to control the frequency of the hclk and the + * hreset and phy_rst signals. + */ +union cvmx_usbnx_clk_ctl { + u64 u64; + /** + * struct cvmx_usbnx_clk_ctl_s + * @divide2: The 'hclk' used by the USB subsystem is derived + * from the eclk. + * Also see the field DIVIDE. DIVIDE2<1> must currently + * be zero because it is not implemented, so the maximum + * ratio of eclk/hclk is currently 16. + * The actual divide number for hclk is: + * (DIVIDE2 + 1) * (DIVIDE + 1) + * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to + * generate the hclk in the USB Subsystem is held + * in reset. This bit must be set to '0' before + * changing the value os DIVIDE in this register. + * The reset to the HCLK_DIVIDERis also asserted + * when core reset is asserted. + * @p_x_on: Force USB-PHY on during suspend. + * '1' USB-PHY XO block is powered-down during + * suspend. + * '0' USB-PHY XO block is powered-up during + * suspend. + * The value of this field must be set while POR is + * active. 
+ * @p_rtype: PHY reference clock type
+ *	On CN50XX/CN52XX/CN56XX the values are:
+ *	'0' The USB-PHY uses a 12MHz crystal as a clock source
+ *	    at the USB_XO and USB_XI pins.
+ *	'1' Reserved.
+ *	'2' The USB_PHY uses 12/24/48MHz 2.5V board clock at the
+ *	    USB_XO pin. USB_XI should be tied to ground in this
+ *	    case.
+ *	'3' Reserved.
+ *	On CN3xxx bits 14 and 15 are p_xenbn and p_rclk and values are:
+ *	'0' Reserved.
+ *	'1' Reserved.
+ *	'2' The PHY PLL uses the XO block output as a reference.
+ *	    The XO block uses an external clock supplied on the
+ *	    XO pin. USB_XI should be tied to ground for this
+ *	    usage.
+ *	'3' The XO block uses the clock from a crystal.
+ * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
+ *	remain powered in Suspend Mode.
+ *	'1' The USB-PHY XO Bias, Bandgap and PLL are
+ *	powered down in suspend mode.
+ *	The value of this field must be set while POR is
+ *	active.
+ * @p_c_sel: PHY clock speed select.
+ *	Selects the reference clock / crystal frequency.
+ *	'11': Reserved
+ *	'10': 48 MHz (reserved when a crystal is used)
+ *	'01': 24 MHz (reserved when a crystal is used)
+ *	'00': 12 MHz
+ *	The value of this field must be set while POR is
+ *	active.
+ *	NOTE: if a crystal is used as a reference clock,
+ *	this field must be set to 12 MHz.
+ * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
+ * @sd_mode: Scaledown mode for the USBC. Control timing events
+ *	in the USBC, for normal operation this must be '0'.
+ * @s_bist: Starts bist on the hclk memories, during the '0'
+ *	to '1' transition.
+ * @por: Power On Reset for the PHY.
+ *	Resets all the PHYS registers and state machines.
+ * @enable: When '1' allows the generation of the hclk. When
+ *	'0' the hclk will not be generated. SEE DIVIDE
+ *	field of this register.
+ * @prst: When this field is '0' the reset associated with
+ *	the phy_clk functionality in the USB Subsystem is
+ *	held in reset. This bit should not be set to '1'
+ *	until the time it takes 6 clocks (hclk or phy_clk,
+ *	whichever is slower) has passed. Under normal
+ *	operation once this bit is set to '1' it should not
+ *	be set to '0'.
+ * @hrst: When this field is '0' the reset associated with
+ *	the hclk functionality in the USB Subsystem is
+ *	held in reset. This bit should not be set to '1'
+ *	until 12ms after phy_clk is stable. Under normal
+ *	operation, once this bit is set to '1' it should
+ *	not be set to '0'.
+ * @divide: The frequency of 'hclk' used by the USB subsystem
+ *	is the eclk frequency divided by the value of
+ *	(DIVIDE2 + 1) * (DIVIDE + 1), also see the field
+ *	DIVIDE2 of this register.
+ *	The hclk frequency should be less than 125 MHz.
+ *	After writing a value to this field the SW should
+ *	read the field for the value written.
+ *	The ENABLE field of this register should not be set
+ *	until AFTER this field is set and then read.
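Putting the divider rule above into code: DIVIDE2<1> is not implemented, so only eclk/hclk ratios of 1..16 are reachable, and the ratio has to be rounded up so hclk stays below 125 MHz. A minimal sketch under those assumptions (the helper name is illustrative; a real caller would still write DIVIDE, read it back, and only then set ENABLE, as required above):

static void usbn_pick_hclk_divider(u64 eclk_hz, u64 *divide, u64 *divide2)
{
	/* hclk = eclk / ((DIVIDE2 + 1) * (DIVIDE + 1)), kept below 125 MHz */
	u64 ratio = eclk_hz / 125000000ULL + 1;

	if (ratio > 16)
		ratio = 16;		/* hardware limit on the combined divider */

	if (ratio <= 8) {
		*divide = ratio - 1;	/* DIVIDE alone covers ratios 1..8 */
		*divide2 = 0;
	} else {
		*divide = (ratio + 1) / 2 - 1;	/* the extra /2 comes from DIVIDE2 = 1 */
		*divide2 = 1;
	}
}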
+ */ + struct cvmx_usbnx_clk_ctl_s { + __BITFIELD_FIELD(u64 reserved_20_63 : 44, + __BITFIELD_FIELD(u64 divide2 : 2, + __BITFIELD_FIELD(u64 hclk_rst : 1, + __BITFIELD_FIELD(u64 p_x_on : 1, + __BITFIELD_FIELD(u64 p_rtype : 2, + __BITFIELD_FIELD(u64 p_com_on : 1, + __BITFIELD_FIELD(u64 p_c_sel : 2, + __BITFIELD_FIELD(u64 cdiv_byp : 1, + __BITFIELD_FIELD(u64 sd_mode : 2, + __BITFIELD_FIELD(u64 s_bist : 1, + __BITFIELD_FIELD(u64 por : 1, + __BITFIELD_FIELD(u64 enable : 1, + __BITFIELD_FIELD(u64 prst : 1, + __BITFIELD_FIELD(u64 hrst : 1, + __BITFIELD_FIELD(u64 divide : 3, + ;))))))))))))))) + } s; +}; + +/** + * cvmx_usbn#_usbp_ctl_status + * + * USBN_USBP_CTL_STATUS = USBP Control And Status Register + * + * Contains general control and status information for the USBN block. + */ +union cvmx_usbnx_usbp_ctl_status { + u64 u64; + /** + * struct cvmx_usbnx_usbp_ctl_status_s + * @txrisetune: HS Transmitter Rise/Fall Time Adjustment + * @txvreftune: HS DC Voltage Level Adjustment + * @txfslstune: FS/LS Source Impedance Adjustment + * @txhsxvtune: Transmitter High-Speed Crossover Adjustment + * @sqrxtune: Squelch Threshold Adjustment + * @compdistune: Disconnect Threshold Adjustment + * @otgtune: VBUS Valid Threshold Adjustment + * @otgdisable: OTG Block Disable + * @portreset: Per_Port Reset + * @drvvbus: Drive VBUS + * @lsbist: Low-Speed BIST Enable. + * @fsbist: Full-Speed BIST Enable. + * @hsbist: High-Speed BIST Enable. + * @bist_done: PHY Bist Done. + * Asserted at the end of the PHY BIST sequence. + * @bist_err: PHY Bist Error. + * Indicates an internal error was detected during + * the BIST sequence. + * @tdata_out: PHY Test Data Out. + * Presents either internally generated signals or + * test register contents, based upon the value of + * test_data_out_sel. + * @siddq: Drives the USBP (USB-PHY) SIDDQ input. + * Normally should be set to zero. + * When customers have no intent to use USB PHY + * interface, they should: + * - still provide 3.3V to USB_VDD33, and + * - tie USB_REXT to 3.3V supply, and + * - set USBN*_USBP_CTL_STATUS[SIDDQ]=1 + * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable + * @dma_bmode: When set to 1 the L2C DMA address will be updated + * with byte-counts between packets. When set to 0 + * the L2C DMA address is incremented to the next + * 4-byte aligned address after adding byte-count. + * @usbc_end: Bigendian input to the USB Core. This should be + * set to '0' for operation. + * @usbp_bist: PHY, This is cleared '0' to run BIST on the USBP. + * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP. + * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY. + * This signal enables the pull-down resistance on + * the D+ line. '1' pull down-resistance is connected + * to D+/ '0' pull down resistance is not connected + * to D+. When an A/B device is acting as a host + * (downstream-facing port), dp_pulldown and + * dm_pulldown are enabled. This must not toggle + * during normal operation. + * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY. + * This signal enables the pull-down resistance on + * the D- line. '1' pull down-resistance is connected + * to D-. '0' pull down resistance is not connected + * to D-. When an A/B device is acting as a host + * (downstream-facing port), dp_pulldown and + * dm_pulldown are enabled. This must not toggle + * during normal operation. + * @hst_mode: When '0' the USB is acting as HOST, when '1' + * USB is acting as device. This field needs to be + * set while the USB is in reset. 
+ * @tuning: Transmitter Tuning for High-Speed Operation.
+ *	Tunes the current supply and rise/fall output
+ *	times for high-speed operation.
+ *	[20:19] == 11: Current supply increased
+ *	approximately 9%
+ *	[20:19] == 10: Current supply increased
+ *	approximately 4.5%
+ *	[20:19] == 01: Design default.
+ *	[20:19] == 00: Current supply decreased
+ *	approximately 4.5%
+ *	[22:21] == 11: Rise and fall times are increased.
+ *	[22:21] == 10: Design default.
+ *	[22:21] == 01: Rise and fall times are decreased.
+ *	[22:21] == 00: Rise and fall times are decreased
+ *	further as compared to the 01 setting.
+ * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
+ *	Enables or disables bit stuffing on data[15:8]
+ *	when bit-stuffing is enabled.
+ * @tx_bs_en: Transmit Bit Stuffing on [7:0].
+ *	Enables or disables bit stuffing on data[7:0]
+ *	when bit-stuffing is enabled.
+ * @loop_enb: PHY Loopback Test Enable.
+ *	'1': During data transmission the receive is
+ *	enabled.
+ *	'0': During data transmission the receive is
+ *	disabled.
+ *	Must be '0' for normal operation.
+ * @vtest_enb: Analog Test Pin Enable.
+ *	'1' The PHY's analog_test pin is enabled for the
+ *	input and output of applicable analog test signals.
+ *	'0' The analog_test pin is disabled.
+ * @bist_enb: Built-In Self Test Enable.
+ *	Used to activate BIST in the PHY.
+ * @tdata_sel: Test Data Out Select.
+ *	'1' test_data_out[3:0] (PHY) register contents
+ *	are output. '0' internally generated signals are
+ *	output.
+ * @taddr_in: Mode Address for Test Interface.
+ *	Specifies the register address for writing to or
+ *	reading from the PHY test interface register.
+ * @tdata_in: Internal Testing Register Input Data and Select
+ *	This is a test bus. Data is present on [3:0],
+ *	and its corresponding select (enable) is present
+ *	on bits [7:4].
+ * @ate_reset: Reset input from automatic test equipment.
+ *	This is a test signal. When the USB Core is
+ *	powered up (not in Suspend Mode), an automatic
+ *	tester can use this to disable phy_clock and
+ *	free_clk, then re-enable them with an aligned
+ *	phase.
+ *	'1': The phy_clk and free_clk outputs are
+ *	disabled. '0': The phy_clock and free_clk outputs
+ *	are available within a specific period after the
+ *	de-assertion.
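The SIDDQ description above is the documented recipe for parking a PHY that a board never uses. A hedged sketch of what platform code might do (the CVMX_USBNX_USBP_CTL_STATUS() address macro and the function name are assumptions made for illustration; cvmx_read_csr()/cvmx_write_csr() are the accessors used elsewhere in this driver):

static void octeon_usbn_park_unused_phy(int block)
{
	union cvmx_usbnx_usbp_ctl_status ctl;

	ctl.u64 = cvmx_read_csr(CVMX_USBNX_USBP_CTL_STATUS(block));
	ctl.s.siddq = 1;	/* quiesce the analog PHY */
	cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(block), ctl.u64);
}

Even with SIDDQ set, the board still has to supply 3.3 V on USB_VDD33 and tie USB_REXT to the 3.3 V rail, per the field description.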
+ */ + struct cvmx_usbnx_usbp_ctl_status_s { + __BITFIELD_FIELD(u64 txrisetune : 1, + __BITFIELD_FIELD(u64 txvreftune : 4, + __BITFIELD_FIELD(u64 txfslstune : 4, + __BITFIELD_FIELD(u64 txhsxvtune : 2, + __BITFIELD_FIELD(u64 sqrxtune : 3, + __BITFIELD_FIELD(u64 compdistune : 3, + __BITFIELD_FIELD(u64 otgtune : 3, + __BITFIELD_FIELD(u64 otgdisable : 1, + __BITFIELD_FIELD(u64 portreset : 1, + __BITFIELD_FIELD(u64 drvvbus : 1, + __BITFIELD_FIELD(u64 lsbist : 1, + __BITFIELD_FIELD(u64 fsbist : 1, + __BITFIELD_FIELD(u64 hsbist : 1, + __BITFIELD_FIELD(u64 bist_done : 1, + __BITFIELD_FIELD(u64 bist_err : 1, + __BITFIELD_FIELD(u64 tdata_out : 4, + __BITFIELD_FIELD(u64 siddq : 1, + __BITFIELD_FIELD(u64 txpreemphasistune : 1, + __BITFIELD_FIELD(u64 dma_bmode : 1, + __BITFIELD_FIELD(u64 usbc_end : 1, + __BITFIELD_FIELD(u64 usbp_bist : 1, + __BITFIELD_FIELD(u64 tclk : 1, + __BITFIELD_FIELD(u64 dp_pulld : 1, + __BITFIELD_FIELD(u64 dm_pulld : 1, + __BITFIELD_FIELD(u64 hst_mode : 1, + __BITFIELD_FIELD(u64 tuning : 4, + __BITFIELD_FIELD(u64 tx_bs_enh : 1, + __BITFIELD_FIELD(u64 tx_bs_en : 1, + __BITFIELD_FIELD(u64 loop_enb : 1, + __BITFIELD_FIELD(u64 vtest_enb : 1, + __BITFIELD_FIELD(u64 bist_enb : 1, + __BITFIELD_FIELD(u64 tdata_sel : 1, + __BITFIELD_FIELD(u64 taddr_in : 4, + __BITFIELD_FIELD(u64 tdata_in : 8, + __BITFIELD_FIELD(u64 ate_reset : 1, + ;))))))))))))))))))))))))))))))))))) + } s; +}; + +#endif /* __OCTEON_HCD_H__ */ diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig new file mode 100644 index 000000000000..5319909eb2f6 --- /dev/null +++ b/drivers/staging/octeon/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +config OCTEON_ETHERNET + tristate "Cavium Networks Octeon Ethernet support" + depends on CAVIUM_OCTEON_SOC || COMPILE_TEST + depends on NETDEVICES + select PHYLIB + select MDIO_OCTEON + help + This driver supports the builtin ethernet ports on Cavium + Networks' products in the Octeon family. This driver supports the + CN3XXX and CN5XXX Octeon processors. + + To compile this driver as a module, choose M here. The module + will be called octeon-ethernet. + diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile new file mode 100644 index 000000000000..3887cf5f1e84 --- /dev/null +++ b/drivers/staging/octeon/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (C) 2005-2009 Cavium Networks +# + +# +# Makefile for Cavium OCTEON on-board ethernet driver +# + +obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o + +octeon-ethernet-y := ethernet.o +octeon-ethernet-y += ethernet-mdio.o +octeon-ethernet-y += ethernet-mem.o +octeon-ethernet-y += ethernet-rgmii.o +octeon-ethernet-y += ethernet-rx.o +octeon-ethernet-y += ethernet-sgmii.o +octeon-ethernet-y += ethernet-spi.o +octeon-ethernet-y += ethernet-tx.o diff --git a/drivers/staging/octeon/TODO b/drivers/staging/octeon/TODO new file mode 100644 index 000000000000..67a0a1f6b922 --- /dev/null +++ b/drivers/staging/octeon/TODO @@ -0,0 +1,9 @@ +This driver is functional and supports Ethernet on OCTEON+/OCTEON2/OCTEON3 +chips at least up to CN7030. + +TODO: + - general code review and clean up + - make driver self-contained instead of being split between staging and + arch/mips/cavium-octeon. 
+ +Contact: Aaro Koskinen <aaro.koskinen@iki.fi> diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h new file mode 100644 index 000000000000..ef9e767b0e2e --- /dev/null +++ b/drivers/staging/octeon/ethernet-defines.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file is based on code from OCTEON SDK by Cavium Networks. + * + * Copyright (c) 2003-2007 Cavium Networks + */ + +/* + * A few defines are used to control the operation of this driver: + * USE_ASYNC_IOBDMA + * Use asynchronous IO access to hardware. This uses Octeon's asynchronous + * IOBDMAs to issue IO accesses without stalling. Set this to zero + * to disable this. Note that IOBDMAs require CVMSEG. + * REUSE_SKBUFFS_WITHOUT_FREE + * Allows the TX path to free an skbuff into the FPA hardware pool. This + * can significantly improve performance for forwarding and bridging, but + * may be somewhat dangerous. Checks are made, but if any buffer is reused + * without the proper Linux cleanup, the networking stack may have very + * bizarre bugs. + */ +#ifndef __ETHERNET_DEFINES_H__ +#define __ETHERNET_DEFINES_H__ + +#ifdef CONFIG_NETFILTER +#define REUSE_SKBUFFS_WITHOUT_FREE 0 +#else +#define REUSE_SKBUFFS_WITHOUT_FREE 1 +#endif + +#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0) + +/* Maximum number of SKBs to try to free per xmit packet. */ +#define MAX_OUT_QUEUE_DEPTH 1000 + +#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(u32)) +#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(u32)) + +#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS + 1) + +#endif /* __ETHERNET_DEFINES_H__ */ diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c new file mode 100644 index 000000000000..c798672d61b2 --- /dev/null +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file is based on code from OCTEON SDK by Cavium Networks. 
+ * + * Copyright (c) 2003-2007 Cavium Networks + */ + +#include <linux/kernel.h> +#include <linux/ethtool.h> +#include <linux/phy.h> +#include <linux/ratelimit.h> +#include <linux/of_mdio.h> +#include <generated/utsrelease.h> +#include <net/dst.h> + +#include "octeon-ethernet.h" +#include "ethernet-defines.h" +#include "ethernet-mdio.h" +#include "ethernet-util.h" + +static void cvm_oct_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->version, UTS_RELEASE, sizeof(info->version)); + strlcpy(info->bus_info, "Builtin", sizeof(info->bus_info)); +} + +static int cvm_oct_nway_reset(struct net_device *dev) +{ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (dev->phydev) + return phy_start_aneg(dev->phydev); + + return -EINVAL; +} + +const struct ethtool_ops cvm_oct_ethtool_ops = { + .get_drvinfo = cvm_oct_get_drvinfo, + .nway_reset = cvm_oct_nway_reset, + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +/** + * cvm_oct_ioctl - IOCTL support for PHY control + * @dev: Device to change + * @rq: the request + * @cmd: the command + * + * Returns Zero on success + */ +int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -EINVAL; + + return phy_mii_ioctl(dev->phydev, rq, cmd); +} + +void cvm_oct_note_carrier(struct octeon_ethernet *priv, + union cvmx_helper_link_info li) +{ + if (li.s.link_up) { + pr_notice_ratelimited("%s: %u Mbps %s duplex, port %d, queue %d\n", + netdev_name(priv->netdev), li.s.speed, + (li.s.full_duplex) ? "Full" : "Half", + priv->port, priv->queue); + } else { + pr_notice_ratelimited("%s: Link down\n", + netdev_name(priv->netdev)); + } +} + +void cvm_oct_adjust_link(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + union cvmx_helper_link_info link_info; + + link_info.u64 = 0; + link_info.s.link_up = dev->phydev->link ? 1 : 0; + link_info.s.full_duplex = dev->phydev->duplex ? 1 : 0; + link_info.s.speed = dev->phydev->speed; + priv->link_info = link_info.u64; + + /* + * The polling task need to know about link status changes. 
+ */ + if (priv->poll) + priv->poll(dev); + + if (priv->last_link != dev->phydev->link) { + priv->last_link = dev->phydev->link; + cvmx_helper_link_set(priv->port, link_info); + cvm_oct_note_carrier(priv, link_info); + } +} + +int cvm_oct_common_stop(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + int interface = INTERFACE(priv->port); + union cvmx_helper_link_info link_info; + union cvmx_gmxx_prtx_cfg gmx_cfg; + int index = INDEX(priv->port); + + gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + gmx_cfg.s.en = 0; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); + + priv->poll = NULL; + + if (dev->phydev) + phy_disconnect(dev->phydev); + + if (priv->last_link) { + link_info.u64 = 0; + priv->last_link = 0; + + cvmx_helper_link_set(priv->port, link_info); + cvm_oct_note_carrier(priv, link_info); + } + return 0; +} + +/** + * cvm_oct_phy_setup_device - setup the PHY + * + * @dev: Device to setup + * + * Returns Zero on success, negative on failure + */ +int cvm_oct_phy_setup_device(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + struct device_node *phy_node; + struct phy_device *phydev = NULL; + + if (!priv->of_node) + goto no_phy; + + phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0); + if (!phy_node && of_phy_is_fixed_link(priv->of_node)) { + int rc; + + rc = of_phy_register_fixed_link(priv->of_node); + if (rc) + return rc; + + phy_node = of_node_get(priv->of_node); + } + if (!phy_node) + goto no_phy; + + phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0, + priv->phy_mode); + of_node_put(phy_node); + + if (!phydev) + return -ENODEV; + + priv->last_link = 0; + phy_start(phydev); + + return 0; +no_phy: + /* If there is no phy, assume a direct MAC connection and that + * the link is up. + */ + netif_carrier_on(dev); + return 0; +} diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h new file mode 100644 index 000000000000..e3771d48c49b --- /dev/null +++ b/drivers/staging/octeon/ethernet-mdio.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file is based on code from OCTEON SDK by Cavium Networks. + * + * Copyright (c) 2003-2007 Cavium Networks + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/string.h> +#include <linux/ethtool.h> +#include <linux/seq_file.h> +#include <linux/proc_fs.h> +#include <net/dst.h> +#ifdef CONFIG_XFRM +#include <linux/xfrm.h> +#include <net/xfrm.h> +#endif /* CONFIG_XFRM */ + +extern const struct ethtool_ops cvm_oct_ethtool_ops; + +void octeon_mdiobus_force_mod_depencency(void); + +int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +int cvm_oct_phy_setup_device(struct net_device *dev); diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c new file mode 100644 index 000000000000..532594957ebc --- /dev/null +++ b/drivers/staging/octeon/ethernet-mem.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file is based on code from OCTEON SDK by Cavium Networks. 
+ * + * Copyright (c) 2003-2010 Cavium Networks + */ + +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/slab.h> + +#include "octeon-ethernet.h" +#include "ethernet-mem.h" +#include "ethernet-defines.h" + +/** + * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs + * @pool: Pool to allocate an skbuff for + * @size: Size of the buffer needed for the pool + * @elements: Number of buffers to allocate + * + * Returns the actual number of buffers allocated. + */ +static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) +{ + int freed = elements; + + while (freed) { + struct sk_buff *skb = dev_alloc_skb(size + 256); + + if (unlikely(!skb)) + break; + skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f)); + *(struct sk_buff **)(skb->data - sizeof(void *)) = skb; + cvmx_fpa_free(skb->data, pool, size / 128); + freed--; + } + return elements - freed; +} + +/** + * cvm_oct_free_hw_skbuff- free hardware pool skbuffs + * @pool: Pool to allocate an skbuff for + * @size: Size of the buffer needed for the pool + * @elements: Number of buffers to allocate + */ +static void cvm_oct_free_hw_skbuff(int pool, int size, int elements) +{ + char *memory; + + do { + memory = cvmx_fpa_alloc(pool); + if (memory) { + struct sk_buff *skb = + *(struct sk_buff **)(memory - sizeof(void *)); + elements--; + dev_kfree_skb(skb); + } + } while (memory); + + if (elements < 0) + pr_warn("Freeing of pool %u had too many skbuffs (%d)\n", + pool, elements); + else if (elements > 0) + pr_warn("Freeing of pool %u is missing %d skbuffs\n", + pool, elements); +} + +/** + * cvm_oct_fill_hw_memory - fill a hardware pool with memory. + * @pool: Pool to populate + * @size: Size of each buffer in the pool + * @elements: Number of buffers to allocate + * + * Returns the actual number of buffers allocated. + */ +static int cvm_oct_fill_hw_memory(int pool, int size, int elements) +{ + char *memory; + char *fpa; + int freed = elements; + + while (freed) { + /* + * FPA memory must be 128 byte aligned. Since we are + * aligning we need to save the original pointer so we + * can feed it to kfree when the memory is returned to + * the kernel. + * + * We allocate an extra 256 bytes to allow for + * alignment and space for the original pointer saved + * just before the block. 
+ */ + memory = kmalloc(size + 256, GFP_ATOMIC); + if (unlikely(!memory)) { + pr_warn("Unable to allocate %u bytes for FPA pool %d\n", + elements * size, pool); + break; + } + fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL); + *((char **)fpa - 1) = memory; + cvmx_fpa_free(fpa, pool, 0); + freed--; + } + return elements - freed; +} + +/** + * cvm_oct_free_hw_memory - Free memory allocated by cvm_oct_fill_hw_memory + * @pool: FPA pool to free + * @size: Size of each buffer in the pool + * @elements: Number of buffers that should be in the pool + */ +static void cvm_oct_free_hw_memory(int pool, int size, int elements) +{ + char *memory; + char *fpa; + + do { + fpa = cvmx_fpa_alloc(pool); + if (fpa) { + elements--; + fpa = (char *)phys_to_virt(cvmx_ptr_to_phys(fpa)); + memory = *((char **)fpa - 1); + kfree(memory); + } + } while (fpa); + + if (elements < 0) + pr_warn("Freeing of pool %u had too many buffers (%d)\n", + pool, elements); + else if (elements > 0) + pr_warn("Warning: Freeing of pool %u is missing %d buffers\n", + pool, elements); +} + +int cvm_oct_mem_fill_fpa(int pool, int size, int elements) +{ + int freed; + + if (pool == CVMX_FPA_PACKET_POOL) + freed = cvm_oct_fill_hw_skbuff(pool, size, elements); + else + freed = cvm_oct_fill_hw_memory(pool, size, elements); + return freed; +} + +void cvm_oct_mem_empty_fpa(int pool, int size, int elements) +{ + if (pool == CVMX_FPA_PACKET_POOL) + cvm_oct_free_hw_skbuff(pool, size, elements); + else + cvm_oct_free_hw_memory(pool, size, elements); +} diff --git a/drivers/staging/octeon/ethernet-mem.h b/drivers/staging/octeon/ethernet-mem.h new file mode 100644 index 000000000000..692dcdb7154d --- /dev/null +++ b/drivers/staging/octeon/ethernet-mem.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file is based on code from OCTEON SDK by Cavium Networks. + * + * Copyright (c) 2003-2007 Cavium Networks + */ + +int cvm_oct_mem_fill_fpa(int pool, int size, int elements); +void cvm_oct_mem_empty_fpa(int pool, int size, int elements); diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c new file mode 100644 index 000000000000..0c4fac31540a --- /dev/null +++ b/drivers/staging/octeon/ethernet-rgmii.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file is based on code from OCTEON SDK by Cavium Networks. + * + * Copyright (c) 2003-2007 Cavium Networks + */ + +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/phy.h> +#include <linux/ratelimit.h> +#include <net/dst.h> + +#include "octeon-ethernet.h" +#include "ethernet-defines.h" +#include "ethernet-util.h" +#include "ethernet-mdio.h" + +static DEFINE_SPINLOCK(global_register_lock); + +static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable) +{ + union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; + union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs; + union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg; + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + + /* Set preamble checking. */ + gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, + interface)); + gmxx_rxx_frm_ctl.s.pre_chk = enable; + cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface), + gmxx_rxx_frm_ctl.u64); + + /* Set FCS stripping. 
*/ + ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS); + if (enable) + ipd_sub_port_fcs.s.port_bit |= 1ull << priv->port; + else + ipd_sub_port_fcs.s.port_bit &= + 0xffffffffull ^ (1ull << priv->port); + cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64); + + /* Clear any error bits. */ + gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, + interface)); + cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), + gmxx_rxx_int_reg.u64); +} + +static void cvm_oct_check_preamble_errors(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + union cvmx_helper_link_info link_info; + unsigned long flags; + + link_info.u64 = priv->link_info; + + /* + * Take the global register lock since we are going to + * touch registers that affect more than one port. + */ + spin_lock_irqsave(&global_register_lock, flags); + + if (link_info.s.speed == 10 && priv->last_speed == 10) { + /* + * Read the GMXX_RXX_INT_REG[PCTERR] bit and see if we are + * getting preamble errors. + */ + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg; + + gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG + (index, interface)); + if (gmxx_rxx_int_reg.s.pcterr) { + /* + * We are getting preamble errors at 10Mbps. Most + * likely the PHY is giving us packets with misaligned + * preambles. In order to get these packets we need to + * disable preamble checking and do it in software. + */ + cvm_oct_set_hw_preamble(priv, false); + printk_ratelimited("%s: Using 10Mbps with software preamble removal\n", + dev->name); + } + } else { + /* + * Since the 10Mbps preamble workaround is allowed we need to + * enable preamble checking, FCS stripping, and clear error + * bits on every speed change. If errors occur during 10Mbps + * operation the above code will change this stuff + */ + if (priv->last_speed != link_info.s.speed) + cvm_oct_set_hw_preamble(priv, true); + priv->last_speed = link_info.s.speed; + } + spin_unlock_irqrestore(&global_register_lock, flags); +} + +static void cvm_oct_rgmii_poll(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + union cvmx_helper_link_info link_info; + bool status_change; + + link_info = cvmx_helper_link_get(priv->port); + if (priv->link_info != link_info.u64 && + cvmx_helper_link_set(priv->port, link_info)) + link_info.u64 = priv->link_info; + status_change = priv->link_info != link_info.u64; + priv->link_info = link_info.u64; + + cvm_oct_check_preamble_errors(dev); + + if (likely(!status_change)) + return; + + /* Tell core. */ + if (link_info.s.link_up) { + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + } else if (netif_carrier_ok(dev)) { + netif_carrier_off(dev); + } + cvm_oct_note_carrier(priv, link_info); +} + +int cvm_oct_rgmii_open(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + int ret; + + ret = cvm_oct_common_open(dev, cvm_oct_rgmii_poll); + if (ret) + return ret; + + if (dev->phydev) { + /* + * In phydev mode, we need still periodic polling for the + * preamble error checking, and we also need to call this + * function on every link state change. + * + * Only true RGMII ports need to be polled. In GMII mode, port + * 0 is really a RGMII port. 
+ */ + if ((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII && + priv->port == 0) || + (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) { + priv->poll = cvm_oct_check_preamble_errors; + cvm_oct_check_preamble_errors(dev); + } + } + + return 0; +} diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c new file mode 100644 index 000000000000..2c16230f993c --- /dev/null +++ b/drivers/staging/octeon/ethernet-rx.c @@ -0,0 +1,538 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file is based on code from OCTEON SDK by Cavium Networks. + * + * Copyright (c) 2003-2010 Cavium Networks + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/cache.h> +#include <linux/cpumask.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/string.h> +#include <linux/prefetch.h> +#include <linux/ratelimit.h> +#include <linux/smp.h> +#include <linux/interrupt.h> +#include <net/dst.h> +#ifdef CONFIG_XFRM +#include <linux/xfrm.h> +#include <net/xfrm.h> +#endif /* CONFIG_XFRM */ + +#include "octeon-ethernet.h" +#include "ethernet-defines.h" +#include "ethernet-mem.h" +#include "ethernet-rx.h" +#include "ethernet-util.h" + +static atomic_t oct_rx_ready = ATOMIC_INIT(0); + +static struct oct_rx_group { + int irq; + int group; + struct napi_struct napi; +} oct_rx_group[16]; + +/** + * cvm_oct_do_interrupt - interrupt handler. + * @irq: Interrupt number. + * @napi_id: Cookie to identify the NAPI instance. + * + * The interrupt occurs whenever the POW has packets in our group. + * + */ +static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id) +{ + /* Disable the IRQ and start napi_poll. */ + disable_irq_nosync(irq); + napi_schedule(napi_id); + + return IRQ_HANDLED; +} + +/** + * cvm_oct_check_rcv_error - process receive errors + * @work: Work queue entry pointing to the packet. + * + * Returns Non-zero if the packet can be dropped, zero otherwise. + */ +static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work) +{ + int port; + + if (octeon_has_feature(OCTEON_FEATURE_PKND)) + port = work->word0.pip.cn68xx.pknd; + else + port = work->word1.cn38xx.ipprt; + + if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) { + /* + * Ignore length errors on min size packets. Some + * equipment incorrectly pads packets to 64+4FCS + * instead of 60+4FCS. Note these packets still get + * counted as frame errors. + */ + } else if (work->word2.snoip.err_code == 5 || + work->word2.snoip.err_code == 7) { + /* + * We received a packet with either an alignment error + * or a FCS error. This may be signalling that we are + * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK] + * off. If this is the case we need to parse the + * packet to determine if we can remove a non spec + * preamble and generate a correct packet. 
+ */ + int interface = cvmx_helper_get_interface_num(port); + int index = cvmx_helper_get_interface_index_num(port); + union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; + + gmxx_rxx_frm_ctl.u64 = + cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface)); + if (gmxx_rxx_frm_ctl.s.pre_chk == 0) { + u8 *ptr = + cvmx_phys_to_ptr(work->packet_ptr.s.addr); + int i = 0; + + while (i < work->word1.len - 1) { + if (*ptr != 0x55) + break; + ptr++; + i++; + } + + if (*ptr == 0xd5) { + /* Port received 0xd5 preamble */ + work->packet_ptr.s.addr += i + 1; + work->word1.len -= i + 5; + } else if ((*ptr & 0xf) == 0xd) { + /* Port received 0xd preamble */ + work->packet_ptr.s.addr += i; + work->word1.len -= i + 4; + for (i = 0; i < work->word1.len; i++) { + *ptr = + ((*ptr & 0xf0) >> 4) | + ((*(ptr + 1) & 0xf) << 4); + ptr++; + } + } else { + printk_ratelimited("Port %d unknown preamble, packet dropped\n", + port); + cvm_oct_free_work(work); + return 1; + } + } + } else { + printk_ratelimited("Port %d receive error code %d, packet dropped\n", + port, work->word2.snoip.err_code); + cvm_oct_free_work(work); + return 1; + } + + return 0; +} + +static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb) +{ + int segments = work->word2.s.bufs; + union cvmx_buf_ptr segment_ptr = work->packet_ptr; + int len = work->word1.len; + int segment_size; + + while (segments--) { + union cvmx_buf_ptr next_ptr; + + next_ptr = *(union cvmx_buf_ptr *) + cvmx_phys_to_ptr(segment_ptr.s.addr - 8); + + /* + * Octeon Errata PKI-100: The segment size is wrong. + * + * Until it is fixed, calculate the segment size based on + * the packet pool buffer size. + * When it is fixed, the following line should be replaced + * with this one: + * int segment_size = segment_ptr.s.size; + */ + segment_size = + CVMX_FPA_PACKET_POOL_SIZE - + (segment_ptr.s.addr - + (((segment_ptr.s.addr >> 7) - + segment_ptr.s.back) << 7)); + + /* Don't copy more than what is left in the packet */ + if (segment_size > len) + segment_size = len; + + /* Copy the data into the packet */ + skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr), + segment_size); + len -= segment_size; + segment_ptr = next_ptr; + } +} + +static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget) +{ + const int coreid = cvmx_get_core_num(); + u64 old_group_mask; + u64 old_scratch; + int rx_count = 0; + int did_work_request = 0; + int packet_not_copied; + + /* Prefetch cvm_oct_device since we know we need it soon */ + prefetch(cvm_oct_device); + + if (USE_ASYNC_IOBDMA) { + /* Save scratch in case userspace is using it */ + CVMX_SYNCIOBDMA; + old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH); + } + + /* Only allow work for our group (and preserve priorities) */ + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) { + old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); + cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), + BIT(rx_group->group)); + cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */ + } else { + old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid)); + cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), + (old_group_mask & ~0xFFFFull) | + BIT(rx_group->group)); + } + + if (USE_ASYNC_IOBDMA) { + cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); + did_work_request = 1; + } + + while (rx_count < budget) { + struct sk_buff *skb = NULL; + struct sk_buff **pskb = NULL; + int skb_in_hw; + struct cvmx_wqe *work; + int port; + + if (USE_ASYNC_IOBDMA && did_work_request) + work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH); + else + work = 
cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT); + + prefetch(work); + did_work_request = 0; + if (!work) { + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) { + cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS, + BIT(rx_group->group)); + cvmx_write_csr(CVMX_SSO_WQ_INT, + BIT(rx_group->group)); + } else { + union cvmx_pow_wq_int wq_int; + + wq_int.u64 = 0; + wq_int.s.iq_dis = BIT(rx_group->group); + wq_int.s.wq_int = BIT(rx_group->group); + cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64); + } + break; + } + pskb = (struct sk_buff **) + (cvm_oct_get_buffer_ptr(work->packet_ptr) - + sizeof(void *)); + prefetch(pskb); + + if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) { + cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, + CVMX_POW_NO_WAIT); + did_work_request = 1; + } + rx_count++; + + skb_in_hw = work->word2.s.bufs == 1; + if (likely(skb_in_hw)) { + skb = *pskb; + prefetch(&skb->head); + prefetch(&skb->len); + } + + if (octeon_has_feature(OCTEON_FEATURE_PKND)) + port = work->word0.pip.cn68xx.pknd; + else + port = work->word1.cn38xx.ipprt; + + prefetch(cvm_oct_device[port]); + + /* Immediately throw away all packets with receive errors */ + if (unlikely(work->word2.snoip.rcv_error)) { + if (cvm_oct_check_rcv_error(work)) + continue; + } + + /* + * We can only use the zero copy path if skbuffs are + * in the FPA pool and the packet fits in a single + * buffer. + */ + if (likely(skb_in_hw)) { + skb->data = skb->head + work->packet_ptr.s.addr - + cvmx_ptr_to_phys(skb->head); + prefetch(skb->data); + skb->len = work->word1.len; + skb_set_tail_pointer(skb, skb->len); + packet_not_copied = 1; + } else { + /* + * We have to copy the packet. First allocate + * an skbuff for it. + */ + skb = dev_alloc_skb(work->word1.len); + if (!skb) { + cvm_oct_free_work(work); + continue; + } + + /* + * Check if we've received a packet that was + * entirely stored in the work entry. + */ + if (unlikely(work->word2.s.bufs == 0)) { + u8 *ptr = work->packet_data; + + if (likely(!work->word2.s.not_IP)) { + /* + * The beginning of the packet + * moves for IP packets. + */ + if (work->word2.s.is_v6) + ptr += 2; + else + ptr += 6; + } + skb_put_data(skb, ptr, work->word1.len); + /* No packet buffers to free */ + } else { + copy_segments_to_skb(work, skb); + } + packet_not_copied = 0; + } + if (likely((port < TOTAL_NUMBER_OF_PORTS) && + cvm_oct_device[port])) { + struct net_device *dev = cvm_oct_device[port]; + + /* + * Only accept packets for devices that are + * currently up. + */ + if (likely(dev->flags & IFF_UP)) { + skb->protocol = eth_type_trans(skb, dev); + skb->dev = dev; + + if (unlikely(work->word2.s.not_IP || + work->word2.s.IP_exc || + work->word2.s.L4_error || + !work->word2.s.tcp_or_udp)) + skb->ip_summed = CHECKSUM_NONE; + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* Increment RX stats for virtual ports */ + if (port >= CVMX_PIP_NUM_INPUT_PORTS) { + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + } + netif_receive_skb(skb); + } else { + /* + * Drop any packet received for a device that + * isn't up. + */ + dev->stats.rx_dropped++; + dev_kfree_skb_irq(skb); + } + } else { + /* + * Drop any packet received for a device that + * doesn't exist. + */ + printk_ratelimited("Port %d not controlled by Linux, packet dropped\n", + port); + dev_kfree_skb_irq(skb); + } + /* + * Check to see if the skbuff and work share the same + * packet buffer. + */ + if (likely(packet_not_copied)) { + /* + * This buffer needs to be replaced, increment + * the number of buffers we need to free by + * one. 
+ */ + cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, + 1); + + cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1); + } else { + cvm_oct_free_work(work); + } + } + /* Restore the original POW group mask */ + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) { + cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask); + cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */ + } else { + cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask); + } + + if (USE_ASYNC_IOBDMA) { + /* Restore the scratch area */ + cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); + } + cvm_oct_rx_refill_pool(0); + + return rx_count; +} + +/** + * cvm_oct_napi_poll - the NAPI poll function. + * @napi: The NAPI instance. + * @budget: Maximum number of packets to receive. + * + * Returns the number of packets processed. + */ +static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) +{ + struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group, + napi); + int rx_count; + + rx_count = cvm_oct_poll(rx_group, budget); + + if (rx_count < budget) { + /* No more work */ + napi_complete_done(napi, rx_count); + enable_irq(rx_group->irq); + } + return rx_count; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * cvm_oct_poll_controller - poll for receive packets + * device. + * + * @dev: Device to poll. Unused + */ +void cvm_oct_poll_controller(struct net_device *dev) +{ + int i; + + if (!atomic_read(&oct_rx_ready)) + return; + + for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) { + if (!(pow_receive_groups & BIT(i))) + continue; + + cvm_oct_poll(&oct_rx_group[i], 16); + } +} +#endif + +void cvm_oct_rx_initialize(void) +{ + int i; + struct net_device *dev_for_napi = NULL; + + for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) { + if (cvm_oct_device[i]) { + dev_for_napi = cvm_oct_device[i]; + break; + } + } + + if (!dev_for_napi) + panic("No net_devices were allocated."); + + for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) { + int ret; + + if (!(pow_receive_groups & BIT(i))) + continue; + + netif_napi_add(dev_for_napi, &oct_rx_group[i].napi, + cvm_oct_napi_poll, rx_napi_weight); + napi_enable(&oct_rx_group[i].napi); + + oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i; + oct_rx_group[i].group = i; + + /* Register an IRQ handler to receive POW interrupts */ + ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0, + "Ethernet", &oct_rx_group[i].napi); + if (ret) + panic("Could not acquire Ethernet IRQ %d\n", + oct_rx_group[i].irq); + + disable_irq_nosync(oct_rx_group[i].irq); + + /* Enable POW interrupt when our port has at least one packet */ + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) { + union cvmx_sso_wq_int_thrx int_thr; + union cvmx_pow_wq_int_pc int_pc; + + int_thr.u64 = 0; + int_thr.s.tc_en = 1; + int_thr.s.tc_thr = 1; + cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64); + + int_pc.u64 = 0; + int_pc.s.pc_thr = 5; + cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64); + } else { + union cvmx_pow_wq_int_thrx int_thr; + union cvmx_pow_wq_int_pc int_pc; + + int_thr.u64 = 0; + int_thr.s.tc_en = 1; + int_thr.s.tc_thr = 1; + cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64); + + int_pc.u64 = 0; + int_pc.s.pc_thr = 5; + cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64); + } + + /* Schedule NAPI now. This will indirectly enable the + * interrupt. 
+ */ + napi_schedule(&oct_rx_group[i].napi); + } + atomic_inc(&oct_rx_ready); +} + +void cvm_oct_rx_shutdown(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) { + if (!(pow_receive_groups & BIT(i))) + continue; + + /* Disable POW interrupt */ + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) + cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0); + else + cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0); + + /* Free the interrupt handler */ + free_irq(oct_rx_group[i].irq, cvm_oct_device); + + netif_napi_del(&oct_rx_group[i].napi); + } +} diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h new file mode 100644 index 000000000000..ff6482fa20d6 --- /dev/null +++ b/drivers/staging/octeon/ethernet-rx.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file is based on code from OCTEON SDK by Cavium Networks. + * + * Copyright (c) 2003-2007 Cavium Networks + */ + +void cvm_oct_poll_controller(struct net_device *dev); +void cvm_oct_rx_initialize(void); +void cvm_oct_rx_shutdown(void); + +static inline void cvm_oct_rx_refill_pool(int fill_threshold) +{ + int number_to_free; + int num_freed; + /* Refill the packet buffer pool */ + number_to_free = + cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); + + if (number_to_free > fill_threshold) { + cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, + -number_to_free); + num_freed = cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, + CVMX_FPA_PACKET_POOL_SIZE, + number_to_free); + if (num_freed != number_to_free) { + cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, + number_to_free - num_freed); + } + } +} diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c new file mode 100644 index 000000000000..d7fbd9159302 --- /dev/null +++ b/drivers/staging/octeon/ethernet-sgmii.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file is based on code from OCTEON SDK by Cavium Networks. + * + * Copyright (c) 2003-2007 Cavium Networks + */ + +#include <linux/phy.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/ratelimit.h> +#include <net/dst.h> + +#include "octeon-ethernet.h" +#include "ethernet-defines.h" +#include "ethernet-util.h" +#include "ethernet-mdio.h" + +int cvm_oct_sgmii_open(struct net_device *dev) +{ + return cvm_oct_common_open(dev, cvm_oct_link_poll); +} + +int cvm_oct_sgmii_init(struct net_device *dev) +{ + cvm_oct_common_init(dev); + + /* FIXME: Need autoneg logic */ + return 0; +} diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c new file mode 100644 index 000000000000..c582403e6a1f --- /dev/null +++ b/drivers/staging/octeon/ethernet-spi.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file is based on code from OCTEON SDK by Cavium Networks. 
+ * + * Copyright (c) 2003-2007 Cavium Networks + */ + +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <net/dst.h> + +#include "octeon-ethernet.h" +#include "ethernet-defines.h" +#include "ethernet-util.h" + +static int number_spi_ports; +static int need_retrain[2] = { 0, 0 }; + +static void cvm_oct_spxx_int_pr(union cvmx_spxx_int_reg spx_int_reg, int index) +{ + if (spx_int_reg.s.spf) + pr_err("SPI%d: SRX Spi4 interface down\n", index); + if (spx_int_reg.s.calerr) + pr_err("SPI%d: SRX Spi4 Calendar table parity error\n", index); + if (spx_int_reg.s.syncerr) + pr_err("SPI%d: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n", + index); + if (spx_int_reg.s.diperr) + pr_err("SPI%d: SRX Spi4 DIP4 error\n", index); + if (spx_int_reg.s.tpaovr) + pr_err("SPI%d: SRX Selected port has hit TPA overflow\n", + index); + if (spx_int_reg.s.rsverr) + pr_err("SPI%d: SRX Spi4 reserved control word detected\n", + index); + if (spx_int_reg.s.drwnng) + pr_err("SPI%d: SRX Spi4 receive FIFO drowning/overflow\n", + index); + if (spx_int_reg.s.clserr) + pr_err("SPI%d: SRX Spi4 packet closed on non-16B alignment without EOP\n", + index); + if (spx_int_reg.s.spiovr) + pr_err("SPI%d: SRX Spi4 async FIFO overflow\n", index); + if (spx_int_reg.s.abnorm) + pr_err("SPI%d: SRX Abnormal packet termination (ERR bit)\n", + index); + if (spx_int_reg.s.prtnxa) + pr_err("SPI%d: SRX Port out of range\n", index); +} + +static void cvm_oct_stxx_int_pr(union cvmx_stxx_int_reg stx_int_reg, int index) +{ + if (stx_int_reg.s.syncerr) + pr_err("SPI%d: STX Interface encountered a fatal error\n", + index); + if (stx_int_reg.s.frmerr) + pr_err("SPI%d: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n", + index); + if (stx_int_reg.s.unxfrm) + pr_err("SPI%d: STX Unexpected framing sequence\n", index); + if (stx_int_reg.s.nosync) + pr_err("SPI%d: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n", + index); + if (stx_int_reg.s.diperr) + pr_err("SPI%d: STX DIP2 error on the Spi4 Status channel\n", + index); + if (stx_int_reg.s.datovr) + pr_err("SPI%d: STX Spi4 FIFO overflow error\n", index); + if (stx_int_reg.s.ovrbst) + pr_err("SPI%d: STX Transmit packet burst too big\n", index); + if (stx_int_reg.s.calpar1) + pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n", + index, 1); + if (stx_int_reg.s.calpar0) + pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n", + index, 0); +} + +static irqreturn_t cvm_oct_spi_spx_int(int index) +{ + union cvmx_spxx_int_reg spx_int_reg; + union cvmx_stxx_int_reg stx_int_reg; + + spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(index)); + cvmx_write_csr(CVMX_SPXX_INT_REG(index), spx_int_reg.u64); + if (!need_retrain[index]) { + spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(index)); + cvm_oct_spxx_int_pr(spx_int_reg, index); + } + + stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(index)); + cvmx_write_csr(CVMX_STXX_INT_REG(index), stx_int_reg.u64); + if (!need_retrain[index]) { + stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(index)); + cvm_oct_stxx_int_pr(stx_int_reg, index); + } + + cvmx_write_csr(CVMX_SPXX_INT_MSK(index), 0); + cvmx_write_csr(CVMX_STXX_INT_MSK(index), 0); + need_retrain[index] = 1; + + return IRQ_HANDLED; +} + +static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id) +{ + irqreturn_t return_status = IRQ_NONE; + union cvmx_npi_rsl_int_blocks rsl_int_blocks; + + /* Check and see if this interrupt was caused by the GMX block */ + rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS); + if 
(rsl_int_blocks.s.spx1) /* 19 - SPX1_INT_REG & STX1_INT_REG */ + return_status = cvm_oct_spi_spx_int(1); + + if (rsl_int_blocks.s.spx0) /* 18 - SPX0_INT_REG & STX0_INT_REG */ + return_status = cvm_oct_spi_spx_int(0); + + return return_status; +} + +static void cvm_oct_spi_enable_error_reporting(int interface) +{ + union cvmx_spxx_int_msk spxx_int_msk; + union cvmx_stxx_int_msk stxx_int_msk; + + spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface)); + spxx_int_msk.s.calerr = 1; + spxx_int_msk.s.syncerr = 1; + spxx_int_msk.s.diperr = 1; + spxx_int_msk.s.tpaovr = 1; + spxx_int_msk.s.rsverr = 1; + spxx_int_msk.s.drwnng = 1; + spxx_int_msk.s.clserr = 1; + spxx_int_msk.s.spiovr = 1; + spxx_int_msk.s.abnorm = 1; + spxx_int_msk.s.prtnxa = 1; + cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64); + + stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface)); + stxx_int_msk.s.frmerr = 1; + stxx_int_msk.s.unxfrm = 1; + stxx_int_msk.s.nosync = 1; + stxx_int_msk.s.diperr = 1; + stxx_int_msk.s.datovr = 1; + stxx_int_msk.s.ovrbst = 1; + stxx_int_msk.s.calpar1 = 1; + stxx_int_msk.s.calpar0 = 1; + cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64); +} + +static void cvm_oct_spi_poll(struct net_device *dev) +{ + static int spi4000_port; + struct octeon_ethernet *priv = netdev_priv(dev); + int interface; + + for (interface = 0; interface < 2; interface++) { + if ((priv->port == interface * 16) && need_retrain[interface]) { + if (cvmx_spi_restart_interface + (interface, CVMX_SPI_MODE_DUPLEX, 10) == 0) { + need_retrain[interface] = 0; + cvm_oct_spi_enable_error_reporting(interface); + } + } + + /* + * The SPI4000 TWSI interface is very slow. In order + * not to bring the system to a crawl, we only poll a + * single port every second. This means negotiation + * speed changes take up to 10 seconds, but at least + * we don't waste absurd amounts of time waiting for + * TWSI. + */ + if (priv->port == spi4000_port) { + /* + * This function does nothing if it is called on an + * interface without a SPI4000. + */ + cvmx_spi4000_check_speed(interface, priv->port); + /* + * Normal ordering increments. By decrementing + * we only match once per iteration. + */ + spi4000_port--; + if (spi4000_port < 0) + spi4000_port = 10; + } + } +} + +int cvm_oct_spi_init(struct net_device *dev) +{ + int r; + struct octeon_ethernet *priv = netdev_priv(dev); + + if (number_spi_ports == 0) { + r = request_irq(OCTEON_IRQ_RML, cvm_oct_spi_rml_interrupt, + IRQF_SHARED, "SPI", &number_spi_ports); + if (r) + return r; + } + number_spi_ports++; + + if ((priv->port == 0) || (priv->port == 16)) { + cvm_oct_spi_enable_error_reporting(INTERFACE(priv->port)); + priv->poll = cvm_oct_spi_poll; + } + cvm_oct_common_init(dev); + return 0; +} + +void cvm_oct_spi_uninit(struct net_device *dev) +{ + int interface; + + cvm_oct_common_uninit(dev); + number_spi_ports--; + if (number_spi_ports == 0) { + for (interface = 0; interface < 2; interface++) { + cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0); + cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0); + } + free_irq(OCTEON_IRQ_RML, &number_spi_ports); + } +} diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c new file mode 100644 index 000000000000..ab7dd8216006 --- /dev/null +++ b/drivers/staging/octeon/ethernet-tx.c @@ -0,0 +1,717 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file is based on code from OCTEON SDK by Cavium Networks. 
+ * + * Copyright (c) 2003-2010 Cavium Networks + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/ratelimit.h> +#include <linux/string.h> +#include <linux/interrupt.h> +#include <net/dst.h> +#ifdef CONFIG_XFRM +#include <linux/xfrm.h> +#include <net/xfrm.h> +#endif /* CONFIG_XFRM */ + +#include <linux/atomic.h> +#include <net/sch_generic.h> + +#include "octeon-ethernet.h" +#include "ethernet-defines.h" +#include "ethernet-tx.h" +#include "ethernet-util.h" + +#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb)) + +/* + * You can define GET_SKBUFF_QOS() to override how the skbuff output + * function determines which output queue is used. The default + * implementation always uses the base queue for the port. If, for + * example, you wanted to use the skb->priority field, define + * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority) + */ +#ifndef GET_SKBUFF_QOS +#define GET_SKBUFF_QOS(skb) 0 +#endif + +static void cvm_oct_tx_do_cleanup(unsigned long arg); +static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0); + +/* Maximum number of SKBs to try to free per xmit packet. */ +#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2) + +static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau) +{ + int undo; + + undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + + MAX_SKB_TO_FREE; + if (undo > 0) + cvmx_fau_atomic_add32(fau, -undo); + skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : + -skb_to_free; + return skb_to_free; +} + +static void cvm_oct_kick_tx_poll_watchdog(void) +{ + union cvmx_ciu_timx ciu_timx; + + ciu_timx.u64 = 0; + ciu_timx.s.one_shot = 1; + ciu_timx.s.len = cvm_oct_tx_poll_interval; + cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64); +} + +static void cvm_oct_free_tx_skbs(struct net_device *dev) +{ + int skb_to_free; + int qos, queues_per_port; + int total_freed = 0; + int total_remaining = 0; + unsigned long flags; + struct octeon_ethernet *priv = netdev_priv(dev); + + queues_per_port = cvmx_pko_get_num_queues(priv->port); + /* Drain any pending packets in the free list */ + for (qos = 0; qos < queues_per_port; qos++) { + if (skb_queue_len(&priv->tx_free_list[qos]) == 0) + continue; + skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, + MAX_SKB_TO_FREE); + skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, + priv->fau + qos * 4); + total_freed += skb_to_free; + if (skb_to_free > 0) { + struct sk_buff *to_free_list = NULL; + + spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); + while (skb_to_free > 0) { + struct sk_buff *t; + + t = __skb_dequeue(&priv->tx_free_list[qos]); + t->next = to_free_list; + to_free_list = t; + skb_to_free--; + } + spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, + flags); + /* Do the actual freeing outside of the lock. 
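dev_kfree_skb_any() is safe to call here, but freeing while holding the list lock would needlessly lengthen the critical section.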
*/ + while (to_free_list) { + struct sk_buff *t = to_free_list; + + to_free_list = to_free_list->next; + dev_kfree_skb_any(t); + } + } + total_remaining += skb_queue_len(&priv->tx_free_list[qos]); + } + if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev)) + netif_wake_queue(dev); + if (total_remaining) + cvm_oct_kick_tx_poll_watchdog(); +} + +/** + * cvm_oct_xmit - transmit a packet + * @skb: Packet to send + * @dev: Device info structure + * + * Returns Always returns NETDEV_TX_OK + */ +int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) +{ + union cvmx_pko_command_word0 pko_command; + union cvmx_buf_ptr hw_buffer; + u64 old_scratch; + u64 old_scratch2; + int qos; + int i; + enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type; + struct octeon_ethernet *priv = netdev_priv(dev); + struct sk_buff *to_free_list; + int skb_to_free; + int buffers_to_free; + u32 total_to_clean; + unsigned long flags; +#if REUSE_SKBUFFS_WITHOUT_FREE + unsigned char *fpa_head; +#endif + + /* + * Prefetch the private data structure. It is larger than the + * one cache line. + */ + prefetch(priv); + + /* + * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to + * completely remove "qos" in the event neither interface + * supports multiple queues per port. + */ + if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) || + (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) { + qos = GET_SKBUFF_QOS(skb); + if (qos <= 0) + qos = 0; + else if (qos >= cvmx_pko_get_num_queues(priv->port)) + qos = 0; + } else { + qos = 0; + } + + if (USE_ASYNC_IOBDMA) { + /* Save scratch in case userspace is using it */ + CVMX_SYNCIOBDMA; + old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH); + old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); + + /* + * Fetch and increment the number of packets to be + * freed. + */ + cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8, + FAU_NUM_PACKET_BUFFERS_TO_FREE, + 0); + cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, + priv->fau + qos * 4, + MAX_SKB_TO_FREE); + } + + /* + * We have space for 6 segment pointers, If there will be more + * than that, we must linearize. + */ + if (unlikely(skb_shinfo(skb)->nr_frags > 5)) { + if (unlikely(__skb_linearize(skb))) { + queue_type = QUEUE_DROP; + if (USE_ASYNC_IOBDMA) { + /* + * Get the number of skbuffs in use + * by the hardware + */ + CVMX_SYNCIOBDMA; + skb_to_free = + cvmx_scratch_read64(CVMX_SCR_SCRATCH); + } else { + /* + * Get the number of skbuffs in use + * by the hardware + */ + skb_to_free = + cvmx_fau_fetch_and_add32(priv->fau + + qos * 4, + MAX_SKB_TO_FREE); + } + skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, + priv->fau + + qos * 4); + spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); + goto skip_xmit; + } + } + + /* + * The CN3XXX series of parts has an errata (GMX-401) which + * causes the GMX block to hang if a collision occurs towards + * the end of a <68 byte packet. As a workaround for this, we + * pad packets to be 68 bytes whenever we are in half duplex + * mode. We don't handle the case of having a small packet but + * no room to add the padding. 
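If there is no tailroom, the undersized frame is simply sent as-is.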
The kernel should always give + * us at least a cache line + */ + if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) { + union cvmx_gmxx_prtx_cfg gmx_prt_cfg; + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + + if (interface < 2) { + /* We only need to pad packet in half duplex mode */ + gmx_prt_cfg.u64 = + cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + if (gmx_prt_cfg.s.duplex == 0) { + int add_bytes = 64 - skb->len; + + if ((skb_tail_pointer(skb) + add_bytes) <= + skb_end_pointer(skb)) + __skb_put_zero(skb, add_bytes); + } + } + } + + /* Build the PKO command */ + pko_command.u64 = 0; +#ifdef __LITTLE_ENDIAN + pko_command.s.le = 1; +#endif + pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ + pko_command.s.segs = 1; + pko_command.s.total_bytes = skb->len; + pko_command.s.size0 = CVMX_FAU_OP_SIZE_32; + pko_command.s.subone0 = 1; + + pko_command.s.dontfree = 1; + + /* Build the PKO buffer pointer */ + hw_buffer.u64 = 0; + if (skb_shinfo(skb)->nr_frags == 0) { + hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data); + hw_buffer.s.pool = 0; + hw_buffer.s.size = skb->len; + } else { + hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data); + hw_buffer.s.pool = 0; + hw_buffer.s.size = skb_headlen(skb); + CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *fs = skb_shinfo(skb)->frags + i; + + hw_buffer.s.addr = + XKPHYS_TO_PHYS((uintptr_t)skb_frag_address(fs)); + hw_buffer.s.size = skb_frag_size(fs); + CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64; + } + hw_buffer.s.addr = + XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb)); + hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1; + pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1; + pko_command.s.gather = 1; + goto dont_put_skbuff_in_hw; + } + + /* + * See if we can put this skb in the FPA pool. Any strange + * behavior from the Linux networking stack will most likely + * be caused by a bug in the following code. If some field is + * in use by the network stack and gets carried over when a + * buffer is reused, bad things may happen. If in doubt and + * you dont need the absolute best performance, disable the + * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has + * shown a 25% increase in performance under some loads. + */ +#if REUSE_SKBUFFS_WITHOUT_FREE + fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f); + if (unlikely(skb->data < fpa_head)) { + /* TX buffer beginning can't meet FPA alignment constraints */ + goto dont_put_skbuff_in_hw; + } + if (unlikely + ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) { + /* TX buffer isn't large enough for the FPA */ + goto dont_put_skbuff_in_hw; + } + if (unlikely(skb_shared(skb))) { + /* TX buffer sharing data with someone else */ + goto dont_put_skbuff_in_hw; + } + if (unlikely(skb_cloned(skb))) { + /* TX buffer has been cloned */ + goto dont_put_skbuff_in_hw; + } + if (unlikely(skb_header_cloned(skb))) { + /* TX buffer header has been cloned */ + goto dont_put_skbuff_in_hw; + } + if (unlikely(skb->destructor)) { + /* TX buffer has a destructor */ + goto dont_put_skbuff_in_hw; + } + if (unlikely(skb_shinfo(skb)->nr_frags)) { + /* TX buffer has fragments */ + goto dont_put_skbuff_in_hw; + } + if (unlikely + (skb->truesize != + sizeof(*skb) + skb_end_offset(skb))) { + /* TX buffer truesize has been changed */ + goto dont_put_skbuff_in_hw; + } + + /* + * We can use this buffer in the FPA. 
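Every check above passed, so PKO can return the buffer straight to the packet pool once transmission completes.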
We don't need the FAU + * update anymore + */ + pko_command.s.dontfree = 0; + + hw_buffer.s.back = ((unsigned long)skb->data >> 7) - + ((unsigned long)fpa_head >> 7); + + *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb; + + /* + * The skbuff will be reused without ever being freed. We must + * cleanup a bunch of core things. + */ + dst_release(skb_dst(skb)); + skb_dst_set(skb, NULL); + skb_ext_reset(skb); + nf_reset_ct(skb); + skb_reset_redirect(skb); + +#ifdef CONFIG_NET_SCHED + skb->tc_index = 0; +#endif /* CONFIG_NET_SCHED */ +#endif /* REUSE_SKBUFFS_WITHOUT_FREE */ + +dont_put_skbuff_in_hw: + + /* Check if we can use the hardware checksumming */ + if ((skb->protocol == htons(ETH_P_IP)) && + (ip_hdr(skb)->version == 4) && + (ip_hdr(skb)->ihl == 5) && + ((ip_hdr(skb)->frag_off == 0) || + (ip_hdr(skb)->frag_off == htons(1 << 14))) && + ((ip_hdr(skb)->protocol == IPPROTO_TCP) || + (ip_hdr(skb)->protocol == IPPROTO_UDP))) { + /* Use hardware checksum calc */ + pko_command.s.ipoffp1 = skb_network_offset(skb) + 1; + } + + if (USE_ASYNC_IOBDMA) { + /* Get the number of skbuffs in use by the hardware */ + CVMX_SYNCIOBDMA; + skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH); + buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); + } else { + /* Get the number of skbuffs in use by the hardware */ + skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, + MAX_SKB_TO_FREE); + buffers_to_free = + cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); + } + + skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, + priv->fau + qos * 4); + + /* + * If we're sending faster than the receive can free them then + * don't do the HW free. + */ + if ((buffers_to_free < -100) && !pko_command.s.dontfree) + pko_command.s.dontfree = 1; + + if (pko_command.s.dontfree) { + queue_type = QUEUE_CORE; + pko_command.s.reg0 = priv->fau + qos * 4; + } else { + queue_type = QUEUE_HW; + } + if (USE_ASYNC_IOBDMA) + cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, + FAU_TOTAL_TX_TO_CLEAN, 1); + + spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); + + /* Drop this packet if we have too many already queued to the HW */ + if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= + MAX_OUT_QUEUE_DEPTH)) { + if (dev->tx_queue_len != 0) { + /* Drop the lock when notifying the core. */ + spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, + flags); + netif_stop_queue(dev); + spin_lock_irqsave(&priv->tx_free_list[qos].lock, + flags); + } else { + /* If not using normal queueing. 
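A tx_queue_len of zero means there is no qdisc to apply back-pressure, so drop the packet rather than stopping the queue.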
*/ + queue_type = QUEUE_DROP; + goto skip_xmit; + } + } + + cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, + CVMX_PKO_LOCK_NONE); + + /* Send the packet to the output queue */ + if (unlikely(cvmx_pko_send_packet_finish(priv->port, + priv->queue + qos, + pko_command, hw_buffer, + CVMX_PKO_LOCK_NONE))) { + printk_ratelimited("%s: Failed to send the packet\n", + dev->name); + queue_type = QUEUE_DROP; + } +skip_xmit: + to_free_list = NULL; + + switch (queue_type) { + case QUEUE_DROP: + skb->next = to_free_list; + to_free_list = skb; + dev->stats.tx_dropped++; + break; + case QUEUE_HW: + cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1); + break; + case QUEUE_CORE: + __skb_queue_tail(&priv->tx_free_list[qos], skb); + break; + default: + BUG(); + } + + while (skb_to_free > 0) { + struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]); + + t->next = to_free_list; + to_free_list = t; + skb_to_free--; + } + + spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); + + /* Do the actual freeing outside of the lock. */ + while (to_free_list) { + struct sk_buff *t = to_free_list; + + to_free_list = to_free_list->next; + dev_kfree_skb_any(t); + } + + if (USE_ASYNC_IOBDMA) { + CVMX_SYNCIOBDMA; + total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH); + /* Restore the scratch area */ + cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); + cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); + } else { + total_to_clean = + cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1); + } + + if (total_to_clean & 0x3ff) { + /* + * Schedule the cleanup tasklet every 1024 packets for + * the pathological case of high traffic on one port + * delaying clean up of packets on a different port + * that is blocked waiting for the cleanup. + */ + tasklet_schedule(&cvm_oct_tx_cleanup_tasklet); + } + + cvm_oct_kick_tx_poll_watchdog(); + + return NETDEV_TX_OK; +} + +/** + * cvm_oct_xmit_pow - transmit a packet to the POW + * @skb: Packet to send + * @dev: Device info structure + + * Returns Always returns zero + */ +int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + void *packet_buffer; + void *copy_location; + + /* Get a work queue entry */ + struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL); + + if (unlikely(!work)) { + printk_ratelimited("%s: Failed to allocate a work queue entry\n", + dev->name); + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + return 0; + } + + /* Get a packet buffer */ + packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL); + if (unlikely(!packet_buffer)) { + printk_ratelimited("%s: Failed to allocate a packet buffer\n", + dev->name); + cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1); + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + return 0; + } + + /* + * Calculate where we need to copy the data to. We need to + * leave 8 bytes for a next pointer (unused). We also need to + * include any configure skip. Then we need to align the IP + * packet src and dest into the same 64bit word. The below + * calculation may add a little extra, but that doesn't + * hurt. + */ + copy_location = packet_buffer + sizeof(u64); + copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6; + + /* + * We have to copy the packet since whoever processes this + * packet will free it to a hardware pool. We can't use the + * trick of counting outstanding packets like in + * cvm_oct_xmit. + */ + memcpy(copy_location, skb->data, skb->len); + + /* + * Fill in some of the work queue fields. 
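They mimic what IPD/PIP would have written for a packet received on this port.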
We may need to add + * more if the software at the other end needs them. + */ + if (!OCTEON_IS_MODEL(OCTEON_CN68XX)) + work->word0.pip.cn38xx.hw_chksum = skb->csum; + work->word1.len = skb->len; + cvmx_wqe_set_port(work, priv->port); + cvmx_wqe_set_qos(work, priv->port & 0x7); + cvmx_wqe_set_grp(work, pow_send_group); + work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE; + work->word1.tag = pow_send_group; /* FIXME */ + /* Default to zero. Sets of zero later are commented out */ + work->word2.u64 = 0; + work->word2.s.bufs = 1; + work->packet_ptr.u64 = 0; + work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location); + work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL; + work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE; + work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7; + + if (skb->protocol == htons(ETH_P_IP)) { + work->word2.s.ip_offset = 14; +#if 0 + work->word2.s.vlan_valid = 0; /* FIXME */ + work->word2.s.vlan_cfi = 0; /* FIXME */ + work->word2.s.vlan_id = 0; /* FIXME */ + work->word2.s.dec_ipcomp = 0; /* FIXME */ +#endif + work->word2.s.tcp_or_udp = + (ip_hdr(skb)->protocol == IPPROTO_TCP) || + (ip_hdr(skb)->protocol == IPPROTO_UDP); +#if 0 + /* FIXME */ + work->word2.s.dec_ipsec = 0; + /* We only support IPv4 right now */ + work->word2.s.is_v6 = 0; + /* Hardware would set to zero */ + work->word2.s.software = 0; + /* No error, packet is internal */ + work->word2.s.L4_error = 0; +#endif + work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) || + (ip_hdr(skb)->frag_off == + cpu_to_be16(1 << 14))); +#if 0 + /* Assume Linux is sending a good packet */ + work->word2.s.IP_exc = 0; +#endif + work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST); + work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST); +#if 0 + /* This is an IP packet */ + work->word2.s.not_IP = 0; + /* No error, packet is internal */ + work->word2.s.rcv_error = 0; + /* No error, packet is internal */ + work->word2.s.err_code = 0; +#endif + + /* + * When copying the data, include 4 bytes of the + * ethernet header to align the same way hardware + * does. + */ + memcpy(work->packet_data, skb->data + 10, + sizeof(work->packet_data)); + } else { +#if 0 + work->word2.snoip.vlan_valid = 0; /* FIXME */ + work->word2.snoip.vlan_cfi = 0; /* FIXME */ + work->word2.snoip.vlan_id = 0; /* FIXME */ + work->word2.snoip.software = 0; /* Hardware would set to zero */ +#endif + work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP); + work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP); + work->word2.snoip.is_bcast = + (skb->pkt_type == PACKET_BROADCAST); + work->word2.snoip.is_mcast = + (skb->pkt_type == PACKET_MULTICAST); + work->word2.snoip.not_IP = 1; /* IP was done up above */ +#if 0 + /* No error, packet is internal */ + work->word2.snoip.rcv_error = 0; + /* No error, packet is internal */ + work->word2.snoip.err_code = 0; +#endif + memcpy(work->packet_data, skb->data, sizeof(work->packet_data)); + } + + /* Submit the packet to the POW */ + cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type, + cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work)); + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + dev_consume_skb_any(skb); + return 0; +} + +/** + * cvm_oct_tx_shutdown_dev - free all skb that are currently queued for TX. 
+ * @dev: Device being shutdown + * + */ +void cvm_oct_tx_shutdown_dev(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + unsigned long flags; + int qos; + + for (qos = 0; qos < 16; qos++) { + spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); + while (skb_queue_len(&priv->tx_free_list[qos])) + dev_kfree_skb_any(__skb_dequeue + (&priv->tx_free_list[qos])); + spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); + } +} + +static void cvm_oct_tx_do_cleanup(unsigned long arg) +{ + int port; + + for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { + if (cvm_oct_device[port]) { + struct net_device *dev = cvm_oct_device[port]; + + cvm_oct_free_tx_skbs(dev); + } + } +} + +static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id) +{ + /* Disable the interrupt. */ + cvmx_write_csr(CVMX_CIU_TIMX(1), 0); + /* Do the work in the tasklet. */ + tasklet_schedule(&cvm_oct_tx_cleanup_tasklet); + return IRQ_HANDLED; +} + +void cvm_oct_tx_initialize(void) +{ + int i; + + /* Disable the interrupt. */ + cvmx_write_csr(CVMX_CIU_TIMX(1), 0); + /* Register an IRQ handler to receive CIU_TIMX(1) interrupts */ + i = request_irq(OCTEON_IRQ_TIMER1, + cvm_oct_tx_cleanup_watchdog, 0, + "Ethernet", cvm_oct_device); + + if (i) + panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1); +} + +void cvm_oct_tx_shutdown(void) +{ + /* Free the interrupt handler */ + free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device); +} diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h new file mode 100644 index 000000000000..78936e9b33b0 --- /dev/null +++ b/drivers/staging/octeon/ethernet-tx.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file is based on code from OCTEON SDK by Cavium Networks. + * + * Copyright (c) 2003-2007 Cavium Networks + */ + +int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev); +int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev); +int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, + int do_free, int qos); +void cvm_oct_tx_initialize(void); +void cvm_oct_tx_shutdown(void); +void cvm_oct_tx_shutdown_dev(struct net_device *dev); diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h new file mode 100644 index 000000000000..2af83a12ca78 --- /dev/null +++ b/drivers/staging/octeon/ethernet-util.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file is based on code from OCTEON SDK by Cavium Networks. 
+ * + * Copyright (c) 2003-2007 Cavium Networks + */ + +/** + * cvm_oct_get_buffer_ptr - convert packet data address to pointer + * @packet_ptr: Packet data hardware address + * + * Returns Packet buffer pointer + */ +static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr) +{ + return cvmx_phys_to_ptr(((packet_ptr.s.addr >> 7) - packet_ptr.s.back) + << 7); +} + +/** + * INTERFACE - convert IPD port to logical interface + * @ipd_port: Port to check + * + * Returns Logical interface + */ +static inline int INTERFACE(int ipd_port) +{ + int interface; + + if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS) + return 10; + interface = cvmx_helper_get_interface_num(ipd_port); + if (interface >= 0) + return interface; + panic("Illegal ipd_port %d passed to %s\n", ipd_port, __func__); +} + +/** + * INDEX - convert IPD/PKO port number to the port's interface index + * @ipd_port: Port to check + * + * Returns Index into interface port list + */ +static inline int INDEX(int ipd_port) +{ + return cvmx_helper_get_interface_index_num(ipd_port); +} diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c new file mode 100644 index 000000000000..f42c3816ce49 --- /dev/null +++ b/drivers/staging/octeon/ethernet.c @@ -0,0 +1,992 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file is based on code from OCTEON SDK by Cavium Networks. + * + * Copyright (c) 2003-2007 Cavium Networks + */ + +#include <linux/platform_device.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/phy.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/of_net.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> + +#include <net/dst.h> + +#include "octeon-ethernet.h" +#include "ethernet-defines.h" +#include "ethernet-mem.h" +#include "ethernet-rx.h" +#include "ethernet-tx.h" +#include "ethernet-mdio.h" +#include "ethernet-util.h" + +#define OCTEON_MAX_MTU 65392 + +static int num_packet_buffers = 1024; +module_param(num_packet_buffers, int, 0444); +MODULE_PARM_DESC(num_packet_buffers, "\n" + "\tNumber of packet buffers to allocate and store in the\n" + "\tFPA. By default, 1024 packet buffers are used.\n"); + +static int pow_receive_group = 15; +module_param(pow_receive_group, int, 0444); +MODULE_PARM_DESC(pow_receive_group, "\n" + "\tPOW group to receive packets from. All ethernet hardware\n" + "\twill be configured to send incoming packets to this POW\n" + "\tgroup. Also any other software can submit packets to this\n" + "\tgroup for the kernel to process."); + +static int receive_group_order; +module_param(receive_group_order, int, 0444); +MODULE_PARM_DESC(receive_group_order, "\n" + "\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n" + "\twill be configured to send incoming packets to multiple POW\n" + "\tgroups. pow_receive_group parameter is ignored when multiple\n" + "\tgroups are taken into use and groups are allocated starting\n" + "\tfrom 0. By default, a single group is used.\n"); + +int pow_send_group = -1; +module_param(pow_send_group, int, 0644); +MODULE_PARM_DESC(pow_send_group, "\n" + "\tPOW group to send packets to other software on. This\n" + "\tcontrols the creation of the virtual device pow0.\n" + "\talways_use_pow also depends on this value."); + +int always_use_pow; +module_param(always_use_pow, int, 0444); +MODULE_PARM_DESC(always_use_pow, "\n" + "\tWhen set, always send to the pow group. 
This will cause\n" + "\tpackets sent to real ethernet devices to be sent to the\n" + "\tPOW group instead of the hardware. Unless some other\n" + "\tapplication changes the config, packets will still be\n" + "\treceived from the low level hardware. Use this option\n" + "\tto allow a CVMX app to intercept all packets from the\n" + "\tlinux kernel. You must specify pow_send_group along with\n" + "\tthis option."); + +char pow_send_list[128] = ""; +module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444); +MODULE_PARM_DESC(pow_send_list, "\n" + "\tComma separated list of ethernet devices that should use the\n" + "\tPOW for transmit instead of the actual ethernet hardware. This\n" + "\tis a per port version of always_use_pow. always_use_pow takes\n" + "\tprecedence over this list. For example, setting this to\n" + "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n" + "\tusing the pow_send_group."); + +int rx_napi_weight = 32; +module_param(rx_napi_weight, int, 0444); +MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter."); + +/* Mask indicating which receive groups are in use. */ +int pow_receive_groups; + +/* + * cvm_oct_poll_queue_stopping - flag to indicate polling should stop. + * + * Set to one right before cvm_oct_poll_queue is destroyed. + */ +atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0); + +/* + * Array of every ethernet device owned by this driver indexed by + * the ipd input port number. + */ +struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS]; + +u64 cvm_oct_tx_poll_interval; + +static void cvm_oct_rx_refill_worker(struct work_struct *work); +static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker); + +static void cvm_oct_rx_refill_worker(struct work_struct *work) +{ + /* + * FPA 0 may have been drained, try to refill it if we need + * more than num_packet_buffers / 2, otherwise normal receive + * processing will refill it. If it were drained, no packets + * could be received so cvm_oct_napi_poll would never be + * invoked to do the refill. 
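+ * Re-arming this worker once a second is cheap and lets the pool recover even after a complete drain.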
+ */ + cvm_oct_rx_refill_pool(num_packet_buffers / 2); + + if (!atomic_read(&cvm_oct_poll_queue_stopping)) + schedule_delayed_work(&cvm_oct_rx_refill_work, HZ); +} + +static void cvm_oct_periodic_worker(struct work_struct *work) +{ + struct octeon_ethernet *priv = container_of(work, + struct octeon_ethernet, + port_periodic_work.work); + + if (priv->poll) + priv->poll(cvm_oct_device[priv->port]); + + cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats + (cvm_oct_device[priv->port]); + + if (!atomic_read(&cvm_oct_poll_queue_stopping)) + schedule_delayed_work(&priv->port_periodic_work, HZ); +} + +static void cvm_oct_configure_common_hw(void) +{ + /* Setup the FPA */ + cvmx_fpa_enable(); + cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, + num_packet_buffers); + cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE, + num_packet_buffers); + if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) + cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL, + CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024); + +#ifdef __LITTLE_ENDIAN + { + union cvmx_ipd_ctl_status ipd_ctl_status; + + ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS); + ipd_ctl_status.s.pkt_lend = 1; + ipd_ctl_status.s.wqe_lend = 1; + cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64); + } +#endif + + cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8); +} + +/** + * cvm_oct_free_work- Free a work queue entry + * + * @work_queue_entry: Work queue entry to free + * + * Returns Zero on success, Negative on failure. + */ +int cvm_oct_free_work(void *work_queue_entry) +{ + struct cvmx_wqe *work = work_queue_entry; + + int segments = work->word2.s.bufs; + union cvmx_buf_ptr segment_ptr = work->packet_ptr; + + while (segments--) { + union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *) + cvmx_phys_to_ptr(segment_ptr.s.addr - 8); + if (unlikely(!segment_ptr.s.i)) + cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr), + segment_ptr.s.pool, + CVMX_FPA_PACKET_POOL_SIZE / 128); + segment_ptr = next_ptr; + } + cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1); + + return 0; +} +EXPORT_SYMBOL(cvm_oct_free_work); + +/** + * cvm_oct_common_get_stats - get the low level ethernet statistics + * @dev: Device to get the statistics from + * + * Returns Pointer to the statistics + */ +static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) +{ + cvmx_pip_port_status_t rx_status; + cvmx_pko_port_status_t tx_status; + struct octeon_ethernet *priv = netdev_priv(dev); + + if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) { + if (octeon_is_simulation()) { + /* The simulator doesn't support statistics */ + memset(&rx_status, 0, sizeof(rx_status)); + memset(&tx_status, 0, sizeof(tx_status)); + } else { + cvmx_pip_get_port_status(priv->port, 1, &rx_status); + cvmx_pko_get_port_status(priv->port, 1, &tx_status); + } + + dev->stats.rx_packets += rx_status.inb_packets; + dev->stats.tx_packets += tx_status.packets; + dev->stats.rx_bytes += rx_status.inb_octets; + dev->stats.tx_bytes += tx_status.octets; + dev->stats.multicast += rx_status.multicast_packets; + dev->stats.rx_crc_errors += rx_status.inb_errors; + dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets; + dev->stats.rx_dropped += rx_status.dropped_packets; + } + + return &dev->stats; +} + +/** + * cvm_oct_common_change_mtu - change the link MTU + * @dev: Device to change + * @new_mtu: The new MTU + * + * Returns Zero on success + */ +static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu) +{ + struct octeon_ethernet *priv = 
netdev_priv(dev); + int interface = INTERFACE(priv->port); +#if IS_ENABLED(CONFIG_VLAN_8021Q) + int vlan_bytes = VLAN_HLEN; +#else + int vlan_bytes = 0; +#endif + int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes; + + dev->mtu = new_mtu; + + if ((interface < 2) && + (cvmx_helper_interface_get_mode(interface) != + CVMX_HELPER_INTERFACE_MODE_SPI)) { + int index = INDEX(priv->port); + /* Add ethernet header and FCS, and VLAN if configured. */ + int max_packet = new_mtu + mtu_overhead; + + if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || + OCTEON_IS_MODEL(OCTEON_CN58XX)) { + /* Signal errors on packets larger than the MTU */ + cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface), + max_packet); + } else { + /* + * Set the hardware to truncate packets larger + * than the MTU and smaller the 64 bytes. + */ + union cvmx_pip_frm_len_chkx frm_len_chk; + + frm_len_chk.u64 = 0; + frm_len_chk.s.minlen = VLAN_ETH_ZLEN; + frm_len_chk.s.maxlen = max_packet; + cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface), + frm_len_chk.u64); + } + /* + * Set the hardware to truncate packets larger than + * the MTU. The jabber register must be set to a + * multiple of 8 bytes, so round up. + */ + cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface), + (max_packet + 7) & ~7u); + } + return 0; +} + +/** + * cvm_oct_common_set_multicast_list - set the multicast list + * @dev: Device to work on + */ +static void cvm_oct_common_set_multicast_list(struct net_device *dev) +{ + union cvmx_gmxx_prtx_cfg gmx_cfg; + struct octeon_ethernet *priv = netdev_priv(dev); + int interface = INTERFACE(priv->port); + + if ((interface < 2) && + (cvmx_helper_interface_get_mode(interface) != + CVMX_HELPER_INTERFACE_MODE_SPI)) { + union cvmx_gmxx_rxx_adr_ctl control; + int index = INDEX(priv->port); + + control.u64 = 0; + control.s.bcst = 1; /* Allow broadcast MAC addresses */ + + if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) || + (dev->flags & IFF_PROMISC)) + /* Force accept multicast packets */ + control.s.mcst = 2; + else + /* Force reject multicast packets */ + control.s.mcst = 1; + + if (dev->flags & IFF_PROMISC) + /* + * Reject matches if promisc. Since CAM is + * shut off, should accept everything. 
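+ * (The CAM enable register is also cleared below when IFF_PROMISC is set.)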
+ */ + control.s.cam_mode = 0; + else + /* Filter packets based on the CAM */ + control.s.cam_mode = 1; + + gmx_cfg.u64 = + cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), + gmx_cfg.u64 & ~1ull); + + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface), + control.u64); + if (dev->flags & IFF_PROMISC) + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN + (index, interface), 0); + else + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN + (index, interface), 1); + + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), + gmx_cfg.u64); + } +} + +static int cvm_oct_set_mac_filter(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + union cvmx_gmxx_prtx_cfg gmx_cfg; + int interface = INTERFACE(priv->port); + + if ((interface < 2) && + (cvmx_helper_interface_get_mode(interface) != + CVMX_HELPER_INTERFACE_MODE_SPI)) { + int i; + u8 *ptr = dev->dev_addr; + u64 mac = 0; + int index = INDEX(priv->port); + + for (i = 0; i < 6; i++) + mac = (mac << 8) | (u64)ptr[i]; + + gmx_cfg.u64 = + cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), + gmx_cfg.u64 & ~1ull); + + cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac); + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), + ptr[0]); + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), + ptr[1]); + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), + ptr[2]); + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), + ptr[3]); + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), + ptr[4]); + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), + ptr[5]); + cvm_oct_common_set_multicast_list(dev); + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), + gmx_cfg.u64); + } + return 0; +} + +/** + * cvm_oct_common_set_mac_address - set the hardware MAC address for a device + * @dev: The device in question. + * @addr: Socket address. + * + * Returns Zero on success + */ +static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) +{ + int r = eth_mac_addr(dev, addr); + + if (r) + return r; + return cvm_oct_set_mac_filter(dev); +} + +/** + * cvm_oct_common_init - per network device initialization + * @dev: Device to initialize + * + * Returns Zero on success + */ +int cvm_oct_common_init(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + const u8 *mac = NULL; + + if (priv->of_node) + mac = of_get_mac_address(priv->of_node); + + if (!IS_ERR_OR_NULL(mac)) + ether_addr_copy(dev->dev_addr, mac); + else + eth_hw_addr_random(dev); + + /* + * Force the interface to use the POW send if always_use_pow + * was specified or it is in the pow send list. + */ + if ((pow_send_group != -1) && + (always_use_pow || strstr(pow_send_list, dev->name))) + priv->queue = -1; + + if (priv->queue != -1) + dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; + + /* We do our own locking, Linux doesn't need to */ + dev->features |= NETIF_F_LLTX; + dev->ethtool_ops = &cvm_oct_ethtool_ops; + + cvm_oct_set_mac_filter(dev); + dev_set_mtu(dev, dev->mtu); + + /* + * Zero out stats for port so we won't mistakenly show + * counters from the bootloader. 
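+ * The memset below throws away whatever ndo_get_stats() just accumulated, including traffic counted before Linux took over the port.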
+ */ + memset(dev->netdev_ops->ndo_get_stats(dev), 0, + sizeof(struct net_device_stats)); + + if (dev->netdev_ops->ndo_stop) + dev->netdev_ops->ndo_stop(dev); + + return 0; +} + +void cvm_oct_common_uninit(struct net_device *dev) +{ + if (dev->phydev) + phy_disconnect(dev->phydev); +} + +int cvm_oct_common_open(struct net_device *dev, + void (*link_poll)(struct net_device *)) +{ + union cvmx_gmxx_prtx_cfg gmx_cfg; + struct octeon_ethernet *priv = netdev_priv(dev); + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + union cvmx_helper_link_info link_info; + int rv; + + rv = cvm_oct_phy_setup_device(dev); + if (rv) + return rv; + + gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + gmx_cfg.s.en = 1; + if (octeon_has_feature(OCTEON_FEATURE_PKND)) + gmx_cfg.s.pknd = priv->port; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); + + if (octeon_is_simulation()) + return 0; + + if (dev->phydev) { + int r = phy_read_status(dev->phydev); + + if (r == 0 && dev->phydev->link == 0) + netif_carrier_off(dev); + cvm_oct_adjust_link(dev); + } else { + link_info = cvmx_helper_link_get(priv->port); + if (!link_info.s.link_up) + netif_carrier_off(dev); + priv->poll = link_poll; + link_poll(dev); + } + + return 0; +} + +void cvm_oct_link_poll(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + union cvmx_helper_link_info link_info; + + link_info = cvmx_helper_link_get(priv->port); + if (link_info.u64 == priv->link_info) + return; + + if (cvmx_helper_link_set(priv->port, link_info)) + link_info.u64 = priv->link_info; + else + priv->link_info = link_info.u64; + + if (link_info.s.link_up) { + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + } else if (netif_carrier_ok(dev)) { + netif_carrier_off(dev); + } + cvm_oct_note_carrier(priv, link_info); +} + +static int cvm_oct_xaui_open(struct net_device *dev) +{ + return cvm_oct_common_open(dev, cvm_oct_link_poll); +} + +static const struct net_device_ops cvm_oct_npi_netdev_ops = { + .ndo_init = cvm_oct_common_init, + .ndo_uninit = cvm_oct_common_uninit, + .ndo_start_xmit = cvm_oct_xmit, + .ndo_set_rx_mode = cvm_oct_common_set_multicast_list, + .ndo_set_mac_address = cvm_oct_common_set_mac_address, + .ndo_do_ioctl = cvm_oct_ioctl, + .ndo_change_mtu = cvm_oct_common_change_mtu, + .ndo_get_stats = cvm_oct_common_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cvm_oct_poll_controller, +#endif +}; + +static const struct net_device_ops cvm_oct_xaui_netdev_ops = { + .ndo_init = cvm_oct_common_init, + .ndo_uninit = cvm_oct_common_uninit, + .ndo_open = cvm_oct_xaui_open, + .ndo_stop = cvm_oct_common_stop, + .ndo_start_xmit = cvm_oct_xmit, + .ndo_set_rx_mode = cvm_oct_common_set_multicast_list, + .ndo_set_mac_address = cvm_oct_common_set_mac_address, + .ndo_do_ioctl = cvm_oct_ioctl, + .ndo_change_mtu = cvm_oct_common_change_mtu, + .ndo_get_stats = cvm_oct_common_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cvm_oct_poll_controller, +#endif +}; + +static const struct net_device_ops cvm_oct_sgmii_netdev_ops = { + .ndo_init = cvm_oct_sgmii_init, + .ndo_uninit = cvm_oct_common_uninit, + .ndo_open = cvm_oct_sgmii_open, + .ndo_stop = cvm_oct_common_stop, + .ndo_start_xmit = cvm_oct_xmit, + .ndo_set_rx_mode = cvm_oct_common_set_multicast_list, + .ndo_set_mac_address = cvm_oct_common_set_mac_address, + .ndo_do_ioctl = cvm_oct_ioctl, + .ndo_change_mtu = cvm_oct_common_change_mtu, + .ndo_get_stats = cvm_oct_common_get_stats, +#ifdef 
CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cvm_oct_poll_controller, +#endif +}; + +static const struct net_device_ops cvm_oct_spi_netdev_ops = { + .ndo_init = cvm_oct_spi_init, + .ndo_uninit = cvm_oct_spi_uninit, + .ndo_start_xmit = cvm_oct_xmit, + .ndo_set_rx_mode = cvm_oct_common_set_multicast_list, + .ndo_set_mac_address = cvm_oct_common_set_mac_address, + .ndo_do_ioctl = cvm_oct_ioctl, + .ndo_change_mtu = cvm_oct_common_change_mtu, + .ndo_get_stats = cvm_oct_common_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cvm_oct_poll_controller, +#endif +}; + +static const struct net_device_ops cvm_oct_rgmii_netdev_ops = { + .ndo_init = cvm_oct_common_init, + .ndo_uninit = cvm_oct_common_uninit, + .ndo_open = cvm_oct_rgmii_open, + .ndo_stop = cvm_oct_common_stop, + .ndo_start_xmit = cvm_oct_xmit, + .ndo_set_rx_mode = cvm_oct_common_set_multicast_list, + .ndo_set_mac_address = cvm_oct_common_set_mac_address, + .ndo_do_ioctl = cvm_oct_ioctl, + .ndo_change_mtu = cvm_oct_common_change_mtu, + .ndo_get_stats = cvm_oct_common_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cvm_oct_poll_controller, +#endif +}; + +static const struct net_device_ops cvm_oct_pow_netdev_ops = { + .ndo_init = cvm_oct_common_init, + .ndo_start_xmit = cvm_oct_xmit_pow, + .ndo_set_rx_mode = cvm_oct_common_set_multicast_list, + .ndo_set_mac_address = cvm_oct_common_set_mac_address, + .ndo_do_ioctl = cvm_oct_ioctl, + .ndo_change_mtu = cvm_oct_common_change_mtu, + .ndo_get_stats = cvm_oct_common_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cvm_oct_poll_controller, +#endif +}; + +static struct device_node *cvm_oct_of_get_child + (const struct device_node *parent, int reg_val) +{ + struct device_node *node = NULL; + int size; + const __be32 *addr; + + for (;;) { + node = of_get_next_child(parent, node); + if (!node) + break; + addr = of_get_property(node, "reg", &size); + if (addr && (be32_to_cpu(*addr) == reg_val)) + break; + } + return node; +} + +static struct device_node *cvm_oct_node_for_port(struct device_node *pip, + int interface, int port) +{ + struct device_node *ni, *np; + + ni = cvm_oct_of_get_child(pip, interface); + if (!ni) + return NULL; + + np = cvm_oct_of_get_child(ni, port); + of_node_put(ni); + + return np; +} + +static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface, + int port) +{ + struct device_node *np = priv->of_node; + u32 delay_value; + bool rx_delay; + bool tx_delay; + + /* By default, both RX/TX delay is enabled in + * __cvmx_helper_rgmii_enable(). 
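+ * An "rx-delay" or "tx-delay" property of 0 disables the corresponding delay; the resulting combination selects the RGMII phy_mode variant below.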
+ */ + rx_delay = true; + tx_delay = true; + + if (!of_property_read_u32(np, "rx-delay", &delay_value)) { + cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value); + rx_delay = delay_value > 0; + } + if (!of_property_read_u32(np, "tx-delay", &delay_value)) { + cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value); + tx_delay = delay_value > 0; + } + + if (!rx_delay && !tx_delay) + priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; + else if (!rx_delay) + priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID; + else if (!tx_delay) + priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID; + else + priv->phy_mode = PHY_INTERFACE_MODE_RGMII; +} + +static int cvm_oct_probe(struct platform_device *pdev) +{ + int num_interfaces; + int interface; + int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE; + int qos; + struct device_node *pip; + int mtu_overhead = ETH_HLEN + ETH_FCS_LEN; + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + mtu_overhead += VLAN_HLEN; +#endif + + octeon_mdiobus_force_mod_depencency(); + + pip = pdev->dev.of_node; + if (!pip) { + pr_err("Error: No 'pip' in /aliases\n"); + return -EINVAL; + } + + cvm_oct_configure_common_hw(); + + cvmx_helper_initialize_packet_io_global(); + + if (receive_group_order) { + if (receive_group_order > 4) + receive_group_order = 4; + pow_receive_groups = (1 << (1 << receive_group_order)) - 1; + } else { + pow_receive_groups = BIT(pow_receive_group); + } + + /* Change the input group for all ports before input is enabled */ + num_interfaces = cvmx_helper_get_number_of_interfaces(); + for (interface = 0; interface < num_interfaces; interface++) { + int num_ports = cvmx_helper_ports_on_interface(interface); + int port; + + for (port = cvmx_helper_get_ipd_port(interface, 0); + port < cvmx_helper_get_ipd_port(interface, num_ports); + port++) { + union cvmx_pip_prt_tagx pip_prt_tagx; + + pip_prt_tagx.u64 = + cvmx_read_csr(CVMX_PIP_PRT_TAGX(port)); + + if (receive_group_order) { + int tag_mask; + + /* We support only 16 groups at the moment, so + * always disable the two additional "hidden" + * tag_mask bits on CN68XX. + */ + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) + pip_prt_tagx.u64 |= 0x3ull << 44; + + tag_mask = ~((1 << receive_group_order) - 1); + pip_prt_tagx.s.grptagbase = 0; + pip_prt_tagx.s.grptagmask = tag_mask; + pip_prt_tagx.s.grptag = 1; + pip_prt_tagx.s.tag_mode = 0; + pip_prt_tagx.s.inc_prt_flag = 1; + pip_prt_tagx.s.ip6_dprt_flag = 1; + pip_prt_tagx.s.ip4_dprt_flag = 1; + pip_prt_tagx.s.ip6_sprt_flag = 1; + pip_prt_tagx.s.ip4_sprt_flag = 1; + pip_prt_tagx.s.ip6_dst_flag = 1; + pip_prt_tagx.s.ip4_dst_flag = 1; + pip_prt_tagx.s.ip6_src_flag = 1; + pip_prt_tagx.s.ip4_src_flag = 1; + pip_prt_tagx.s.grp = 0; + } else { + pip_prt_tagx.s.grptag = 0; + pip_prt_tagx.s.grp = pow_receive_group; + } + + cvmx_write_csr(CVMX_PIP_PRT_TAGX(port), + pip_prt_tagx.u64); + } + } + + cvmx_helper_ipd_and_packet_input_enable(); + + memset(cvm_oct_device, 0, sizeof(cvm_oct_device)); + + /* + * Initialize the FAU used for counting packet buffers that + * need to be freed. + */ + cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); + + /* Initialize the FAU used for counting tx SKBs that need to be freed */ + cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0); + + if ((pow_send_group != -1)) { + struct net_device *dev; + + dev = alloc_etherdev(sizeof(struct octeon_ethernet)); + if (dev) { + /* Initialize the device private structure. 
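This is the virtual pow%d device: queue is set to -1, so every transmit goes through cvm_oct_xmit_pow() rather than the PKO hardware.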
*/ + struct octeon_ethernet *priv = netdev_priv(dev); + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &cvm_oct_pow_netdev_ops; + priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; + priv->port = CVMX_PIP_NUM_INPUT_PORTS; + priv->queue = -1; + strscpy(dev->name, "pow%d", sizeof(dev->name)); + for (qos = 0; qos < 16; qos++) + skb_queue_head_init(&priv->tx_free_list[qos]); + dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead; + dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead; + + if (register_netdev(dev) < 0) { + pr_err("Failed to register ethernet device for POW\n"); + free_netdev(dev); + } else { + cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev; + pr_info("%s: POW send group %d, receive group %d\n", + dev->name, pow_send_group, + pow_receive_group); + } + } else { + pr_err("Failed to allocate ethernet device for POW\n"); + } + } + + num_interfaces = cvmx_helper_get_number_of_interfaces(); + for (interface = 0; interface < num_interfaces; interface++) { + cvmx_helper_interface_mode_t imode = + cvmx_helper_interface_get_mode(interface); + int num_ports = cvmx_helper_ports_on_interface(interface); + int port; + int port_index; + + for (port_index = 0, + port = cvmx_helper_get_ipd_port(interface, 0); + port < cvmx_helper_get_ipd_port(interface, num_ports); + port_index++, port++) { + struct octeon_ethernet *priv; + struct net_device *dev = + alloc_etherdev(sizeof(struct octeon_ethernet)); + if (!dev) { + pr_err("Failed to allocate ethernet device for port %d\n", + port); + continue; + } + + /* Initialize the device private structure. */ + SET_NETDEV_DEV(dev, &pdev->dev); + priv = netdev_priv(dev); + priv->netdev = dev; + priv->of_node = cvm_oct_node_for_port(pip, interface, + port_index); + + INIT_DELAYED_WORK(&priv->port_periodic_work, + cvm_oct_periodic_worker); + priv->imode = imode; + priv->port = port; + priv->queue = cvmx_pko_get_base_queue(priv->port); + priv->fau = fau - cvmx_pko_get_num_queues(port) * 4; + priv->phy_mode = PHY_INTERFACE_MODE_NA; + for (qos = 0; qos < 16; qos++) + skb_queue_head_init(&priv->tx_free_list[qos]); + for (qos = 0; qos < cvmx_pko_get_num_queues(port); + qos++) + cvmx_fau_atomic_write32(priv->fau + qos * 4, 0); + dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead; + dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead; + + switch (priv->imode) { + /* These types don't support ports to IPD/PKO */ + case CVMX_HELPER_INTERFACE_MODE_DISABLED: + case CVMX_HELPER_INTERFACE_MODE_PCIE: + case CVMX_HELPER_INTERFACE_MODE_PICMG: + break; + + case CVMX_HELPER_INTERFACE_MODE_NPI: + dev->netdev_ops = &cvm_oct_npi_netdev_ops; + strscpy(dev->name, "npi%d", sizeof(dev->name)); + break; + + case CVMX_HELPER_INTERFACE_MODE_XAUI: + dev->netdev_ops = &cvm_oct_xaui_netdev_ops; + strscpy(dev->name, "xaui%d", sizeof(dev->name)); + break; + + case CVMX_HELPER_INTERFACE_MODE_LOOP: + dev->netdev_ops = &cvm_oct_npi_netdev_ops; + strscpy(dev->name, "loop%d", sizeof(dev->name)); + break; + + case CVMX_HELPER_INTERFACE_MODE_SGMII: + priv->phy_mode = PHY_INTERFACE_MODE_SGMII; + dev->netdev_ops = &cvm_oct_sgmii_netdev_ops; + strscpy(dev->name, "eth%d", sizeof(dev->name)); + break; + + case CVMX_HELPER_INTERFACE_MODE_SPI: + dev->netdev_ops = &cvm_oct_spi_netdev_ops; + strscpy(dev->name, "spi%d", sizeof(dev->name)); + break; + + case CVMX_HELPER_INTERFACE_MODE_GMII: + priv->phy_mode = PHY_INTERFACE_MODE_GMII; + dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; + strscpy(dev->name, "eth%d", sizeof(dev->name)); + break; + + case CVMX_HELPER_INTERFACE_MODE_RGMII: + dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; + 
strscpy(dev->name, "eth%d", sizeof(dev->name)); + cvm_set_rgmii_delay(priv, interface, + port_index); + break; + } + + if (!dev->netdev_ops) { + free_netdev(dev); + } else if (register_netdev(dev) < 0) { + pr_err("Failed to register ethernet device for interface %d, port %d\n", + interface, priv->port); + free_netdev(dev); + } else { + cvm_oct_device[priv->port] = dev; + fau -= + cvmx_pko_get_num_queues(priv->port) * + sizeof(u32); + schedule_delayed_work(&priv->port_periodic_work, + HZ); + } + } + } + + cvm_oct_tx_initialize(); + cvm_oct_rx_initialize(); + + /* + * 150 uS: about 10 1500-byte packets at 1GE. + */ + cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000); + + schedule_delayed_work(&cvm_oct_rx_refill_work, HZ); + + return 0; +} + +static int cvm_oct_remove(struct platform_device *pdev) +{ + int port; + + cvmx_ipd_disable(); + + atomic_inc_return(&cvm_oct_poll_queue_stopping); + cancel_delayed_work_sync(&cvm_oct_rx_refill_work); + + cvm_oct_rx_shutdown(); + cvm_oct_tx_shutdown(); + + cvmx_pko_disable(); + + /* Free the ethernet devices */ + for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { + if (cvm_oct_device[port]) { + struct net_device *dev = cvm_oct_device[port]; + struct octeon_ethernet *priv = netdev_priv(dev); + + cancel_delayed_work_sync(&priv->port_periodic_work); + + cvm_oct_tx_shutdown_dev(dev); + unregister_netdev(dev); + free_netdev(dev); + cvm_oct_device[port] = NULL; + } + } + + cvmx_pko_shutdown(); + + cvmx_ipd_free_ptr(); + + /* Free the HW pools */ + cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, + num_packet_buffers); + cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE, + num_packet_buffers); + if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) + cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL, + CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128); + return 0; +} + +static const struct of_device_id cvm_oct_match[] = { + { + .compatible = "cavium,octeon-3860-pip", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, cvm_oct_match); + +static struct platform_driver cvm_oct_driver = { + .probe = cvm_oct_probe, + .remove = cvm_oct_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = cvm_oct_match, + }, +}; + +module_platform_driver(cvm_oct_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>"); +MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver."); diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h new file mode 100644 index 000000000000..a6140705706f --- /dev/null +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file is based on code from OCTEON SDK by Cavium Networks. + * + * Copyright (c) 2003-2010 Cavium Networks + */ + +/* + * External interface for the Cavium Octeon ethernet driver. 
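+ * It is shared by ethernet.c, ethernet-rx/tx and the per-mode files (rgmii, sgmii, spi, xaui).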
+ */ +#ifndef OCTEON_ETHERNET_H +#define OCTEON_ETHERNET_H + +#include <linux/of.h> +#include <linux/phy.h> + +#ifdef CONFIG_CAVIUM_OCTEON_SOC + +#include <asm/octeon/octeon.h> + +#include <asm/octeon/cvmx-asxx-defs.h> +#include <asm/octeon/cvmx-config.h> +#include <asm/octeon/cvmx-fau.h> +#include <asm/octeon/cvmx-gmxx-defs.h> +#include <asm/octeon/cvmx-helper.h> +#include <asm/octeon/cvmx-helper-util.h> +#include <asm/octeon/cvmx-ipd.h> +#include <asm/octeon/cvmx-ipd-defs.h> +#include <asm/octeon/cvmx-npi-defs.h> +#include <asm/octeon/cvmx-pip.h> +#include <asm/octeon/cvmx-pko.h> +#include <asm/octeon/cvmx-pow.h> +#include <asm/octeon/cvmx-scratch.h> +#include <asm/octeon/cvmx-spi.h> +#include <asm/octeon/cvmx-spxx-defs.h> +#include <asm/octeon/cvmx-stxx-defs.h> +#include <asm/octeon/cvmx-wqe.h> + +#else + +#include "octeon-stubs.h" + +#endif + +/** + * This is the definition of the Ethernet driver's private + * driver state stored in netdev_priv(dev). + */ +struct octeon_ethernet { + /* PKO hardware output port */ + int port; + /* PKO hardware queue for the port */ + int queue; + /* Hardware fetch and add to count outstanding tx buffers */ + int fau; + /* My netdev. */ + struct net_device *netdev; + /* + * Type of port. This is one of the enums in + * cvmx_helper_interface_mode_t + */ + int imode; + /* PHY mode */ + phy_interface_t phy_mode; + /* List of outstanding tx buffers per queue */ + struct sk_buff_head tx_free_list[16]; + unsigned int last_speed; + unsigned int last_link; + /* Last negotiated link state */ + u64 link_info; + /* Called periodically to check link status */ + void (*poll)(struct net_device *dev); + struct delayed_work port_periodic_work; + struct device_node *of_node; +}; + +int cvm_oct_free_work(void *work_queue_entry); + +int cvm_oct_rgmii_open(struct net_device *dev); + +int cvm_oct_sgmii_init(struct net_device *dev); +int cvm_oct_sgmii_open(struct net_device *dev); + +int cvm_oct_spi_init(struct net_device *dev); +void cvm_oct_spi_uninit(struct net_device *dev); + +int cvm_oct_common_init(struct net_device *dev); +void cvm_oct_common_uninit(struct net_device *dev); +void cvm_oct_adjust_link(struct net_device *dev); +int cvm_oct_common_stop(struct net_device *dev); +int cvm_oct_common_open(struct net_device *dev, + void (*link_poll)(struct net_device *)); +void cvm_oct_note_carrier(struct octeon_ethernet *priv, + union cvmx_helper_link_info li); +void cvm_oct_link_poll(struct net_device *dev); + +extern int always_use_pow; +extern int pow_send_group; +extern int pow_receive_groups; +extern char pow_send_list[]; +extern struct net_device *cvm_oct_device[]; +extern atomic_t cvm_oct_poll_queue_stopping; +extern u64 cvm_oct_tx_poll_interval; + +extern int rx_napi_weight; + +#endif diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h new file mode 100644 index 000000000000..d06743504f2b --- /dev/null +++ b/drivers/staging/octeon/octeon-stubs.h @@ -0,0 +1,1434 @@ +#define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512 + +#ifndef XKPHYS_TO_PHYS +# define XKPHYS_TO_PHYS(p) (p) +#endif + +#define OCTEON_IRQ_WORKQ0 0 +#define OCTEON_IRQ_RML 0 +#define OCTEON_IRQ_TIMER1 0 +#define OCTEON_IS_MODEL(x) 0 +#define octeon_has_feature(x) 0 +#define octeon_get_clock_rate() 0 + +#define CVMX_SYNCIOBDMA do { } while (0) + +#define CVMX_HELPER_INPUT_TAG_TYPE 0 +#define CVMX_HELPER_FIRST_MBUFF_SKIP 7 +#define CVMX_FAU_REG_END (2048) +#define CVMX_FPA_OUTPUT_BUFFER_POOL (2) +#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 16 +#define CVMX_FPA_PACKET_POOL (0) 
+#define CVMX_FPA_PACKET_POOL_SIZE 16 +#define CVMX_FPA_WQE_POOL (1) +#define CVMX_FPA_WQE_POOL_SIZE 16 +#define CVMX_GMXX_RXX_ADR_CAM_EN(a, b) ((a) + (b)) +#define CVMX_GMXX_RXX_ADR_CTL(a, b) ((a) + (b)) +#define CVMX_GMXX_PRTX_CFG(a, b) ((a) + (b)) +#define CVMX_GMXX_RXX_FRM_MAX(a, b) ((a) + (b)) +#define CVMX_GMXX_RXX_JABBER(a, b) ((a) + (b)) +#define CVMX_IPD_CTL_STATUS 0 +#define CVMX_PIP_FRM_LEN_CHKX(a) (a) +#define CVMX_PIP_NUM_INPUT_PORTS 1 +#define CVMX_SCR_SCRATCH 0 +#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 2 +#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 2 +#define CVMX_IPD_SUB_PORT_FCS 0 +#define CVMX_SSO_WQ_IQ_DIS 0 +#define CVMX_SSO_WQ_INT 0 +#define CVMX_POW_WQ_INT 0 +#define CVMX_SSO_WQ_INT_PC 0 +#define CVMX_NPI_RSL_INT_BLOCKS 0 +#define CVMX_POW_WQ_INT_PC 0 + +union cvmx_pip_wqe_word2 { + uint64_t u64; + struct { + uint64_t bufs:8; + uint64_t ip_offset:8; + uint64_t vlan_valid:1; + uint64_t vlan_stacked:1; + uint64_t unassigned:1; + uint64_t vlan_cfi:1; + uint64_t vlan_id:12; + uint64_t pr:4; + uint64_t unassigned2:8; + uint64_t dec_ipcomp:1; + uint64_t tcp_or_udp:1; + uint64_t dec_ipsec:1; + uint64_t is_v6:1; + uint64_t software:1; + uint64_t L4_error:1; + uint64_t is_frag:1; + uint64_t IP_exc:1; + uint64_t is_bcast:1; + uint64_t is_mcast:1; + uint64_t not_IP:1; + uint64_t rcv_error:1; + uint64_t err_code:8; + } s; + struct { + uint64_t bufs:8; + uint64_t ip_offset:8; + uint64_t vlan_valid:1; + uint64_t vlan_stacked:1; + uint64_t unassigned:1; + uint64_t vlan_cfi:1; + uint64_t vlan_id:12; + uint64_t port:12; + uint64_t dec_ipcomp:1; + uint64_t tcp_or_udp:1; + uint64_t dec_ipsec:1; + uint64_t is_v6:1; + uint64_t software:1; + uint64_t L4_error:1; + uint64_t is_frag:1; + uint64_t IP_exc:1; + uint64_t is_bcast:1; + uint64_t is_mcast:1; + uint64_t not_IP:1; + uint64_t rcv_error:1; + uint64_t err_code:8; + } s_cn68xx; + + struct { + uint64_t unused1:16; + uint64_t vlan:16; + uint64_t unused2:32; + } svlan; + struct { + uint64_t bufs:8; + uint64_t unused:8; + uint64_t vlan_valid:1; + uint64_t vlan_stacked:1; + uint64_t unassigned:1; + uint64_t vlan_cfi:1; + uint64_t vlan_id:12; + uint64_t pr:4; + uint64_t unassigned2:12; + uint64_t software:1; + uint64_t unassigned3:1; + uint64_t is_rarp:1; + uint64_t is_arp:1; + uint64_t is_bcast:1; + uint64_t is_mcast:1; + uint64_t not_IP:1; + uint64_t rcv_error:1; + uint64_t err_code:8; + } snoip; + +}; + +union cvmx_pip_wqe_word0 { + struct { + uint64_t next_ptr:40; + uint8_t unused; + __wsum hw_chksum; + } cn38xx; + struct { + uint64_t pknd:6; /* 0..5 */ + uint64_t unused2:2; /* 6..7 */ + uint64_t bpid:6; /* 8..13 */ + uint64_t unused1:18; /* 14..31 */ + uint64_t l2ptr:8; /* 32..39 */ + uint64_t l3ptr:8; /* 40..47 */ + uint64_t unused0:8; /* 48..55 */ + uint64_t l4ptr:8; /* 56..63 */ + } cn68xx; +}; + +union cvmx_wqe_word0 { + uint64_t u64; + union cvmx_pip_wqe_word0 pip; +}; + +union cvmx_wqe_word1 { + uint64_t u64; + struct { + uint64_t tag:32; + uint64_t tag_type:2; + uint64_t varies:14; + uint64_t len:16; + }; + struct { + uint64_t tag:32; + uint64_t tag_type:2; + uint64_t zero_2:3; + uint64_t grp:6; + uint64_t zero_1:1; + uint64_t qos:3; + uint64_t zero_0:1; + uint64_t len:16; + } cn68xx; + struct { + uint64_t tag:32; + uint64_t tag_type:2; + uint64_t zero_2:1; + uint64_t grp:4; + uint64_t qos:3; + uint64_t ipprt:6; + uint64_t len:16; + } cn38xx; +}; + +union cvmx_buf_ptr { + void *ptr; + uint64_t u64; + struct { + uint64_t i:1; + uint64_t back:4; + uint64_t pool:3; + uint64_t size:16; + uint64_t addr:40; + } s; +}; + +struct 
cvmx_wqe { + union cvmx_wqe_word0 word0; + union cvmx_wqe_word1 word1; + union cvmx_pip_wqe_word2 word2; + union cvmx_buf_ptr packet_ptr; + uint8_t packet_data[96]; +}; + +union cvmx_helper_link_info { + uint64_t u64; + struct { + uint64_t reserved_20_63:44; + uint64_t link_up:1; /**< Is the physical link up? */ + uint64_t full_duplex:1; /**< 1 if the link is full duplex */ + uint64_t speed:18; /**< Speed of the link in Mbps */ + } s; +}; + +enum cvmx_fau_reg_32 { + CVMX_FAU_REG_32_START = 0, +}; + +enum cvmx_fau_op_size { + CVMX_FAU_OP_SIZE_8 = 0, + CVMX_FAU_OP_SIZE_16 = 1, + CVMX_FAU_OP_SIZE_32 = 2, + CVMX_FAU_OP_SIZE_64 = 3 +}; + +typedef enum { + CVMX_SPI_MODE_UNKNOWN = 0, + CVMX_SPI_MODE_TX_HALFPLEX = 1, + CVMX_SPI_MODE_RX_HALFPLEX = 2, + CVMX_SPI_MODE_DUPLEX = 3 +} cvmx_spi_mode_t; + +typedef enum { + CVMX_HELPER_INTERFACE_MODE_DISABLED, + CVMX_HELPER_INTERFACE_MODE_RGMII, + CVMX_HELPER_INTERFACE_MODE_GMII, + CVMX_HELPER_INTERFACE_MODE_SPI, + CVMX_HELPER_INTERFACE_MODE_PCIE, + CVMX_HELPER_INTERFACE_MODE_XAUI, + CVMX_HELPER_INTERFACE_MODE_SGMII, + CVMX_HELPER_INTERFACE_MODE_PICMG, + CVMX_HELPER_INTERFACE_MODE_NPI, + CVMX_HELPER_INTERFACE_MODE_LOOP, +} cvmx_helper_interface_mode_t; + +typedef enum { + CVMX_POW_WAIT = 1, + CVMX_POW_NO_WAIT = 0, +} cvmx_pow_wait_t; + +typedef enum { + CVMX_PKO_LOCK_NONE = 0, + CVMX_PKO_LOCK_ATOMIC_TAG = 1, + CVMX_PKO_LOCK_CMD_QUEUE = 2, +} cvmx_pko_lock_t; + +typedef enum { + CVMX_PKO_SUCCESS, + CVMX_PKO_INVALID_PORT, + CVMX_PKO_INVALID_QUEUE, + CVMX_PKO_INVALID_PRIORITY, + CVMX_PKO_NO_MEMORY, + CVMX_PKO_PORT_ALREADY_SETUP, + CVMX_PKO_CMD_QUEUE_INIT_ERROR +} cvmx_pko_status_t; + +enum cvmx_pow_tag_type { + CVMX_POW_TAG_TYPE_ORDERED = 0L, + CVMX_POW_TAG_TYPE_ATOMIC = 1L, + CVMX_POW_TAG_TYPE_NULL = 2L, + CVMX_POW_TAG_TYPE_NULL_NULL = 3L +}; + +union cvmx_ipd_ctl_status { + uint64_t u64; + struct cvmx_ipd_ctl_status_s { + uint64_t reserved_18_63:46; + uint64_t use_sop:1; + uint64_t rst_done:1; + uint64_t clken:1; + uint64_t no_wptr:1; + uint64_t pq_apkt:1; + uint64_t pq_nabuf:1; + uint64_t ipd_full:1; + uint64_t pkt_off:1; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } s; + struct cvmx_ipd_ctl_status_cn30xx { + uint64_t reserved_10_63:54; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn30xx; + struct cvmx_ipd_ctl_status_cn38xxp2 { + uint64_t reserved_9_63:55; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn38xxp2; + struct cvmx_ipd_ctl_status_cn50xx { + uint64_t reserved_15_63:49; + uint64_t no_wptr:1; + uint64_t pq_apkt:1; + uint64_t pq_nabuf:1; + uint64_t ipd_full:1; + uint64_t pkt_off:1; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn50xx; + struct cvmx_ipd_ctl_status_cn58xx { + uint64_t reserved_12_63:52; + uint64_t ipd_full:1; + uint64_t pkt_off:1; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn58xx; + struct 
cvmx_ipd_ctl_status_cn63xxp1 { + uint64_t reserved_16_63:48; + uint64_t clken:1; + uint64_t no_wptr:1; + uint64_t pq_apkt:1; + uint64_t pq_nabuf:1; + uint64_t ipd_full:1; + uint64_t pkt_off:1; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn63xxp1; +}; + +union cvmx_ipd_sub_port_fcs { + uint64_t u64; + struct cvmx_ipd_sub_port_fcs_s { + uint64_t port_bit:32; + uint64_t reserved_32_35:4; + uint64_t port_bit2:4; + uint64_t reserved_40_63:24; + } s; + struct cvmx_ipd_sub_port_fcs_cn30xx { + uint64_t port_bit:3; + uint64_t reserved_3_63:61; + } cn30xx; + struct cvmx_ipd_sub_port_fcs_cn38xx { + uint64_t port_bit:32; + uint64_t reserved_32_63:32; + } cn38xx; +}; + +union cvmx_ipd_sub_port_qos_cnt { + uint64_t u64; + struct cvmx_ipd_sub_port_qos_cnt_s { + uint64_t cnt:32; + uint64_t port_qos:9; + uint64_t reserved_41_63:23; + } s; +}; + +typedef struct { + uint32_t dropped_octets; + uint32_t dropped_packets; + uint32_t pci_raw_packets; + uint32_t octets; + uint32_t packets; + uint32_t multicast_packets; + uint32_t broadcast_packets; + uint32_t len_64_packets; + uint32_t len_65_127_packets; + uint32_t len_128_255_packets; + uint32_t len_256_511_packets; + uint32_t len_512_1023_packets; + uint32_t len_1024_1518_packets; + uint32_t len_1519_max_packets; + uint32_t fcs_align_err_packets; + uint32_t runt_packets; + uint32_t runt_crc_packets; + uint32_t oversize_packets; + uint32_t oversize_crc_packets; + uint32_t inb_packets; + uint64_t inb_octets; + uint16_t inb_errors; +} cvmx_pip_port_status_t; + +typedef struct { + uint32_t packets; + uint64_t octets; + uint64_t doorbell; +} cvmx_pko_port_status_t; + +union cvmx_pip_frm_len_chkx { + uint64_t u64; + struct cvmx_pip_frm_len_chkx_s { + uint64_t reserved_32_63:32; + uint64_t maxlen:16; + uint64_t minlen:16; + } s; +}; + +union cvmx_gmxx_rxx_frm_ctl { + uint64_t u64; + struct cvmx_gmxx_rxx_frm_ctl_s { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t vlan_len:1; + uint64_t pad_len:1; + uint64_t pre_align:1; + uint64_t null_dis:1; + uint64_t reserved_11_11:1; + uint64_t ptp_mode:1; + uint64_t reserved_13_63:51; + } s; + struct cvmx_gmxx_rxx_frm_ctl_cn30xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t vlan_len:1; + uint64_t pad_len:1; + uint64_t reserved_9_63:55; + } cn30xx; + struct cvmx_gmxx_rxx_frm_ctl_cn31xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t vlan_len:1; + uint64_t reserved_8_63:56; + } cn31xx; + struct cvmx_gmxx_rxx_frm_ctl_cn50xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t reserved_7_8:2; + uint64_t pre_align:1; + uint64_t null_dis:1; + uint64_t reserved_11_63:53; + } cn50xx; + struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t reserved_7_8:2; + uint64_t pre_align:1; + uint64_t reserved_10_63:54; + } cn56xxp1; + struct 
cvmx_gmxx_rxx_frm_ctl_cn58xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t vlan_len:1; + uint64_t pad_len:1; + uint64_t pre_align:1; + uint64_t null_dis:1; + uint64_t reserved_11_63:53; + } cn58xx; + struct cvmx_gmxx_rxx_frm_ctl_cn61xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t reserved_7_8:2; + uint64_t pre_align:1; + uint64_t null_dis:1; + uint64_t reserved_11_11:1; + uint64_t ptp_mode:1; + uint64_t reserved_13_63:51; + } cn61xx; +}; + +union cvmx_gmxx_rxx_int_reg { + uint64_t u64; + struct cvmx_gmxx_rxx_int_reg_s { + uint64_t minerr:1; + uint64_t carext:1; + uint64_t maxerr:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t alnerr:1; + uint64_t lenerr:1; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t niberr:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t phy_link:1; + uint64_t phy_spd:1; + uint64_t phy_dupx:1; + uint64_t pause_drp:1; + uint64_t loc_fault:1; + uint64_t rem_fault:1; + uint64_t bad_seq:1; + uint64_t bad_term:1; + uint64_t unsop:1; + uint64_t uneop:1; + uint64_t undat:1; + uint64_t hg2fld:1; + uint64_t hg2cc:1; + uint64_t reserved_29_63:35; + } s; + struct cvmx_gmxx_rxx_int_reg_cn30xx { + uint64_t minerr:1; + uint64_t carext:1; + uint64_t maxerr:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t alnerr:1; + uint64_t lenerr:1; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t niberr:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t phy_link:1; + uint64_t phy_spd:1; + uint64_t phy_dupx:1; + uint64_t reserved_19_63:45; + } cn30xx; + struct cvmx_gmxx_rxx_int_reg_cn50xx { + uint64_t reserved_0_0:1; + uint64_t carext:1; + uint64_t reserved_2_2:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t alnerr:1; + uint64_t reserved_6_6:1; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t niberr:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t phy_link:1; + uint64_t phy_spd:1; + uint64_t phy_dupx:1; + uint64_t pause_drp:1; + uint64_t reserved_20_63:44; + } cn50xx; + struct cvmx_gmxx_rxx_int_reg_cn52xx { + uint64_t reserved_0_0:1; + uint64_t carext:1; + uint64_t reserved_2_2:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t reserved_5_6:2; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t reserved_9_9:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t reserved_16_18:3; + uint64_t pause_drp:1; + uint64_t loc_fault:1; + uint64_t rem_fault:1; + uint64_t bad_seq:1; + uint64_t bad_term:1; + uint64_t unsop:1; + uint64_t uneop:1; + uint64_t undat:1; + uint64_t hg2fld:1; + uint64_t hg2cc:1; + uint64_t reserved_29_63:35; + } cn52xx; + struct cvmx_gmxx_rxx_int_reg_cn56xxp1 { + uint64_t reserved_0_0:1; + uint64_t carext:1; + uint64_t reserved_2_2:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t reserved_5_6:2; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t reserved_9_9:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t reserved_16_18:3; + uint64_t pause_drp:1; + 
uint64_t loc_fault:1; + uint64_t rem_fault:1; + uint64_t bad_seq:1; + uint64_t bad_term:1; + uint64_t unsop:1; + uint64_t uneop:1; + uint64_t undat:1; + uint64_t reserved_27_63:37; + } cn56xxp1; + struct cvmx_gmxx_rxx_int_reg_cn58xx { + uint64_t minerr:1; + uint64_t carext:1; + uint64_t maxerr:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t alnerr:1; + uint64_t lenerr:1; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t niberr:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t phy_link:1; + uint64_t phy_spd:1; + uint64_t phy_dupx:1; + uint64_t pause_drp:1; + uint64_t reserved_20_63:44; + } cn58xx; + struct cvmx_gmxx_rxx_int_reg_cn61xx { + uint64_t minerr:1; + uint64_t carext:1; + uint64_t reserved_2_2:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t reserved_5_6:2; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t reserved_9_9:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t reserved_16_18:3; + uint64_t pause_drp:1; + uint64_t loc_fault:1; + uint64_t rem_fault:1; + uint64_t bad_seq:1; + uint64_t bad_term:1; + uint64_t unsop:1; + uint64_t uneop:1; + uint64_t undat:1; + uint64_t hg2fld:1; + uint64_t hg2cc:1; + uint64_t reserved_29_63:35; + } cn61xx; +}; + +union cvmx_gmxx_prtx_cfg { + uint64_t u64; + struct cvmx_gmxx_prtx_cfg_s { + uint64_t reserved_22_63:42; + uint64_t pknd:6; + uint64_t reserved_14_15:2; + uint64_t tx_idle:1; + uint64_t rx_idle:1; + uint64_t reserved_9_11:3; + uint64_t speed_msb:1; + uint64_t reserved_4_7:4; + uint64_t slottime:1; + uint64_t duplex:1; + uint64_t speed:1; + uint64_t en:1; + } s; + struct cvmx_gmxx_prtx_cfg_cn30xx { + uint64_t reserved_4_63:60; + uint64_t slottime:1; + uint64_t duplex:1; + uint64_t speed:1; + uint64_t en:1; + } cn30xx; + struct cvmx_gmxx_prtx_cfg_cn52xx { + uint64_t reserved_14_63:50; + uint64_t tx_idle:1; + uint64_t rx_idle:1; + uint64_t reserved_9_11:3; + uint64_t speed_msb:1; + uint64_t reserved_4_7:4; + uint64_t slottime:1; + uint64_t duplex:1; + uint64_t speed:1; + uint64_t en:1; + } cn52xx; +}; + +union cvmx_gmxx_rxx_adr_ctl { + uint64_t u64; + struct cvmx_gmxx_rxx_adr_ctl_s { + uint64_t reserved_4_63:60; + uint64_t cam_mode:1; + uint64_t mcst:2; + uint64_t bcst:1; + } s; +}; + +union cvmx_pip_prt_tagx { + uint64_t u64; + struct cvmx_pip_prt_tagx_s { + uint64_t reserved_54_63:10; + uint64_t portadd_en:1; + uint64_t inc_hwchk:1; + uint64_t reserved_50_51:2; + uint64_t grptagbase_msb:2; + uint64_t reserved_46_47:2; + uint64_t grptagmask_msb:2; + uint64_t reserved_42_43:2; + uint64_t grp_msb:2; + uint64_t grptagbase:4; + uint64_t grptagmask:4; + uint64_t grptag:1; + uint64_t grptag_mskip:1; + uint64_t tag_mode:2; + uint64_t inc_vs:2; + uint64_t inc_vlan:1; + uint64_t inc_prt_flag:1; + uint64_t ip6_dprt_flag:1; + uint64_t ip4_dprt_flag:1; + uint64_t ip6_sprt_flag:1; + uint64_t ip4_sprt_flag:1; + uint64_t ip6_nxth_flag:1; + uint64_t ip4_pctl_flag:1; + uint64_t ip6_dst_flag:1; + uint64_t ip4_dst_flag:1; + uint64_t ip6_src_flag:1; + uint64_t ip4_src_flag:1; + uint64_t tcp6_tag_type:2; + uint64_t tcp4_tag_type:2; + uint64_t ip6_tag_type:2; + uint64_t ip4_tag_type:2; + uint64_t non_tag_type:2; + uint64_t grp:4; + } s; + struct cvmx_pip_prt_tagx_cn30xx { + uint64_t reserved_40_63:24; + uint64_t grptagbase:4; + uint64_t grptagmask:4; + uint64_t grptag:1; + uint64_t reserved_30_30:1; + uint64_t tag_mode:2; + uint64_t inc_vs:2; + uint64_t inc_vlan:1; 
+ uint64_t inc_prt_flag:1; + uint64_t ip6_dprt_flag:1; + uint64_t ip4_dprt_flag:1; + uint64_t ip6_sprt_flag:1; + uint64_t ip4_sprt_flag:1; + uint64_t ip6_nxth_flag:1; + uint64_t ip4_pctl_flag:1; + uint64_t ip6_dst_flag:1; + uint64_t ip4_dst_flag:1; + uint64_t ip6_src_flag:1; + uint64_t ip4_src_flag:1; + uint64_t tcp6_tag_type:2; + uint64_t tcp4_tag_type:2; + uint64_t ip6_tag_type:2; + uint64_t ip4_tag_type:2; + uint64_t non_tag_type:2; + uint64_t grp:4; + } cn30xx; + struct cvmx_pip_prt_tagx_cn50xx { + uint64_t reserved_40_63:24; + uint64_t grptagbase:4; + uint64_t grptagmask:4; + uint64_t grptag:1; + uint64_t grptag_mskip:1; + uint64_t tag_mode:2; + uint64_t inc_vs:2; + uint64_t inc_vlan:1; + uint64_t inc_prt_flag:1; + uint64_t ip6_dprt_flag:1; + uint64_t ip4_dprt_flag:1; + uint64_t ip6_sprt_flag:1; + uint64_t ip4_sprt_flag:1; + uint64_t ip6_nxth_flag:1; + uint64_t ip4_pctl_flag:1; + uint64_t ip6_dst_flag:1; + uint64_t ip4_dst_flag:1; + uint64_t ip6_src_flag:1; + uint64_t ip4_src_flag:1; + uint64_t tcp6_tag_type:2; + uint64_t tcp4_tag_type:2; + uint64_t ip6_tag_type:2; + uint64_t ip4_tag_type:2; + uint64_t non_tag_type:2; + uint64_t grp:4; + } cn50xx; +}; + +union cvmx_spxx_int_reg { + uint64_t u64; + struct cvmx_spxx_int_reg_s { + uint64_t reserved_32_63:32; + uint64_t spf:1; + uint64_t reserved_12_30:19; + uint64_t calerr:1; + uint64_t syncerr:1; + uint64_t diperr:1; + uint64_t tpaovr:1; + uint64_t rsverr:1; + uint64_t drwnng:1; + uint64_t clserr:1; + uint64_t spiovr:1; + uint64_t reserved_2_3:2; + uint64_t abnorm:1; + uint64_t prtnxa:1; + } s; +}; + +union cvmx_spxx_int_msk { + uint64_t u64; + struct cvmx_spxx_int_msk_s { + uint64_t reserved_12_63:52; + uint64_t calerr:1; + uint64_t syncerr:1; + uint64_t diperr:1; + uint64_t tpaovr:1; + uint64_t rsverr:1; + uint64_t drwnng:1; + uint64_t clserr:1; + uint64_t spiovr:1; + uint64_t reserved_2_3:2; + uint64_t abnorm:1; + uint64_t prtnxa:1; + } s; +}; + +union cvmx_pow_wq_int { + uint64_t u64; + struct cvmx_pow_wq_int_s { + uint64_t wq_int:16; + uint64_t iq_dis:16; + uint64_t reserved_32_63:32; + } s; +}; + +union cvmx_sso_wq_int_thrx { + uint64_t u64; + struct { + uint64_t iq_thr:12; + uint64_t reserved_12_13:2; + uint64_t ds_thr:12; + uint64_t reserved_26_27:2; + uint64_t tc_thr:4; + uint64_t tc_en:1; + uint64_t reserved_33_63:31; + } s; +}; + +union cvmx_stxx_int_reg { + uint64_t u64; + struct cvmx_stxx_int_reg_s { + uint64_t reserved_9_63:55; + uint64_t syncerr:1; + uint64_t frmerr:1; + uint64_t unxfrm:1; + uint64_t nosync:1; + uint64_t diperr:1; + uint64_t datovr:1; + uint64_t ovrbst:1; + uint64_t calpar1:1; + uint64_t calpar0:1; + } s; +}; + +union cvmx_stxx_int_msk { + uint64_t u64; + struct cvmx_stxx_int_msk_s { + uint64_t reserved_8_63:56; + uint64_t frmerr:1; + uint64_t unxfrm:1; + uint64_t nosync:1; + uint64_t diperr:1; + uint64_t datovr:1; + uint64_t ovrbst:1; + uint64_t calpar1:1; + uint64_t calpar0:1; + } s; +}; + +union cvmx_pow_wq_int_pc { + uint64_t u64; + struct cvmx_pow_wq_int_pc_s { + uint64_t reserved_0_7:8; + uint64_t pc_thr:20; + uint64_t reserved_28_31:4; + uint64_t pc:28; + uint64_t reserved_60_63:4; + } s; +}; + +union cvmx_pow_wq_int_thrx { + uint64_t u64; + struct cvmx_pow_wq_int_thrx_s { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_23_23:1; + uint64_t ds_thr:11; + uint64_t reserved_11_11:1; + uint64_t iq_thr:11; + } s; + struct cvmx_pow_wq_int_thrx_cn30xx { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_18_23:6; + uint64_t 
ds_thr:6; + uint64_t reserved_6_11:6; + uint64_t iq_thr:6; + } cn30xx; + struct cvmx_pow_wq_int_thrx_cn31xx { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_20_23:4; + uint64_t ds_thr:8; + uint64_t reserved_8_11:4; + uint64_t iq_thr:8; + } cn31xx; + struct cvmx_pow_wq_int_thrx_cn52xx { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_21_23:3; + uint64_t ds_thr:9; + uint64_t reserved_9_11:3; + uint64_t iq_thr:9; + } cn52xx; + struct cvmx_pow_wq_int_thrx_cn63xx { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_22_23:2; + uint64_t ds_thr:10; + uint64_t reserved_10_11:2; + uint64_t iq_thr:10; + } cn63xx; +}; + +union cvmx_npi_rsl_int_blocks { + uint64_t u64; + struct cvmx_npi_rsl_int_blocks_s { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t reserved_28_29:2; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + uint64_t reserved_13_14:2; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } s; + struct cvmx_npi_rsl_int_blocks_cn30xx { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t rint_29:1; + uint64_t rint_28:1; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + uint64_t rint_14:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn30xx; + struct cvmx_npi_rsl_int_blocks_cn38xx { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t rint_29:1; + uint64_t rint_28:1; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + uint64_t rint_14:1; + uint64_t rint_13:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn38xx; + struct cvmx_npi_rsl_int_blocks_cn50xx { + uint64_t reserved_31_63:33; + uint64_t iob:1; + uint64_t lmc1:1; + uint64_t agl:1; + uint64_t reserved_24_27:4; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t reserved_21_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t reserved_15_15:1; + uint64_t rad:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t reserved_8_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn50xx; +}; + +union 
cvmx_pko_command_word0 { + uint64_t u64; + struct { + uint64_t total_bytes:16; + uint64_t segs:6; + uint64_t dontfree:1; + uint64_t ignore_i:1; + uint64_t ipoffp1:7; + uint64_t gather:1; + uint64_t rsp:1; + uint64_t wqp:1; + uint64_t n2:1; + uint64_t le:1; + uint64_t reg0:11; + uint64_t subone0:1; + uint64_t reg1:11; + uint64_t subone1:1; + uint64_t size0:2; + uint64_t size1:2; + } s; +}; + +union cvmx_ciu_timx { + uint64_t u64; + struct cvmx_ciu_timx_s { + uint64_t reserved_37_63:27; + uint64_t one_shot:1; + uint64_t len:36; + } s; +}; + +union cvmx_gmxx_rxx_rx_inbnd { + uint64_t u64; + struct cvmx_gmxx_rxx_rx_inbnd_s { + uint64_t status:1; + uint64_t speed:2; + uint64_t duplex:1; + uint64_t reserved_4_63:60; + } s; +}; + +static inline int32_t cvmx_fau_fetch_and_add32(enum cvmx_fau_reg_32 reg, + int32_t value) +{ + return value; +} + +static inline void cvmx_fau_atomic_add32(enum cvmx_fau_reg_32 reg, + int32_t value) +{ } + +static inline void cvmx_fau_atomic_write32(enum cvmx_fau_reg_32 reg, + int32_t value) +{ } + +static inline uint64_t cvmx_scratch_read64(uint64_t address) +{ + return 0; +} + +static inline void cvmx_scratch_write64(uint64_t address, uint64_t value) +{ } + +static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work) +{ + return 0; +} + +static inline void *cvmx_phys_to_ptr(uint64_t physical_address) +{ + return (void *)(uintptr_t)(physical_address); +} + +static inline uint64_t cvmx_ptr_to_phys(void *ptr) +{ + return (unsigned long)ptr; +} + +static inline int cvmx_helper_get_interface_num(int ipd_port) +{ + return ipd_port; +} + +static inline int cvmx_helper_get_interface_index_num(int ipd_port) +{ + return ipd_port; +} + +static inline void cvmx_fpa_enable(void) +{ } + +static inline uint64_t cvmx_read_csr(uint64_t csr_addr) +{ + return 0; +} + +static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val) +{ } + +static inline int cvmx_helper_setup_red(int pass_thresh, int drop_thresh) +{ + return 0; +} + +static inline void *cvmx_fpa_alloc(uint64_t pool) +{ + return NULL; +} + +static inline void cvmx_fpa_free(void *ptr, uint64_t pool, + uint64_t num_cache_lines) +{ } + +static inline int octeon_is_simulation(void) +{ + return 1; +} + +static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear, + cvmx_pip_port_status_t *status) +{ } + +static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear, + cvmx_pko_port_status_t *status) +{ } + +static inline cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int + interface) +{ + return 0; +} + +static inline union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port) +{ + union cvmx_helper_link_info ret = { .u64 = 0 }; + + return ret; +} + +static inline int cvmx_helper_link_set(int ipd_port, + union cvmx_helper_link_info link_info) +{ + return 0; +} + +static inline int cvmx_helper_initialize_packet_io_global(void) +{ + return 0; +} + +static inline int cvmx_helper_get_number_of_interfaces(void) +{ + return 2; +} + +static inline int cvmx_helper_ports_on_interface(int interface) +{ + return 1; +} + +static inline int cvmx_helper_get_ipd_port(int interface, int port) +{ + return 0; +} + +static inline int cvmx_helper_ipd_and_packet_input_enable(void) +{ + return 0; +} + +static inline void cvmx_ipd_disable(void) +{ } + +static inline void cvmx_ipd_free_ptr(void) +{ } + +static inline void cvmx_pko_disable(void) +{ } + +static inline void cvmx_pko_shutdown(void) +{ } + +static inline int cvmx_pko_get_base_queue_per_core(int port, int core) +{ + return port; +} + 
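The octeon-stubs.h hunk being added here pairs with the #ifdef CONFIG_CAVIUM_OCTEON_SOC / #else block in octeon-ethernet.h above: on builds without the Octeon SDK headers, each hardware accessor is swapped for a static inline no-op that keeps the original signature and returns a neutral value, so the rest of the staging driver still compiles (presumably for compile-test coverage only, since nothing is actually read or written). A minimal sketch of the idiom, using hypothetical names rather than the driver's cvmx_* symbols:

#ifdef CONFIG_MY_REAL_SOC
#include <asm/my-soc/csr.h>		/* real register accessors */
#else
/* Compile-test stubs: same signatures, no side effects. */
static inline uint64_t my_read_csr(uint64_t addr)
{
	return 0;			/* every register reads back as zero */
}

static inline void my_write_csr(uint64_t addr, uint64_t val)
{
}
#endif

The constants the real stubs return (0, NULL, "simulation" mode) are placeholders that keep callers type-correct; they do not emulate the hardware.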
+static inline int cvmx_pko_get_base_queue(int port) +{ + return port; +} + +static inline int cvmx_pko_get_num_queues(int port) +{ + return port; +} + +static inline unsigned int cvmx_get_core_num(void) +{ + return 0; +} + +static inline void cvmx_pow_work_request_async_nocheck(int scr_addr, + cvmx_pow_wait_t wait) +{ } + +static inline void cvmx_pow_work_request_async(int scr_addr, + cvmx_pow_wait_t wait) +{ } + +static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr) +{ + struct cvmx_wqe *wqe = (void *)(unsigned long)scr_addr; + + return wqe; +} + +static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait) +{ + return (void *)(unsigned long)wait; +} + +static inline int cvmx_spi_restart_interface(int interface, + cvmx_spi_mode_t mode, int timeout) +{ + return 0; +} + +static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr, + enum cvmx_fau_reg_32 reg, + int32_t value) +{ } + +static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed( + int interface, + int port) +{ + union cvmx_gmxx_rxx_rx_inbnd r; + + r.u64 = 0; + return r; +} + +static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue, + cvmx_pko_lock_t use_locking) +{ } + +static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port, + uint64_t queue, union cvmx_pko_command_word0 pko_command, + union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking) +{ + return 0; +} + +static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port) +{ } + +static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos) +{ } + +static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work) +{ + return 0; +} + +static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp) +{ } + +static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag, + enum cvmx_pow_tag_type tag_type, + uint64_t qos, uint64_t grp) +{ } + +#define CVMX_ASXX_RX_CLK_SETX(a, b) ((a) + (b)) +#define CVMX_ASXX_TX_CLK_SETX(a, b) ((a) + (b)) +#define CVMX_CIU_TIMX(a) (a) +#define CVMX_GMXX_RXX_ADR_CAM0(a, b) ((a) + (b)) +#define CVMX_GMXX_RXX_ADR_CAM1(a, b) ((a) + (b)) +#define CVMX_GMXX_RXX_ADR_CAM2(a, b) ((a) + (b)) +#define CVMX_GMXX_RXX_ADR_CAM3(a, b) ((a) + (b)) +#define CVMX_GMXX_RXX_ADR_CAM4(a, b) ((a) + (b)) +#define CVMX_GMXX_RXX_ADR_CAM5(a, b) ((a) + (b)) +#define CVMX_GMXX_RXX_FRM_CTL(a, b) ((a) + (b)) +#define CVMX_GMXX_RXX_INT_REG(a, b) ((a) + (b)) +#define CVMX_GMXX_SMACX(a, b) ((a) + (b)) +#define CVMX_PIP_PRT_TAGX(a) (a) +#define CVMX_POW_PP_GRP_MSKX(a) (a) +#define CVMX_POW_WQ_INT_THRX(a) (a) +#define CVMX_SPXX_INT_MSK(a) (a) +#define CVMX_SPXX_INT_REG(a) (a) +#define CVMX_SSO_PPX_GRP_MSK(a) (a) +#define CVMX_SSO_WQ_INT_THRX(a) (a) +#define CVMX_STXX_INT_MSK(a) (a) +#define CVMX_STXX_INT_REG(a) (a) diff --git a/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts b/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts index 61fad96818c2..096137fcd5cc 100644 --- a/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts +++ b/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts @@ -3,51 +3,46 @@ /plugin/; / { - compatible = "bcm,bcm2835", "bcm,bcm2708", "bcm,bcm2709"; + compatible = "brcm,bcm2835", "brcm,bcm2708", "brcm,bcm2709"; +}; - fragment@0 { - target = <&spi0>; - __overlay__ { - status = "okay"; +&spi0 { + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; - spidev@0{ - status = "disabled"; - }; + spidev@0{ + reg = <0>; + status = "disabled"; + }; - spidev@1{ - status = "disabled"; - }; - }; + 
spidev@1{ + reg = <1>; + status = "disabled"; }; +}; - fragment@1 { - target = <&gpio>; - __overlay__ { - pi433_pins: pi433_pins { - brcm,pins = <7 25 24>; - brcm,function = <0 0 0>; // in in in - }; - }; +&gpio { + pi433_pins: pi433_pins { + brcm,pins = <7 25 24>; + brcm,function = <0 0 0>; // in in in }; +}; - fragment@2 { - target = <&spi0>; - __overlay__ { - #address-cells = <1>; - #size-cells = <0>; - status = "okay"; - - pi433: pi433@0 { - compatible = "Smarthome-Wolf,pi433"; - reg = <0>; - spi-max-frequency = <10000000>; - status = "okay"; - - pinctrl-0 = <&pi433_pins>; - DIO0-gpio = <&gpio 24 0>; - DIO1-gpio = <&gpio 25 0>; - DIO2-gpio = <&gpio 7 0>; - }; - }; +&spi0 { + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + pi433: pi433@0 { + compatible = "Smarthome-Wolf,pi433"; + reg = <0>; + spi-max-frequency = <10000000>; + status = "okay"; + + pinctrl-0 = <&pi433_pins>; + DIO0-gpio = <&gpio 24 0>; + DIO1-gpio = <&gpio 25 0>; + DIO2-gpio = <&gpio 7 0>; }; }; diff --git a/drivers/staging/pi433/pi433_if.h b/drivers/staging/pi433/pi433_if.h index 9feb95c431cb..16c5b7fba249 100644 --- a/drivers/staging/pi433/pi433_if.h +++ b/drivers/staging/pi433/pi433_if.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0+ - * +/* SPDX-License-Identifier: GPL-2.0+ */ +/* * include/linux/TODO * * userspace interface for pi433 radio module diff --git a/drivers/staging/pi433/rf69.h b/drivers/staging/pi433/rf69.h index d43a8d87d5d3..b648ba5fff89 100644 --- a/drivers/staging/pi433/rf69.h +++ b/drivers/staging/pi433/rf69.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0+ - * +/* SPDX-License-Identifier: GPL-2.0+ */ +/* * hardware abstraction/register access for HopeRf rf69 radio module * * Copyright (C) 2016 Wolf-Entwicklungen diff --git a/drivers/staging/pi433/rf69_enum.h b/drivers/staging/pi433/rf69_enum.h index 3ee1952245c2..fbf56fcf5fe8 100644 --- a/drivers/staging/pi433/rf69_enum.h +++ b/drivers/staging/pi433/rf69_enum.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0+ - * +/* SPDX-License-Identifier: GPL-2.0+ */ +/* * enumerations for HopeRf rf69 radio module * * Copyright (C) 2016 Wolf-Entwicklungen diff --git a/drivers/staging/pi433/rf69_registers.h b/drivers/staging/pi433/rf69_registers.h index be5497cdace0..a170c66c3d5b 100644 --- a/drivers/staging/pi433/rf69_registers.h +++ b/drivers/staging/pi433/rf69_registers.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0+ - * +/* SPDX-License-Identifier: GPL-2.0+ */ +/* * register description for HopeRf rf69 radio module * * Copyright (C) 2016 Wolf-Entwicklungen diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index 4bc5d5fce9bf..fc8c5ca8935d 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -16,8 +16,8 @@ /* * General definitions... */ -#define DRV_NAME "qlge" -#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " +#define DRV_NAME "qlge" +#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " #define DRV_VERSION "1.00.00.35" #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ @@ -59,7 +59,7 @@ #define MAX_CQ 128 #define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */ #define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */ -#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2) +#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT / 2) #define UDELAY_COUNT 3 #define UDELAY_DELAY 100 @@ -119,7 +119,6 @@ enum { * Processor Address Register (PROC_ADDR) bit definitions. */ enum { - /* Misc. 
stuff */ MAILBOX_COUNT = 16, MAILBOX_TIMEOUT = 5, @@ -1077,11 +1076,11 @@ struct tx_buf_desc { * IOCB Definitions... */ -#define OPCODE_OB_MAC_IOCB 0x01 +#define OPCODE_OB_MAC_IOCB 0x01 #define OPCODE_OB_MAC_TSO_IOCB 0x02 -#define OPCODE_IB_MAC_IOCB 0x20 -#define OPCODE_IB_MPI_IOCB 0x21 -#define OPCODE_IB_AE_IOCB 0x3f +#define OPCODE_IB_MAC_IOCB 0x20 +#define OPCODE_IB_MPI_IOCB 0x21 +#define OPCODE_IB_AE_IOCB 0x3f struct ob_mac_iocb_req { u8 opcode; @@ -1173,15 +1172,15 @@ struct ib_mac_iocb_rsp { u8 flags1; #define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */ #define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */ -#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ +#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ #define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ #define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ #define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */ #define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */ #define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */ #define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */ -#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */ -#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */ +#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */ +#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */ #define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */ u8 flags2; #define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */ @@ -1198,16 +1197,16 @@ struct ib_mac_iocb_rsp { #define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */ u8 flags3; #define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */ -#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */ -#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */ -#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */ -#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */ -#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */ -#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */ -#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */ -#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */ -#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */ -#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */ +#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */ +#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */ +#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */ +#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */ +#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */ +#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */ +#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */ +#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */ +#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */ +#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */ __le32 data_len; /* */ __le64 data_addr; /* */ __le32 rss; /* */ @@ -1233,17 +1232,17 @@ struct ib_ae_iocb_rsp { #define IB_AE_IOCB_RSP_OI 0x01 #define IB_AE_IOCB_RSP_I 0x02 u8 event; -#define LINK_UP_EVENT 0x00 -#define LINK_DOWN_EVENT 0x01 -#define CAM_LOOKUP_ERR_EVENT 0x06 -#define SOFT_ECC_ERROR_EVENT 0x07 -#define MGMT_ERR_EVENT 0x08 -#define TEN_GIG_MAC_EVENT 0x09 -#define GPI0_H2L_EVENT 0x10 -#define GPI0_L2H_EVENT 0x20 -#define GPI1_H2L_EVENT 0x11 -#define GPI1_L2H_EVENT 0x21 -#define PCI_ERR_ANON_BUF_RD 0x40 +#define LINK_UP_EVENT 0x00 +#define LINK_DOWN_EVENT 0x01 +#define CAM_LOOKUP_ERR_EVENT 0x06 +#define SOFT_ECC_ERROR_EVENT 0x07 +#define MGMT_ERR_EVENT 0x08 +#define TEN_GIG_MAC_EVENT 0x09 +#define GPI0_H2L_EVENT 0x10 +#define GPI0_L2H_EVENT 
0x20 +#define GPI1_H2L_EVENT 0x11 +#define GPI1_L2H_EVENT 0x21 +#define PCI_ERR_ANON_BUF_RD 0x40 u8 q_id; __le32 reserved[15]; } __packed; @@ -1367,7 +1366,7 @@ struct tx_ring_desc { struct tx_ring_desc *next; }; -#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) +#define QL_TXQ_IDX(qdev, skb) (smp_processor_id() % (qdev->tx_ring_count)) struct tx_ring { /* @@ -1762,7 +1761,6 @@ struct ql_nic_misc { }; struct ql_reg_dump { - /* segment 0 */ struct mpi_coredump_global_header mpi_global_header; @@ -1792,7 +1790,7 @@ struct ql_reg_dump { /* segment 34 */ struct mpi_coredump_segment_header ets_seg_hdr; - u32 ets[8+2]; + u32 ets[8 + 2]; }; struct ql_mpi_coredump { @@ -2059,7 +2057,6 @@ enum { }; struct nic_operations { - int (*get_flash) (struct ql_adapter *); int (*port_initialize) (struct ql_adapter *); }; diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index 8cf39615c520..1795533cbd3a 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -29,15 +29,13 @@ static int ql_write_other_func_reg(struct ql_adapter *qdev, u32 reg, u32 reg_val) { u32 register_to_read; - int status = 0; register_to_read = MPI_NIC_REG_BLOCK | MPI_NIC_READ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) | reg; - status = ql_write_mpi_reg(qdev, register_to_read, reg_val); - return status; + return ql_write_mpi_reg(qdev, register_to_read, reg_val); } static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg, @@ -499,6 +497,7 @@ static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf, u32 offset, u32 count) { int i, status = 0; + for (i = 0; i < count; i++, buf++) { status = ql_read_mpi_reg(qdev, offset + i, buf); if (status) @@ -552,7 +551,6 @@ static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf) buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK, PRB_MX_ADDR_VALID_FC_MOD, buf); return 0; - } /* Read out the routing index registers */ @@ -610,7 +608,6 @@ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf) for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) { switch (type) { - case 0: /* CAM */ initial_val |= MAC_ADDR_ADR; max_index = MAC_ADDR_MAX_CAM_ENTRIES; @@ -1204,7 +1201,6 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) err: ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ return status; - } static void ql_get_core_dump(struct ql_adapter *qdev) @@ -1324,27 +1320,10 @@ void ql_mpi_core_to_log(struct work_struct *work) { struct ql_adapter *qdev = container_of(work, struct ql_adapter, mpi_core_to_log.work); - u32 *tmp, count; - int i; - count = sizeof(struct ql_mpi_coredump) / sizeof(u32); - tmp = (u32 *)qdev->mpi_coredump; - netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, - "Core is dumping to log file!\n"); - - for (i = 0; i < count; i += 8) { - pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x " - "%.08x %.08x %.08x\n", i, - tmp[i + 0], - tmp[i + 1], - tmp[i + 2], - tmp[i + 3], - tmp[i + 4], - tmp[i + 5], - tmp[i + 6], - tmp[i + 7]); - msleep(5); - } + print_hex_dump(KERN_DEBUG, "Core is dumping to log file!\n", + DUMP_PREFIX_OFFSET, 32, 4, qdev->mpi_coredump, + sizeof(*qdev->mpi_coredump), false); } #ifdef QL_REG_DUMP @@ -1352,6 +1331,7 @@ static void ql_dump_intr_states(struct ql_adapter *qdev) { int i; u32 value; + for (i = 0; i < qdev->intr_count; i++) { ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); value = ql_read32(qdev, INTR_EN); @@ -1437,6 +1417,7 @@ void ql_dump_routing_entries(struct ql_adapter *qdev) { int i; 
u32 value; + i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (i) return; @@ -1525,7 +1506,7 @@ void ql_dump_regs(struct ql_adapter *qdev) #ifdef QL_STAT_DUMP #define DUMP_STAT(qdev, stat) \ - pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat) + pr_err("%s = %ld\n", #stat, (unsigned long)(qdev)->nic_stats.stat) void ql_dump_stat(struct ql_adapter *qdev) { @@ -1578,15 +1559,16 @@ void ql_dump_stat(struct ql_adapter *qdev) #ifdef QL_DEV_DUMP #define DUMP_QDEV_FIELD(qdev, type, field) \ - pr_err("qdev->%-24s = " type "\n", #field, qdev->field) + pr_err("qdev->%-24s = " type "\n", #field, (qdev)->(field)) #define DUMP_QDEV_DMA_FIELD(qdev, field) \ pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field) #define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \ pr_err("%s[%d].%s = " type "\n", \ - #array, index, #field, qdev->array[index].field); + #array, index, #field, (qdev)->array[index].field); void ql_dump_qdev(struct ql_adapter *qdev) { int i; + DUMP_QDEV_FIELD(qdev, "%lx", flags); DUMP_QDEV_FIELD(qdev, "%p", vlgrp); DUMP_QDEV_FIELD(qdev, "%p", pdev); @@ -1640,9 +1622,9 @@ void ql_dump_wqicb(struct wqicb *wqicb) le16_to_cpu(wqicb->cq_id_rss)); pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid)); pr_err("wqicb->wq_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(wqicb->addr)); + (unsigned long long)le64_to_cpu(wqicb->addr)); pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr)); + (unsigned long long)le64_to_cpu(wqicb->cnsmr_idx_addr)); } void ql_dump_tx_ring(struct tx_ring *tx_ring) @@ -1653,7 +1635,7 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring) tx_ring->wq_id); pr_err("tx_ring->base = %p\n", tx_ring->wq_base); pr_err("tx_ring->base_dma = 0x%llx\n", - (unsigned long long) tx_ring->wq_base_dma); + (unsigned long long)tx_ring->wq_base_dma); pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n", tx_ring->cnsmr_idx_sh_reg, tx_ring->cnsmr_idx_sh_reg @@ -1672,6 +1654,7 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring) void ql_dump_ricb(struct ricb *ricb) { int i; + pr_err("===================== Dumping ricb ===============\n"); pr_err("Dumping ricb stuff...\n"); @@ -1706,21 +1689,21 @@ void ql_dump_cqicb(struct cqicb *cqicb) pr_err("cqicb->flags = %x\n", cqicb->flags); pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len)); pr_err("cqicb->addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->addr)); + (unsigned long long)le64_to_cpu(cqicb->addr)); pr_err("cqicb->prod_idx_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr)); + (unsigned long long)le64_to_cpu(cqicb->prod_idx_addr)); pr_err("cqicb->pkt_delay = 0x%.04x\n", le16_to_cpu(cqicb->pkt_delay)); pr_err("cqicb->irq_delay = 0x%.04x\n", le16_to_cpu(cqicb->irq_delay)); pr_err("cqicb->lbq_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->lbq_addr)); + (unsigned long long)le64_to_cpu(cqicb->lbq_addr)); pr_err("cqicb->lbq_buf_size = 0x%.04x\n", le16_to_cpu(cqicb->lbq_buf_size)); pr_err("cqicb->lbq_len = 0x%.04x\n", le16_to_cpu(cqicb->lbq_len)); pr_err("cqicb->sbq_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->sbq_addr)); + (unsigned long long)le64_to_cpu(cqicb->sbq_addr)); pr_err("cqicb->sbq_buf_size = 0x%.04x\n", le16_to_cpu(cqicb->sbq_buf_size)); pr_err("cqicb->sbq_len = 0x%.04x\n", @@ -1748,7 +1731,7 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring) pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb); pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base); pr_err("rx_ring->cq_base_dma = %llx\n", - 
(unsigned long long) rx_ring->cq_base_dma); + (unsigned long long)rx_ring->cq_base_dma); pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size); pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len); pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n", @@ -1756,7 +1739,7 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring) rx_ring->prod_idx_sh_reg ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n", - (unsigned long long) rx_ring->prod_idx_sh_reg_dma); + (unsigned long long)rx_ring->prod_idx_sh_reg_dma); pr_err("rx_ring->cnsmr_idx_db_reg = %p\n", rx_ring->cnsmr_idx_db_reg); pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx); @@ -1855,7 +1838,6 @@ void ql_dump_tx_desc(struct tx_buf_desc *tbd) pr_err("tbd->flags = %s %s\n", tbd->len & TX_DESC_C ? "C" : ".", tbd->len & TX_DESC_E ? "E" : "."); - } void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) @@ -1980,7 +1962,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) pr_err("data_len = %d\n", le32_to_cpu(ib_mac_rsp->data_len)); pr_err("data_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr)); + (unsigned long long)le64_to_cpu(ib_mac_rsp->data_addr)); if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) pr_err("rss = %x\n", le32_to_cpu(ib_mac_rsp->rss)); @@ -1997,7 +1979,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) pr_err("hdr length = %d\n", le32_to_cpu(ib_mac_rsp->hdr_len)); pr_err("hdr addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr)); + (unsigned long long)le64_to_cpu(ib_mac_rsp->hdr_addr)); } } #endif diff --git a/drivers/staging/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c index 790997aff995..441ac08d14d2 100644 --- a/drivers/staging/qlge/qlge_ethtool.c +++ b/drivers/staging/qlge/qlge_ethtool.c @@ -259,8 +259,9 @@ static void ql_update_stats(struct ql_adapter *qdev) "Error reading status register 0x%.04x.\n", i); goto end; - } else + } else { *iter = data; + } iter++; } @@ -273,8 +274,9 @@ static void ql_update_stats(struct ql_adapter *qdev) "Error reading status register 0x%.04x.\n", i); goto end; - } else + } else { *iter = data; + } iter++; } @@ -290,8 +292,9 @@ static void ql_update_stats(struct ql_adapter *qdev) "Error reading status register 0x%.04x.\n", i); goto end; - } else + } else { *iter = data; + } iter++; } @@ -304,8 +307,9 @@ static void ql_update_stats(struct ql_adapter *qdev) "Error reading status register 0x%.04x.\n", i); goto end; - } else + } else { *iter = data; + } iter++; } @@ -316,8 +320,9 @@ static void ql_update_stats(struct ql_adapter *qdev) netif_err(qdev, drv, qdev->ndev, "Error reading status register 0x%.04x.\n", i); goto end; - } else + } else { *iter = data; + } end: ql_sem_unlock(qdev, qdev->xg_sem_mask); quit: @@ -488,8 +493,9 @@ static int ql_start_loopback(struct ql_adapter *qdev) if (netif_carrier_ok(qdev->ndev)) { set_bit(QL_LB_LINK_UP, &qdev->flags); netif_carrier_off(qdev->ndev); - } else + } else { clear_bit(QL_LB_LINK_UP, &qdev->flags); + } qdev->link_config |= CFG_LOOPBACK_PCS; return ql_mb_set_port_cfg(qdev); } @@ -686,7 +692,6 @@ static int ql_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct ql_adapter *qdev = netdev_priv(netdev); - int status = 0; if ((pause->rx_pause) && (pause->tx_pause)) qdev->link_config |= CFG_PAUSE_STD; @@ -695,8 +700,7 @@ static int ql_set_pauseparam(struct net_device *netdev, else return -EINVAL; - status = ql_mb_set_port_cfg(qdev); - return status; + return ql_mb_set_port_cfg(qdev); 
} static u32 ql_get_msglevel(struct net_device *ndev) diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index ef8037d0b52e..c92820f07968 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -52,16 +52,12 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static const u32 default_msg = - NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | -/* NETIF_MSG_TIMER | */ - NETIF_MSG_IFDOWN | - NETIF_MSG_IFUP | - NETIF_MSG_RX_ERR | - NETIF_MSG_TX_ERR | -/* NETIF_MSG_TX_QUEUED | */ -/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */ -/* NETIF_MSG_PKTDATA | */ - NETIF_MSG_HW | NETIF_MSG_WOL | 0; + NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | + NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP | + NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR | + NETIF_MSG_HW | NETIF_MSG_WOL | 0; static int debug = -1; /* defaults above */ module_param(debug, int, 0664); @@ -143,6 +139,7 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask) int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) { unsigned int wait_count = 30; + do { if (!ql_sem_trylock(qdev, sem_mask)) return 0; @@ -1210,6 +1207,7 @@ static void ql_unmap_send(struct ql_adapter *qdev, struct tx_ring_desc *tx_ring_desc, int mapped) { int i; + for (i = 0; i < mapped; i++) { if (i == 0 || (i == 7 && mapped > 7)) { /* @@ -1290,6 +1288,7 @@ static int ql_map_send(struct ql_adapter *qdev, */ for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx]; + tbd++; if (frag_idx == 6 && frag_cnt > 7) { /* Let's tack on an sglist. @@ -1649,6 +1648,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { /* Unfragmented ipv4 UDP frame. */ struct iphdr *iph = (struct iphdr *) skb->data; + if (!(iph->frag_off & htons(IP_MF|IP_OFFSET))) { skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -1818,6 +1818,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, * eventually be in trouble. */ int size, i = 0; + sbq_desc = qlge_get_curr_buf(&rx_ring->sbq); pci_unmap_single(qdev->pdev, sbq_desc->dma_addr, SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); @@ -1936,6 +1937,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { /* Unfragmented ipv4 UDP frame. 
*/ struct iphdr *iph = (struct iphdr *) skb->data; + if (!(iph->frag_off & htons(IP_MF|IP_OFFSET))) { skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -2391,6 +2393,7 @@ static void qlge_restore_vlan(struct ql_adapter *qdev) static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) { struct rx_ring *rx_ring = dev_id; + napi_schedule(&rx_ring->napi); return IRQ_HANDLED; } @@ -2497,6 +2500,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; if (likely(l3_proto == htons(ETH_P_IP))) { struct iphdr *iph = ip_hdr(skb); + iph->check = 0; mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, @@ -2521,6 +2525,7 @@ static void ql_hw_csum_setup(struct sk_buff *skb, int len; struct iphdr *iph = ip_hdr(skb); __sum16 *check; + mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); mac_iocb_ptr->net_trans_offset = @@ -3896,14 +3901,11 @@ static void ql_release_adapter_resources(struct ql_adapter *qdev) static int ql_get_adapter_resources(struct ql_adapter *qdev) { - int status = 0; - if (ql_alloc_mem_resources(qdev)) { netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); return -ENOMEM; } - status = ql_request_irq(qdev); - return status; + return ql_request_irq(qdev); } static int qlge_close(struct net_device *ndev) @@ -4265,6 +4267,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p) static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue) { struct ql_adapter *qdev = netdev_priv(ndev); + ql_queue_asic_error(qdev); } @@ -4273,6 +4276,7 @@ static void ql_asic_reset_work(struct work_struct *work) struct ql_adapter *qdev = container_of(work, struct ql_adapter, asic_reset_work.work); int status; + rtnl_lock(); status = ql_adapter_down(qdev); if (status) @@ -4344,6 +4348,7 @@ static int ql_get_alt_pcie_func(struct ql_adapter *qdev) static int ql_get_board_info(struct ql_adapter *qdev) { int status; + qdev->func = (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; if (qdev->func > 3) @@ -4652,6 +4657,7 @@ static void qlge_remove(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); + del_timer_sync(&qdev->timer); ql_cancel_all_work_sync(qdev); unregister_netdev(ndev); diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c index bb03b2fa7233..60c08d9cc034 100644 --- a/drivers/staging/qlge/qlge_mpi.c +++ b/drivers/staging/qlge/qlge_mpi.c @@ -90,9 +90,7 @@ exit: int ql_soft_reset_mpi_risc(struct ql_adapter *qdev) { - int status; - status = ql_write_mpi_reg(qdev, 0x00001010, 1); - return status; + return ql_write_mpi_reg(qdev, 0x00001010, 1); } /* Determine if we are in charge of the firwmare. 
If @@ -237,6 +235,7 @@ static int ql_idc_cmplt_aen(struct ql_adapter *qdev) { int status; struct mbox_params *mbcp = &qdev->idc_mbc; + mbcp->out_count = 4; status = ql_get_mb_sts(qdev, mbcp); if (status) { @@ -255,6 +254,7 @@ static int ql_idc_cmplt_aen(struct ql_adapter *qdev) static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp) { int status; + mbcp->out_count = 2; status = ql_get_mb_sts(qdev, mbcp); @@ -353,6 +353,7 @@ static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp) netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n"); else { int i; + netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n"); for (i = 0; i < mbcp->out_count; i++) netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n", @@ -912,6 +913,7 @@ static int ql_idc_wait(struct ql_adapter *qdev) int status = -ETIMEDOUT; long wait_time = 1 * HZ; struct mbox_params *mbcp = &qdev->idc_mbc; + do { /* Wait here for the command to complete * via the IDC process. @@ -1096,6 +1098,7 @@ int ql_wait_fifo_empty(struct ql_adapter *qdev) static int ql_set_port_cfg(struct ql_adapter *qdev) { int status; + status = ql_mb_set_port_cfg(qdev); if (status) return status; diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c index 815dfee11968..f69e9453ad45 100644 --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c +++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c @@ -199,7 +199,7 @@ _next: rtw_free_cmd_obj(pcmd); } else { /* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */ - pcmd_callback(pcmd->padapter, pcmd);/* need conider that free cmd_obj in rtw_cmd_callback */ + pcmd_callback(pcmd->padapter, pcmd);/* need consider that free cmd_obj in rtw_cmd_callback */ } } else { RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("%s: cmdcode = 0x%x callback not defined!\n", __func__, pcmd->cmdcode)); diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c index 6c2fe1a112ac..d0e41f2ef1ce 100644 --- a/drivers/staging/rtl8188eu/core/rtw_debug.c +++ b/drivers/staging/rtl8188eu/core/rtw_debug.c @@ -15,7 +15,7 @@ int proc_get_drv_version(char *page, char **start, { int len = 0; - len += snprintf(page + len, count - len, "%s\n", DRIVERVERSION); + len += scnprintf(page + len, count - len, "%s\n", DRIVERVERSION); *eof = 1; return len; @@ -86,16 +86,16 @@ int proc_get_read_reg(char *page, char **start, switch (proc_get_read_len) { case 1: - len += snprintf(page + len, count - len, "usb_read8(0x%x)=0x%x\n", proc_get_read_addr, usb_read8(padapter, proc_get_read_addr)); + len += scnprintf(page + len, count - len, "usb_read8(0x%x)=0x%x\n", proc_get_read_addr, usb_read8(padapter, proc_get_read_addr)); break; case 2: - len += snprintf(page + len, count - len, "usb_read16(0x%x)=0x%x\n", proc_get_read_addr, usb_read16(padapter, proc_get_read_addr)); + len += scnprintf(page + len, count - len, "usb_read16(0x%x)=0x%x\n", proc_get_read_addr, usb_read16(padapter, proc_get_read_addr)); break; case 4: - len += snprintf(page + len, count - len, "usb_read32(0x%x)=0x%x\n", proc_get_read_addr, usb_read32(padapter, proc_get_read_addr)); + len += scnprintf(page + len, count - len, "usb_read32(0x%x)=0x%x\n", proc_get_read_addr, usb_read32(padapter, proc_get_read_addr)); break; default: - len += snprintf(page + len, count - len, "error read length=%d\n", proc_get_read_len); + len += scnprintf(page + len, count - len, "error read length=%d\n", proc_get_read_len); break; } @@ -138,7 +138,7 @@ int proc_get_adapter_state(char *page, char **start, struct 
adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); int len = 0; - len += snprintf(page + len, count - len, "bSurpriseRemoved=%d, bDriverStopped=%d\n", + len += scnprintf(page + len, count - len, "bSurpriseRemoved=%d, bDriverStopped=%d\n", padapter->bSurpriseRemoved, padapter->bDriverStopped); *eof = 1; @@ -170,11 +170,11 @@ int proc_get_best_channel(char *page, char **start, } /* debug */ - len += snprintf(page + len, count - len, "The rx cnt of channel %3d = %d\n", + len += scnprintf(page + len, count - len, "The rx cnt of channel %3d = %d\n", pmlmeext->channel_set[i].ChannelNum, pmlmeext->channel_set[i].rx_count); } - len += snprintf(page + len, count - len, "best_channel_24G = %d\n", best_channel_24G); + len += scnprintf(page + len, count - len, "best_channel_24G = %d\n", best_channel_24G); *eof = 1; return len; diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c index 29f615443e8f..e186982d5908 100644 --- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c +++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c @@ -236,14 +236,10 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv) ie = rtw_set_ie(ie, _SSID_IE_, pdev_network->ssid.ssid_length, pdev_network->ssid.ssid, &sz); /* supported rates */ - if (pregistrypriv->wireless_mode == WIRELESS_11ABGN) { - if (pdev_network->Configuration.DSConfig > 14) - wireless_mode = WIRELESS_11A_5N; - else - wireless_mode = WIRELESS_11BG_24N; - } else { + if (pregistrypriv->wireless_mode == WIRELESS_11ABGN) + wireless_mode = WIRELESS_11BG_24N; + else wireless_mode = pregistrypriv->wireless_mode; - } rtw_set_supported_rate(pdev_network->SupportedRates, wireless_mode); diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c index e764436e120f..9de2d421f6b1 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c @@ -149,7 +149,7 @@ static void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network * (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))) lifetime = 1; if (!isfreeall) { - delta_time = (curr_time - pnetwork->last_scanned)/HZ; + delta_time = (curr_time - pnetwork->last_scanned) / HZ; if (delta_time < lifetime)/* unit:sec */ return; } @@ -249,8 +249,8 @@ void rtw_generate_random_ibss(u8 *pibss) pibss[1] = 0x11; pibss[2] = 0x87; pibss[3] = (u8)(curtime & 0xff);/* p[0]; */ - pibss[4] = (u8)((curtime>>8) & 0xff);/* p[1]; */ - pibss[5] = (u8)((curtime>>16) & 0xff);/* p[2]; */ + pibss[4] = (u8)((curtime >> 8) & 0xff);/* p[1]; */ + pibss[5] = (u8)((curtime >> 16) & 0xff);/* p[2]; */ } u8 *rtw_get_capability_from_ie(u8 *ie) @@ -357,9 +357,9 @@ void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src, rssi_final = rssi_ori; } else { if (sq_smp != 101) { /* from the right channel */ - ss_final = ((u32)(src->PhyInfo.SignalStrength)+(u32)(dst->PhyInfo.SignalStrength)*4)/5; - sq_final = ((u32)(src->PhyInfo.SignalQuality)+(u32)(dst->PhyInfo.SignalQuality)*4)/5; - rssi_final = (src->Rssi+dst->Rssi*4)/5; + ss_final = ((u32)(src->PhyInfo.SignalStrength) + (u32)(dst->PhyInfo.SignalStrength) * 4) / 5; + sq_final = ((u32)(src->PhyInfo.SignalQuality) + (u32)(dst->PhyInfo.SignalQuality) * 4) / 5; + rssi_final = (src->Rssi + dst->Rssi * 4) / 5; } else { /* bss info not receiving from the right channel, use the original RX signal infos */ ss_final = dst->PhyInfo.SignalStrength; @@ -510,7 +510,7 @@ static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network * privacy = 
pnetwork->network.Privacy; if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) { - if (rtw_get_wps_ie(pnetwork->network.ies+_FIXED_IE_LENGTH_, pnetwork->network.ie_length-_FIXED_IE_LENGTH_, NULL, &wps_ielen)) + if (rtw_get_wps_ie(pnetwork->network.ies + _FIXED_IE_LENGTH_, pnetwork->network.ie_length - _FIXED_IE_LENGTH_, NULL, &wps_ielen)) return true; else return false; @@ -924,8 +924,8 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net /* update fw_state will clr _FW_UNDER_LINKING here indirectly */ switch (pnetwork->network.InfrastructureMode) { case Ndis802_11Infrastructure: - if (pmlmepriv->fw_state&WIFI_UNDER_WPS) - pmlmepriv->fw_state = WIFI_STATION_STATE|WIFI_UNDER_WPS; + if (pmlmepriv->fw_state & WIFI_UNDER_WPS) + pmlmepriv->fw_state = WIFI_STATION_STATE | WIFI_UNDER_WPS; else pmlmepriv->fw_state = WIFI_STATION_STATE; break; @@ -1097,14 +1097,14 @@ static u8 search_max_mac_id(struct adapter *padapter) #if defined(CONFIG_88EU_AP_MODE) if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { for (aid = pstapriv->max_num_sta; aid > 0; aid--) { - if (pstapriv->sta_aid[aid-1]) + if (pstapriv->sta_aid[aid - 1]) break; } mac_id = aid + 1; } else #endif {/* adhoc id = 31~2 */ - for (mac_id = NUM_STA-1; mac_id >= IBSS_START_MAC_ID; mac_id--) { + for (mac_id = NUM_STA - 1; mac_id >= IBSS_START_MAC_ID; mac_id--) { if (pmlmeinfo->FW_sta_info[mac_id].status == 1) break; } @@ -1123,7 +1123,7 @@ void rtw_stassoc_hw_rpt(struct adapter *adapter, struct sta_info *psta) macid = search_max_mac_id(adapter); rtw_hal_set_hwreg(adapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&macid); - media_status = (psta->mac_id<<8)|1; /* MACID|OPMODE:1 connect */ + media_status = (psta->mac_id << 8) | 1; /* MACID|OPMODE:1 connect */ rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status); } @@ -1213,7 +1213,7 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf) if (mac_id >= 0) { u16 media_status; - media_status = (mac_id<<8)|0; /* MACID|OPMODE:0 means disconnect */ + media_status = (mac_id << 8) | 0; /* MACID|OPMODE:0 means disconnect */ /* for STA, AP, ADHOC mode, report disconnect stauts to FW */ rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status); } @@ -1640,7 +1640,7 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_ for (i = 12; i < in_len; i += (in_ie[i + 1] + 2) /* to the next IE element */) { ielength = initial_out_len; - if (in_ie[i] == 0xDD && in_ie[i+2] == 0x00 && in_ie[i+3] == 0x50 && in_ie[i+4] == 0xF2 && in_ie[i+5] == 0x02 && i+5 < in_len) { + if (in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 && in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 && in_ie[i + 5] == 0x02 && i + 5 < in_len) { /* WMM element ID and OUI */ /* Append WMM IE to the last index of out_ie */ @@ -1734,13 +1734,13 @@ int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_ authmode = _WPA2_IE_ID_; if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) { - memcpy(out_ie+ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len); + memcpy(out_ie + ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len); ielength += psecuritypriv->wps_ie_len; } else if ((authmode == _WPA_IE_ID_) || (authmode == _WPA2_IE_ID_)) { /* copy RSN or SSN */ - memcpy(&out_ie[ielength], &psecuritypriv->supplicant_ie[0], psecuritypriv->supplicant_ie[1]+2); - ielength += psecuritypriv->supplicant_ie[1]+2; + memcpy(&out_ie[ielength], &psecuritypriv->supplicant_ie[0], psecuritypriv->supplicant_ie[1] + 2); + ielength += psecuritypriv->supplicant_ie[1] + 2; 
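The snprintf() to scnprintf() conversions in the rtw_debug.c hunks above follow the usual kernel rationale: snprintf() returns the length the output would have had, even when it was truncated, so accumulating its return value can push len past count and make the next size argument wrong (negative, or enormous once converted to size_t); scnprintf() returns only the number of characters actually stored, so the running length can never walk past the buffer. A minimal userspace sketch of the difference, with my_scnprintf() as a hypothetical stand-in for the kernel helper (not the kernel implementation itself):

/*
 * Illustrative only: shows why "len += snprintf(...)" can overrun a
 * fixed buffer after truncation, and how a scnprintf()-style helper
 * keeps the accumulated length bounded by the buffer size.
 */
#include <stdarg.h>
#include <stdio.h>

static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	/* vsnprintf() reports the would-be length; clamp to what fits. */
	return (i >= (int)size) ? (int)(size - 1) : i;
}

int main(void)
{
	char page[16];
	size_t count = sizeof(page);
	size_t len = 0;

	/* snprintf counts truncated characters too, so len ends up > count. */
	len += snprintf(page + len, count - len, "%s\n", "0123456789abcdef");
	printf("snprintf-style len = %zu (count = %zu)\n", len, count);

	/* scnprintf-style accumulation stays within the buffer. */
	len = 0;
	len += my_scnprintf(page + len, count - len, "%s\n", "0123456789abcdef");
	len += my_scnprintf(page + len, count - len, "%s\n", "more");
	printf("scnprintf-style len = %zu (count = %zu)\n", len, count);
	return 0;
}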
rtw_report_sec_ie(adapter, authmode, psecuritypriv->supplicant_ie); } @@ -1865,7 +1865,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_ phtpriv->ht_option = false; - p = rtw_get_ie(in_ie+12, _HT_CAPABILITY_IE_, &ielen, in_len-12); + p = rtw_get_ie(in_ie + 12, _HT_CAPABILITY_IE_, &ielen, in_len - 12); if (p && ielen > 0) { struct ieee80211_ht_cap ht_cap; @@ -1904,16 +1904,16 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_ else ht_cap.ampdu_params_info |= IEEE80211_HT_CAP_AMPDU_DENSITY & 0x00; - rtw_set_ie(out_ie+out_len, _HT_CAPABILITY_IE_, + rtw_set_ie(out_ie + out_len, _HT_CAPABILITY_IE_, sizeof(struct ieee80211_ht_cap), (unsigned char *)&ht_cap, pout_len); phtpriv->ht_option = true; - p = rtw_get_ie(in_ie+12, _HT_ADD_INFO_IE_, &ielen, in_len-12); + p = rtw_get_ie(in_ie + 12, _HT_ADD_INFO_IE_, &ielen, in_len - 12); if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) { out_len = *pout_len; - rtw_set_ie(out_ie+out_len, _HT_ADD_INFO_IE_, ielen, p+2, pout_len); + rtw_set_ie(out_ie + out_len, _HT_ADD_INFO_IE_, ielen, p + 2, pout_len); } } return phtpriv->ht_option; diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c index 36841d20c3c1..04897cd48370 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c @@ -1151,7 +1151,7 @@ static void issue_assocreq(struct adapter *padapter) if (!padapter->registrypriv.wifi_spec) { /* Commented by Kurt 20110629 */ /* In some older APs, WPS handshake */ - /* would be fail if we append vender extensions informations to AP */ + /* would be fail if we append vender extensions information to AP */ if (!memcmp(pIE->data, WPS_OUI, 4)) pIE->Length = 14; } diff --git a/drivers/staging/rtl8188eu/hal/hal_com.c b/drivers/staging/rtl8188eu/hal/hal_com.c index 95f1b1431373..ebe19e076ff2 100644 --- a/drivers/staging/rtl8188eu/hal/hal_com.c +++ b/drivers/staging/rtl8188eu/hal/hal_com.c @@ -18,26 +18,26 @@ void dump_chip_info(struct HAL_VERSION chip_vers) uint cnt = 0; char buf[128]; - cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8188E_"); - cnt += sprintf((buf+cnt), "%s_", chip_vers.ChipType == NORMAL_CHIP ? + cnt += sprintf((buf + cnt), "Chip Version Info: CHIP_8188E_"); + cnt += sprintf((buf + cnt), "%s_", chip_vers.ChipType == NORMAL_CHIP ? "Normal_Chip" : "Test_Chip"); - cnt += sprintf((buf+cnt), "%s_", chip_vers.VendorType == CHIP_VENDOR_TSMC ? + cnt += sprintf((buf + cnt), "%s_", chip_vers.VendorType == CHIP_VENDOR_TSMC ? 
"TSMC" : "UMC"); if (chip_vers.CUTVersion == A_CUT_VERSION) - cnt += sprintf((buf+cnt), "A_CUT_"); + cnt += sprintf((buf + cnt), "A_CUT_"); else if (chip_vers.CUTVersion == B_CUT_VERSION) - cnt += sprintf((buf+cnt), "B_CUT_"); + cnt += sprintf((buf + cnt), "B_CUT_"); else if (chip_vers.CUTVersion == C_CUT_VERSION) - cnt += sprintf((buf+cnt), "C_CUT_"); + cnt += sprintf((buf + cnt), "C_CUT_"); else if (chip_vers.CUTVersion == D_CUT_VERSION) - cnt += sprintf((buf+cnt), "D_CUT_"); + cnt += sprintf((buf + cnt), "D_CUT_"); else if (chip_vers.CUTVersion == E_CUT_VERSION) - cnt += sprintf((buf+cnt), "E_CUT_"); + cnt += sprintf((buf + cnt), "E_CUT_"); else - cnt += sprintf((buf+cnt), "UNKNOWN_CUT(%d)_", + cnt += sprintf((buf + cnt), "UNKNOWN_CUT(%d)_", chip_vers.CUTVersion); - cnt += sprintf((buf+cnt), "1T1R_"); - cnt += sprintf((buf+cnt), "RomVer(0)\n"); + cnt += sprintf((buf + cnt), "1T1R_"); + cnt += sprintf((buf + cnt), "RomVer(0)\n"); pr_info("%s", buf); } diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c index 7489491f5aaa..698377ea60ee 100644 --- a/drivers/staging/rtl8188eu/hal/odm.c +++ b/drivers/staging/rtl8188eu/hal/odm.c @@ -342,7 +342,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm) u8 CurrentIGI = pDM_DigTable->CurIGValue; ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG()==>\n")); - if ((!(pDM_Odm->SupportAbility&ODM_BB_DIG)) || (!(pDM_Odm->SupportAbility&ODM_BB_FA_CNT))) { + if ((!(pDM_Odm->SupportAbility & ODM_BB_DIG)) || (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))) { ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() Return: SupportAbility ODM_BB_DIG or ODM_BB_FA_CNT is disabled\n")); return; @@ -419,7 +419,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm) } if (pDM_DigTable->LargeFAHit >= 3) { - if ((pDM_DigTable->ForbiddenIGI+1) > pDM_DigTable->rx_gain_range_max) + if ((pDM_DigTable->ForbiddenIGI + 1) > pDM_DigTable->rx_gain_range_max) pDM_DigTable->rx_gain_range_min = pDM_DigTable->rx_gain_range_max; else pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1); @@ -432,7 +432,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm) pDM_DigTable->Recover_cnt--; } else { if (pDM_DigTable->LargeFAHit < 3) { - if ((pDM_DigTable->ForbiddenIGI-1) < DIG_Dynamic_MIN) { /* DM_DIG_MIN) */ + if ((pDM_DigTable->ForbiddenIGI - 1) < DIG_Dynamic_MIN) { /* DM_DIG_MIN) */ pDM_DigTable->ForbiddenIGI = DIG_Dynamic_MIN; /* DM_DIG_MIN; */ pDM_DigTable->rx_gain_range_min = DIG_Dynamic_MIN; /* DM_DIG_MIN; */ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: At Lower Bound\n")); @@ -518,24 +518,24 @@ void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm) phy_set_bb_reg(adapter, ODM_REG_OFDM_FA_RSTD_11N, BIT(31), 1); /* hold page D counter */ ret_value = phy_query_bb_reg(adapter, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord); - FalseAlmCnt->Cnt_Fast_Fsync = (ret_value&0xffff); - FalseAlmCnt->Cnt_SB_Search_fail = (ret_value & 0xffff0000)>>16; + FalseAlmCnt->Cnt_Fast_Fsync = (ret_value & 0xffff); + FalseAlmCnt->Cnt_SB_Search_fail = (ret_value & 0xffff0000) >> 16; ret_value = phy_query_bb_reg(adapter, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord); - FalseAlmCnt->Cnt_OFDM_CCA = (ret_value&0xffff); - FalseAlmCnt->Cnt_Parity_Fail = (ret_value & 0xffff0000)>>16; + FalseAlmCnt->Cnt_OFDM_CCA = (ret_value & 0xffff); + FalseAlmCnt->Cnt_Parity_Fail = (ret_value & 0xffff0000) >> 16; ret_value = phy_query_bb_reg(adapter, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord); - FalseAlmCnt->Cnt_Rate_Illegal = (ret_value&0xffff); - 
FalseAlmCnt->Cnt_Crc8_fail = (ret_value & 0xffff0000)>>16; + FalseAlmCnt->Cnt_Rate_Illegal = (ret_value & 0xffff); + FalseAlmCnt->Cnt_Crc8_fail = (ret_value & 0xffff0000) >> 16; ret_value = phy_query_bb_reg(adapter, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord); - FalseAlmCnt->Cnt_Mcs_fail = (ret_value&0xffff); + FalseAlmCnt->Cnt_Mcs_fail = (ret_value & 0xffff); FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail + FalseAlmCnt->Cnt_Rate_Illegal + FalseAlmCnt->Cnt_Crc8_fail + FalseAlmCnt->Cnt_Mcs_fail + FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail; ret_value = phy_query_bb_reg(adapter, ODM_REG_SC_CNT_11N, bMaskDWord); - FalseAlmCnt->Cnt_BW_LSC = (ret_value&0xffff); - FalseAlmCnt->Cnt_BW_USC = (ret_value & 0xffff0000)>>16; + FalseAlmCnt->Cnt_BW_LSC = (ret_value & 0xffff); + FalseAlmCnt->Cnt_BW_USC = (ret_value & 0xffff0000) >> 16; /* hold cck counter */ phy_set_bb_reg(adapter, ODM_REG_CCK_FA_RST_11N, BIT(12), 1); @@ -544,10 +544,10 @@ void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm) ret_value = phy_query_bb_reg(adapter, ODM_REG_CCK_FA_LSB_11N, bMaskByte0); FalseAlmCnt->Cnt_Cck_fail = ret_value; ret_value = phy_query_bb_reg(adapter, ODM_REG_CCK_FA_MSB_11N, bMaskByte3); - FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff)<<8; + FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff) << 8; ret_value = phy_query_bb_reg(adapter, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord); - FalseAlmCnt->Cnt_CCK_CCA = ((ret_value&0xFF)<<8) | ((ret_value&0xFF00)>>8); + FalseAlmCnt->Cnt_CCK_CCA = ((ret_value & 0xFF) << 8) | ((ret_value & 0xFF00) >> 8); FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail + @@ -583,14 +583,14 @@ void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm) u8 CurCCK_CCAThres; struct false_alarm_stats *FalseAlmCnt = &(pDM_Odm->FalseAlmCnt); - if (!(pDM_Odm->SupportAbility & (ODM_BB_CCK_PD|ODM_BB_FA_CNT))) + if (!(pDM_Odm->SupportAbility & (ODM_BB_CCK_PD | ODM_BB_FA_CNT))) return; if (pDM_Odm->ExtLNA) return; if (pDM_Odm->bLinked) { if (pDM_Odm->RSSI_Min > 25) { CurCCK_CCAThres = 0xcd; - } else if ((pDM_Odm->RSSI_Min <= 25) && (pDM_Odm->RSSI_Min > 10)) { + } else if (pDM_Odm->RSSI_Min > 10) { CurCCK_CCAThres = 0x83; } else { if (FalseAlmCnt->Cnt_Cck_fail > 1000) @@ -630,10 +630,10 @@ void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal) Rssi_Low_bound = 45; } if (pDM_PSTable->initialize == 0) { - pDM_PSTable->Reg874 = (phy_query_bb_reg(adapter, 0x874, bMaskDWord)&0x1CC000)>>14; - pDM_PSTable->RegC70 = (phy_query_bb_reg(adapter, 0xc70, bMaskDWord) & BIT(3))>>3; - pDM_PSTable->Reg85C = (phy_query_bb_reg(adapter, 0x85c, bMaskDWord)&0xFF000000)>>24; - pDM_PSTable->RegA74 = (phy_query_bb_reg(adapter, 0xa74, bMaskDWord)&0xF000)>>12; + pDM_PSTable->Reg874 = (phy_query_bb_reg(adapter, 0x874, bMaskDWord) & 0x1CC000) >> 14; + pDM_PSTable->RegC70 = (phy_query_bb_reg(adapter, 0xc70, bMaskDWord) & BIT(3)) >> 3; + pDM_PSTable->Reg85C = (phy_query_bb_reg(adapter, 0x85c, bMaskDWord) & 0xFF000000) >> 24; + pDM_PSTable->RegA74 = (phy_query_bb_reg(adapter, 0xa74, bMaskDWord) & 0xF000) >> 12; pDM_PSTable->initialize = 1; } @@ -718,13 +718,13 @@ u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u else rate_bitmap = 0x0000000f; break; - case (ODM_WM_A|ODM_WM_G): + case (ODM_WM_A | ODM_WM_G): if (rssi_level == DM_RATR_STA_HIGH) rate_bitmap = 0x00000f00; else rate_bitmap = 0x00000ff0; break; - case (ODM_WM_B|ODM_WM_G): + case (ODM_WM_B | ODM_WM_G): if (rssi_level == DM_RATR_STA_HIGH) rate_bitmap = 0x00000f00; 
else if (rssi_level == DM_RATR_STA_MIDDLE) @@ -732,8 +732,8 @@ u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u else rate_bitmap = 0x00000ff5; break; - case (ODM_WM_B|ODM_WM_G|ODM_WM_N24G): - case (ODM_WM_A|ODM_WM_B|ODM_WM_G|ODM_WM_N24G): + case (ODM_WM_B | ODM_WM_G | ODM_WM_N24G): + case (ODM_WM_A | ODM_WM_B | ODM_WM_G | ODM_WM_N24G): if (rssi_level == DM_RATR_STA_HIGH) { rate_bitmap = 0x000f0000; } else if (rssi_level == DM_RATR_STA_MIDDLE) { @@ -911,7 +911,7 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm) if (psta->rssi_stat.UndecoratedSmoothedPWDB > tmpEntryMaxPWDB) tmpEntryMaxPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB; if (psta->rssi_stat.UndecoratedSmoothedPWDB != (-1)) - PWDB_rssi[sta_cnt++] = (psta->mac_id | (psta->rssi_stat.UndecoratedSmoothedPWDB<<16)); + PWDB_rssi[sta_cnt++] = (psta->mac_id | (psta->rssi_stat.UndecoratedSmoothedPWDB << 16)); } } diff --git a/drivers/staging/rtl8188eu/hal/odm_hwconfig.c b/drivers/staging/rtl8188eu/hal/odm_hwconfig.c index d5a9ac51e907..a6f2731b076d 100644 --- a/drivers/staging/rtl8188eu/hal/odm_hwconfig.c +++ b/drivers/staging/rtl8188eu/hal/odm_hwconfig.c @@ -103,33 +103,33 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm, switch (LNA_idx) { case 7: if (VGA_idx <= 27) - rx_pwr_all = -100 + 2 * (27-VGA_idx); /* VGA_idx = 27~2 */ + rx_pwr_all = -100 + 2 * (27 - VGA_idx); /* VGA_idx = 27~2 */ else rx_pwr_all = -100; break; case 6: - rx_pwr_all = -48 + 2 * (2-VGA_idx); /* VGA_idx = 2~0 */ + rx_pwr_all = -48 + 2 * (2 - VGA_idx); /* VGA_idx = 2~0 */ break; case 5: - rx_pwr_all = -42 + 2 * (7-VGA_idx); /* VGA_idx = 7~5 */ + rx_pwr_all = -42 + 2 * (7 - VGA_idx); /* VGA_idx = 7~5 */ break; case 4: - rx_pwr_all = -36 + 2 * (7-VGA_idx); /* VGA_idx = 7~4 */ + rx_pwr_all = -36 + 2 * (7 - VGA_idx); /* VGA_idx = 7~4 */ break; case 3: - rx_pwr_all = -24 + 2 * (7-VGA_idx); /* VGA_idx = 7~0 */ + rx_pwr_all = -24 + 2 * (7 - VGA_idx); /* VGA_idx = 7~0 */ break; case 2: if (cck_highpwr) - rx_pwr_all = -12 + 2 * (5-VGA_idx); /* VGA_idx = 5~0 */ + rx_pwr_all = -12 + 2 * (5 - VGA_idx); /* VGA_idx = 5~0 */ else - rx_pwr_all = -6 + 2 * (5-VGA_idx); + rx_pwr_all = -6 + 2 * (5 - VGA_idx); break; case 1: - rx_pwr_all = 8-2 * VGA_idx; + rx_pwr_all = 8 - 2 * VGA_idx; break; case 0: - rx_pwr_all = 14-2 * VGA_idx; + rx_pwr_all = 14 - 2 * VGA_idx; break; default: break; @@ -138,7 +138,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm, PWDB_ALL = odm_query_rxpwrpercentage(rx_pwr_all); if (!cck_highpwr) { if (PWDB_ALL >= 80) - PWDB_ALL = ((PWDB_ALL-80)<<1) + ((PWDB_ALL-80)>>1) + 80; + PWDB_ALL = ((PWDB_ALL - 80) << 1) + ((PWDB_ALL - 80) >> 1) + 80; else if ((PWDB_ALL <= 78) && (PWDB_ALL >= 20)) PWDB_ALL += 3; if (PWDB_ALL > 100) @@ -162,7 +162,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm, else if (SQ_rpt < 20) SQ = 100; else - SQ = ((64-SQ_rpt) * 100) / 44; + SQ = ((64 - SQ_rpt) * 100) / 44; } pPhyInfo->SignalQuality = SQ; pPhyInfo->RxMIMOSignalQuality[RF_PATH_A] = SQ; @@ -200,8 +200,8 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm, pPhyInfo->RxMIMOSignalStrength[i] = (u8)RSSI; /* Get Rx snr value in DB */ - pPhyInfo->RxSNR[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2); - dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2); + pPhyInfo->RxSNR[i] = (s32)(pPhyStaRpt->path_rxsnr[i] / 2); + dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i] / 2); } /* (2)PWDB, Average PWDB calculated by hardware 
(for rate adaptive) */ rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110; @@ -280,8 +280,8 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm, if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV) { if (pDM_FatTable->FAT_State == FAT_TRAINING_STATE) { if (pPktinfo->bPacketToSelf) { - antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) | - (pDM_FatTable->antsel_rx_keep_1<<1) | + antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2 << 2) | + (pDM_FatTable->antsel_rx_keep_1 << 1) | pDM_FatTable->antsel_rx_keep_0; pDM_FatTable->antSumRSSI[antsel_tr_mux] += pPhyInfo->RxPWDBAll; pDM_FatTable->antRSSIcnt[antsel_tr_mux]++; @@ -289,8 +289,8 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm, } } else if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) { if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) { - antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) | - (pDM_FatTable->antsel_rx_keep_1<<1) | pDM_FatTable->antsel_rx_keep_0; + antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2 << 2) | + (pDM_FatTable->antsel_rx_keep_1 << 1) | pDM_FatTable->antsel_rx_keep_0; rtl88eu_dm_ant_sel_statistics(dm_odm, antsel_tr_mux, pPktinfo->StationID, pPhyInfo->RxPWDBAll); } } @@ -328,17 +328,17 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm, } else { if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedOFDM) { UndecoratedSmoothedOFDM = - (((UndecoratedSmoothedOFDM) * (Rx_Smooth_Factor-1)) + + (((UndecoratedSmoothedOFDM) * (Rx_Smooth_Factor - 1)) + (RSSI_Ave)) / (Rx_Smooth_Factor); UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM + 1; } else { UndecoratedSmoothedOFDM = - (((UndecoratedSmoothedOFDM) * (Rx_Smooth_Factor-1)) + + (((UndecoratedSmoothedOFDM) * (Rx_Smooth_Factor - 1)) + (RSSI_Ave)) / (Rx_Smooth_Factor); } } - pEntry->rssi_stat.PacketMap = (pEntry->rssi_stat.PacketMap<<1) | BIT(0); + pEntry->rssi_stat.PacketMap = (pEntry->rssi_stat.PacketMap << 1) | BIT(0); } else { RSSI_Ave = pPhyInfo->RxPWDBAll; @@ -349,16 +349,16 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm, } else { if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedCCK) { UndecoratedSmoothedCCK = - ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor-1)) + + ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor - 1)) + pPhyInfo->RxPWDBAll) / Rx_Smooth_Factor; UndecoratedSmoothedCCK = UndecoratedSmoothedCCK + 1; } else { UndecoratedSmoothedCCK = - ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor-1)) + + ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor - 1)) + pPhyInfo->RxPWDBAll) / Rx_Smooth_Factor; } } - pEntry->rssi_stat.PacketMap = pEntry->rssi_stat.PacketMap<<1; + pEntry->rssi_stat.PacketMap = pEntry->rssi_stat.PacketMap << 1; } /* 2011.07.28 LukeLee: modified to prevent unstable CCK RSSI */ if (pEntry->rssi_stat.ValidBit >= 64) @@ -367,16 +367,16 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm, pEntry->rssi_stat.ValidBit++; for (i = 0; i < pEntry->rssi_stat.ValidBit; i++) - OFDM_pkt += (u8)(pEntry->rssi_stat.PacketMap>>i) & BIT(0); + OFDM_pkt += (u8)(pEntry->rssi_stat.PacketMap >> i) & BIT(0); if (pEntry->rssi_stat.ValidBit == 64) { Weighting = min_t(u32, OFDM_pkt << 4, 64); - UndecoratedSmoothedPWDB = (Weighting * UndecoratedSmoothedOFDM + (64-Weighting) * UndecoratedSmoothedCCK)>>6; + UndecoratedSmoothedPWDB = (Weighting * UndecoratedSmoothedOFDM + (64 - Weighting) * UndecoratedSmoothedCCK) >> 6; } else { if (pEntry->rssi_stat.ValidBit != 0) UndecoratedSmoothedPWDB = (OFDM_pkt * UndecoratedSmoothedOFDM + - 
(pEntry->rssi_stat.ValidBit-OFDM_pkt) * - UndecoratedSmoothedCCK)/pEntry->rssi_stat.ValidBit; + (pEntry->rssi_stat.ValidBit - OFDM_pkt) * + UndecoratedSmoothedCCK) / pEntry->rssi_stat.ValidBit; else UndecoratedSmoothedPWDB = 0; } diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c index afaf9e55195a..b9025815b682 100644 --- a/drivers/staging/rtl8188eu/hal/phy.c +++ b/drivers/staging/rtl8188eu/hal/phy.c @@ -69,10 +69,10 @@ static u32 rf_serial_read(struct adapter *adapt, bMaskDWord); tmplong2 = (tmplong2 & (~bLSSIReadAddress)) | - (offset<<23) | bLSSIReadEdge; + (offset << 23) | bLSSIReadEdge; phy_set_bb_reg(adapt, rFPGA0_XA_HSSIParameter2, bMaskDWord, - tmplong&(~bLSSIReadEdge)); + tmplong & (~bLSSIReadEdge)); udelay(10); phy_set_bb_reg(adapt, phyreg->rfHSSIPara2, bMaskDWord, tmplong2); @@ -102,7 +102,7 @@ static void rf_serial_write(struct adapter *adapt, struct bb_reg_def *phyreg = &adapt->HalData->PHYRegDef[rfpath]; offset &= 0xff; - data_and_addr = ((offset<<20) | (data&0x000fffff)) & 0x0fffffff; + data_and_addr = ((offset << 20) | (data & 0x000fffff)) & 0x0fffffff; phy_set_bb_reg(adapt, phyreg->rf3wireOffset, bMaskDWord, data_and_addr); } @@ -143,20 +143,20 @@ static void get_tx_power_index(struct adapter *adapt, u8 channel, u8 *cck_pwr, for (TxCount = 0; TxCount < path_nums; TxCount++) { if (TxCount == RF_PATH_A) { cck_pwr[TxCount] = hal_data->Index24G_CCK_Base[TxCount][index]; - ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+ + ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index] + hal_data->OFDM_24G_Diff[TxCount][RF_PATH_A]; - bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+ + bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index] + hal_data->BW20_24G_Diff[TxCount][RF_PATH_A]; bw40_pwr[TxCount] = hal_data->Index24G_BW40_Base[TxCount][index]; } else if (TxCount == RF_PATH_B) { cck_pwr[TxCount] = hal_data->Index24G_CCK_Base[TxCount][index]; - ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+ - hal_data->BW20_24G_Diff[RF_PATH_A][index]+ + ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index] + + hal_data->BW20_24G_Diff[RF_PATH_A][index] + hal_data->BW20_24G_Diff[TxCount][index]; - bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+ - hal_data->BW20_24G_Diff[TxCount][RF_PATH_A]+ + bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index] + + hal_data->BW20_24G_Diff[TxCount][RF_PATH_A] + hal_data->BW20_24G_Diff[TxCount][index]; bw40_pwr[TxCount] = hal_data->Index24G_BW40_Base[TxCount][index]; } @@ -205,7 +205,7 @@ static void phy_set_bw_mode_callback(struct adapter *adapt) /* Set MAC register */ reg_bw_opmode = usb_read8(adapt, REG_BWOPMODE); - reg_prsr_rsc = usb_read8(adapt, REG_RRSR+2); + reg_prsr_rsc = usb_read8(adapt, REG_RRSR + 2); switch (hal_data->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: @@ -215,9 +215,9 @@ static void phy_set_bw_mode_callback(struct adapter *adapt) case HT_CHANNEL_WIDTH_40: reg_bw_opmode &= ~BW_OPMODE_20MHZ; usb_write8(adapt, REG_BWOPMODE, reg_bw_opmode); - reg_prsr_rsc = (reg_prsr_rsc&0x90) | - (hal_data->nCur40MhzPrimeSC<<5); - usb_write8(adapt, REG_RRSR+2, reg_prsr_rsc); + reg_prsr_rsc = (reg_prsr_rsc & 0x90) | + (hal_data->nCur40MhzPrimeSC << 5); + usb_write8(adapt, REG_RRSR + 2, reg_prsr_rsc); break; default: break; @@ -236,7 +236,7 @@ static void phy_set_bw_mode_callback(struct adapter *adapt) * These settings are required only for 40MHz */ phy_set_bb_reg(adapt, rCCK0_System, bCCKSideBand, - 
(hal_data->nCur40MhzPrimeSC>>1)); + (hal_data->nCur40MhzPrimeSC >> 1)); phy_set_bb_reg(adapt, rOFDM1_LSTF, 0xC00, hal_data->nCur40MhzPrimeSC); phy_set_bb_reg(adapt, 0x818, (BIT(26) | BIT(27)), @@ -337,8 +337,8 @@ void rtl88eu_dm_txpower_track_adjust(struct odm_dm_struct *dm_odm, u8 type, if (pwr_value >= ODM_TXPWRTRACK_MAX_IDX_88E && *direction == 1) pwr_value = ODM_TXPWRTRACK_MAX_IDX_88E; - *out_write_val = pwr_value | (pwr_value<<8) | (pwr_value<<16) | - (pwr_value<<24); + *out_write_val = pwr_value | (pwr_value << 8) | (pwr_value << 16) | + (pwr_value << 24); } static void dm_txpwr_track_setpwr(struct odm_dm_struct *dm_odm) @@ -389,9 +389,9 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt) if (thermal_val) { /* Query OFDM path A default setting */ - ele_d = phy_query_bb_reg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D; + ele_d = phy_query_bb_reg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord) & bMaskOFDM_D; for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { - if (ele_d == (OFDMSwingTable[i]&bMaskOFDM_D)) { + if (ele_d == (OFDMSwingTable[i] & bMaskOFDM_D)) { ofdm_index_old[0] = (u8)i; dm_odm->BbSwingIdxOfdmBase = (u8)i; break; @@ -472,18 +472,18 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt) } } if (offset >= index_mapping_NUM_88E) - offset = index_mapping_NUM_88E-1; + offset = index_mapping_NUM_88E - 1; /* Updating ofdm_index values with new OFDM / CCK offset */ ofdm_index[0] = dm_odm->RFCalibrateInfo.OFDM_index[0] + ofdm_index_mapping[j][offset]; - if (ofdm_index[0] > OFDM_TABLE_SIZE_92D-1) - ofdm_index[0] = OFDM_TABLE_SIZE_92D-1; + if (ofdm_index[0] > OFDM_TABLE_SIZE_92D - 1) + ofdm_index[0] = OFDM_TABLE_SIZE_92D - 1; else if (ofdm_index[0] < ofdm_min_index) ofdm_index[0] = ofdm_min_index; cck_index = dm_odm->RFCalibrateInfo.CCK_index + ofdm_index_mapping[j][offset]; - if (cck_index > CCK_TABLE_SIZE-1) - cck_index = CCK_TABLE_SIZE-1; + if (cck_index > CCK_TABLE_SIZE - 1) + cck_index = CCK_TABLE_SIZE - 1; else if (cck_index < 0) cck_index = 0; @@ -548,8 +548,8 @@ static u8 phy_path_a_iqk(struct adapter *adapt, bool config_pathb) reg_e9c = phy_query_bb_reg(adapt, rTx_Power_After_IQK_A, bMaskDWord); if (!(reg_eac & BIT(28)) && - (((reg_e94 & 0x03FF0000)>>16) != 0x142) && - (((reg_e9c & 0x03FF0000)>>16) != 0x42)) + (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && + (((reg_e9c & 0x03FF0000) >> 16) != 0x42)) result |= 0x01; return result; } @@ -600,13 +600,13 @@ static u8 phy_path_a_rx_iqk(struct adapter *adapt, bool configPathB) reg_e9c = phy_query_bb_reg(adapt, rTx_Power_After_IQK_A, bMaskDWord); if (!(reg_eac & BIT(28)) && - (((reg_e94 & 0x03FF0000)>>16) != 0x142) && - (((reg_e9c & 0x03FF0000)>>16) != 0x42)) + (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && + (((reg_e9c & 0x03FF0000) >> 16) != 0x42)) result |= 0x01; else /* if Tx not OK, ignore Rx */ return result; - u4tmp = 0x80007C00 | (reg_e94&0x3FF0000) | ((reg_e9c&0x3FF0000) >> 16); + u4tmp = 0x80007C00 | (reg_e94 & 0x3FF0000) | ((reg_e9c & 0x3FF0000) >> 16); phy_set_bb_reg(adapt, rTx_IQK, bMaskDWord, u4tmp); /* 1 RX IQK */ @@ -648,8 +648,8 @@ static u8 phy_path_a_rx_iqk(struct adapter *adapt, bool configPathB) phy_set_rf_reg(adapt, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180); if (!(reg_eac & BIT(27)) && /* if Tx is OK, check whether Rx is OK */ - (((reg_ea4 & 0x03FF0000)>>16) != 0x132) && - (((reg_eac & 0x03FF0000)>>16) != 0x36)) + (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) && + (((reg_eac & 0x03FF0000) >> 16) != 0x36)) result |= 0x02; else ODM_RT_TRACE(dm_odm, 
ODM_COMP_CALIBRATION, ODM_DBG_LOUD, @@ -677,15 +677,15 @@ static u8 phy_path_b_iqk(struct adapter *adapt) regecc = phy_query_bb_reg(adapt, rRx_Power_After_IQK_B_2, bMaskDWord); if (!(regeac & BIT(31)) && - (((regeb4 & 0x03FF0000)>>16) != 0x142) && - (((regebc & 0x03FF0000)>>16) != 0x42)) + (((regeb4 & 0x03FF0000) >> 16) != 0x142) && + (((regebc & 0x03FF0000) >> 16) != 0x42)) result |= 0x01; else return result; if (!(regeac & BIT(30)) && - (((regec4 & 0x03FF0000)>>16) != 0x132) && - (((regecc & 0x03FF0000)>>16) != 0x36)) + (((regec4 & 0x03FF0000) >> 16) != 0x132) && + (((regecc & 0x03FF0000) >> 16) != 0x36)) result |= 0x02; else ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, @@ -711,7 +711,7 @@ static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], tx0_a = (x * oldval_0) >> 8; phy_set_bb_reg(adapt, rOFDM0_XATxIQImbalance, 0x3FF, tx0_a); phy_set_bb_reg(adapt, rOFDM0_ECCAThreshold, BIT(31), - ((x * oldval_0>>7) & 0x1)); + ((x * oldval_0 >> 7) & 0x1)); y = result[final_candidate][1]; if ((y & 0x00000200) != 0) @@ -719,11 +719,11 @@ static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], tx0_c = (y * oldval_0) >> 8; phy_set_bb_reg(adapt, rOFDM0_XCTxAFE, 0xF0000000, - ((tx0_c&0x3C0)>>6)); + ((tx0_c & 0x3C0) >> 6)); phy_set_bb_reg(adapt, rOFDM0_XATxIQImbalance, 0x003F0000, - (tx0_c&0x3F)); + (tx0_c & 0x3F)); phy_set_bb_reg(adapt, rOFDM0_ECCAThreshold, BIT(29), - ((y * oldval_0>>7) & 0x1)); + ((y * oldval_0 >> 7) & 0x1)); if (txonly) return; @@ -757,7 +757,7 @@ static void pathb_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], phy_set_bb_reg(adapt, rOFDM0_XBTxIQImbalance, 0x3FF, tx1_a); phy_set_bb_reg(adapt, rOFDM0_ECCAThreshold, BIT(27), - ((x * oldval_1>>7) & 0x1)); + ((x * oldval_1 >> 7) & 0x1)); y = result[final_candidate][5]; if ((y & 0x00000200) != 0) @@ -766,11 +766,11 @@ static void pathb_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], tx1_c = (y * oldval_1) >> 8; phy_set_bb_reg(adapt, rOFDM0_XDTxAFE, 0xF0000000, - ((tx1_c&0x3C0)>>6)); + ((tx1_c & 0x3C0) >> 6)); phy_set_bb_reg(adapt, rOFDM0_XBTxIQImbalance, 0x003F0000, - (tx1_c&0x3F)); + (tx1_c & 0x3F)); phy_set_bb_reg(adapt, rOFDM0_ECCAThreshold, BIT(25), - ((y * oldval_1>>7) & 0x1)); + ((y * oldval_1 >> 7) & 0x1)); if (txonly) return; @@ -851,9 +851,9 @@ static void mac_setting_calibration(struct adapter *adapt, u32 *mac_reg, u32 *ba usb_write8(adapt, mac_reg[i], 0x3F); for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) - usb_write8(adapt, mac_reg[i], (u8)(backup[i]&(~BIT(3)))); + usb_write8(adapt, mac_reg[i], (u8)(backup[i] & (~BIT(3)))); - usb_write8(adapt, mac_reg[i], (u8)(backup[i]&(~BIT(5)))); + usb_write8(adapt, mac_reg[i], (u8)(backup[i] & (~BIT(5)))); } static void path_a_standby(struct adapter *adapt) @@ -902,22 +902,22 @@ static bool simularity_compare(struct adapter *adapt, s32 resulta[][8], if (diff > MAX_TOLERANCE) { if ((i == 2 || i == 6) && !sim_bitmap) { - if (resulta[c1][i] + resulta[c1][i+1] == 0) - final_candidate[(i/4)] = c2; - else if (resulta[c2][i] + resulta[c2][i+1] == 0) - final_candidate[(i/4)] = c1; + if (resulta[c1][i] + resulta[c1][i + 1] == 0) + final_candidate[(i / 4)] = c2; + else if (resulta[c2][i] + resulta[c2][i + 1] == 0) + final_candidate[(i / 4)] = c1; else - sim_bitmap = sim_bitmap | (1<<i); + sim_bitmap = sim_bitmap | (1 << i); } else { - sim_bitmap = sim_bitmap | (1<<i); + sim_bitmap = sim_bitmap | (1 << i); } } } if (sim_bitmap == 0) { - for (i = 0; i < (bound/4); i++) { + for (i = 0; i < (bound / 4); i++) { if (final_candidate[i] != 
0xFF) { - for (j = i*4; j < (i+1)*4-2; j++) + for (j = i * 4; j < (i + 1) * 4 - 2; j++) resulta[3][j] = resulta[final_candidate[i]][j]; result = false; } @@ -1038,9 +1038,9 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8], path_a_ok = phy_path_a_iqk(adapt, is2t); if (path_a_ok == 0x01) { result[t][0] = (phy_query_bb_reg(adapt, rTx_Power_Before_IQK_A, - bMaskDWord)&0x3FF0000)>>16; + bMaskDWord) & 0x3FF0000) >> 16; result[t][1] = (phy_query_bb_reg(adapt, rTx_Power_After_IQK_A, - bMaskDWord)&0x3FF0000)>>16; + bMaskDWord) & 0x3FF0000) >> 16; break; } } @@ -1049,9 +1049,9 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8], path_a_ok = phy_path_a_rx_iqk(adapt, is2t); if (path_a_ok == 0x03) { result[t][2] = (phy_query_bb_reg(adapt, rRx_Power_Before_IQK_A_2, - bMaskDWord)&0x3FF0000)>>16; + bMaskDWord) & 0x3FF0000) >> 16; result[t][3] = (phy_query_bb_reg(adapt, rRx_Power_After_IQK_A_2, - bMaskDWord)&0x3FF0000)>>16; + bMaskDWord) & 0x3FF0000) >> 16; break; } ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, @@ -1073,19 +1073,19 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8], path_b_ok = phy_path_b_iqk(adapt); if (path_b_ok == 0x03) { result[t][4] = (phy_query_bb_reg(adapt, rTx_Power_Before_IQK_B, - bMaskDWord)&0x3FF0000)>>16; + bMaskDWord) & 0x3FF0000) >> 16; result[t][5] = (phy_query_bb_reg(adapt, rTx_Power_After_IQK_B, - bMaskDWord)&0x3FF0000)>>16; + bMaskDWord) & 0x3FF0000) >> 16; result[t][6] = (phy_query_bb_reg(adapt, rRx_Power_Before_IQK_B_2, - bMaskDWord)&0x3FF0000)>>16; + bMaskDWord) & 0x3FF0000) >> 16; result[t][7] = (phy_query_bb_reg(adapt, rRx_Power_After_IQK_B_2, - bMaskDWord)&0x3FF0000)>>16; + bMaskDWord) & 0x3FF0000) >> 16; break; } else if (i == (retry_count - 1) && path_b_ok == 0x01) { /* Tx IQK OK */ result[t][4] = (phy_query_bb_reg(adapt, rTx_Power_Before_IQK_B, - bMaskDWord)&0x3FF0000)>>16; + bMaskDWord) & 0x3FF0000) >> 16; result[t][5] = (phy_query_bb_reg(adapt, rTx_Power_After_IQK_B, - bMaskDWord)&0x3FF0000)>>16; + bMaskDWord) & 0x3FF0000) >> 16; } } @@ -1138,12 +1138,12 @@ static void phy_lc_calibrate(struct adapter *adapt, bool is2t) /* Check continuous TX and Packet TX */ tmpreg = usb_read8(adapt, 0xd03); - if ((tmpreg&0x70) != 0) - usb_write8(adapt, 0xd03, tmpreg&0x8F); + if ((tmpreg & 0x70) != 0) + usb_write8(adapt, 0xd03, tmpreg & 0x8F); else usb_write8(adapt, REG_TXPAUSE, 0xFF); - if ((tmpreg&0x70) != 0) { + if ((tmpreg & 0x70) != 0) { /* 1. Read original RF mode */ /* Path-A */ rf_a_mode = rtw_hal_read_rfreg(adapt, RF_PATH_A, RF_AC, @@ -1157,12 +1157,12 @@ static void phy_lc_calibrate(struct adapter *adapt, bool is2t) /* 2. Set RF mode = standby mode */ /* Path-A */ phy_set_rf_reg(adapt, RF_PATH_A, RF_AC, bMask12Bits, - (rf_a_mode&0x8FFFF)|0x10000); + (rf_a_mode & 0x8FFFF) | 0x10000); /* Path-B */ if (is2t) phy_set_rf_reg(adapt, RF_PATH_B, RF_AC, bMask12Bits, - (rf_b_mode&0x8FFFF)|0x10000); + (rf_b_mode & 0x8FFFF) | 0x10000); } /* 3. Read RF reg18 */ @@ -1170,12 +1170,12 @@ static void phy_lc_calibrate(struct adapter *adapt, bool is2t) /* 4. 
Set LC calibration begin bit15 */ phy_set_rf_reg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits, - lc_cal|0x08000); + lc_cal | 0x08000); msleep(100); /* Restore original situation */ - if ((tmpreg&0x70) != 0) { + if ((tmpreg & 0x70) != 0) { /* Deal with continuous TX case */ /* Path-A */ usb_write8(adapt, 0xd03, tmpreg); diff --git a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c index 249cbc375074..77edd7ad19a1 100644 --- a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c +++ b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c @@ -85,7 +85,7 @@ u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers, if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US) udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd)); else - udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd)*1000); + udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd) * 1000); break; case PWR_CMD_END: /* When this command is parsed, end the process */ diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c index 6fe4daea8fd5..00a9f692bb06 100644 --- a/drivers/staging/rtl8188eu/hal/rf.c +++ b/drivers/staging/rtl8188eu/hal/rf.c @@ -49,9 +49,9 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel) tx_agc[RF_PATH_B] = 0x3f3f3f3f; for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) { tx_agc[idx1] = powerlevel[idx1] | - (powerlevel[idx1]<<8) | - (powerlevel[idx1]<<16) | - (powerlevel[idx1]<<24); + (powerlevel[idx1] << 8) | + (powerlevel[idx1] << 16) | + (powerlevel[idx1] << 24); } } else { if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) { @@ -63,17 +63,17 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel) } else { for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) { tx_agc[idx1] = powerlevel[idx1] | - (powerlevel[idx1]<<8) | - (powerlevel[idx1]<<16) | - (powerlevel[idx1]<<24); + (powerlevel[idx1] << 8) | + (powerlevel[idx1] << 16) | + (powerlevel[idx1] << 24); } if (hal_data->EEPROMRegulatory == 0) { tmpval = hal_data->MCSTxPowerLevelOriginalOffset[0][6] + - (hal_data->MCSTxPowerLevelOriginalOffset[0][7]<<8); + (hal_data->MCSTxPowerLevelOriginalOffset[0][7] << 8); tx_agc[RF_PATH_A] += tmpval; tmpval = hal_data->MCSTxPowerLevelOriginalOffset[0][14] + - (hal_data->MCSTxPowerLevelOriginalOffset[0][15]<<24); + (hal_data->MCSTxPowerLevelOriginalOffset[0][15] << 24); tx_agc[RF_PATH_B] += tmpval; } } @@ -100,15 +100,15 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel) } /* rf-A cck tx power */ - tmpval = tx_agc[RF_PATH_A]&0xff; + tmpval = tx_agc[RF_PATH_A] & 0xff; phy_set_bb_reg(adapt, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval); - tmpval = tx_agc[RF_PATH_A]>>8; + tmpval = tx_agc[RF_PATH_A] >> 8; phy_set_bb_reg(adapt, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval); /* rf-B cck tx power */ - tmpval = tx_agc[RF_PATH_B]>>24; + tmpval = tx_agc[RF_PATH_B] >> 24; phy_set_bb_reg(adapt, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval); - tmpval = tx_agc[RF_PATH_B]&0x00ffffff; + tmpval = tx_agc[RF_PATH_B] & 0x00ffffff; phy_set_bb_reg(adapt, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval); } @@ -124,9 +124,9 @@ static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm, for (i = 0; i < 2; i++) { powerbase0 = pwr_level_ofdm[i]; - powerbase0 = (powerbase0<<24) | (powerbase0<<16) | - (powerbase0<<8) | powerbase0; - *(ofdmbase+i) = powerbase0; + powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) | + (powerbase0 << 8) | powerbase0; + *(ofdmbase + i) = powerbase0; } /* Check HT20 to HT40 diff */ if (adapt->HalData->CurrentChannelBW == 
HT_CHANNEL_WIDTH_20) @@ -134,8 +134,8 @@ static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm, else powerlevel[0] = pwr_level_bw40[0]; powerbase1 = powerlevel[0]; - powerbase1 = (powerbase1<<24) | (powerbase1<<16) | - (powerbase1<<8) | powerbase1; + powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) | + (powerbase1 << 8) | powerbase1; *mcs_base = powerbase1; } static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel, @@ -157,7 +157,7 @@ static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel, switch (regulatory) { case 0: chnlGroup = 0; - write_val = hal_data->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf ? 8 : 0)] + + write_val = hal_data->MCSTxPowerLevelOriginalOffset[chnlGroup][index + (rf ? 8 : 0)] + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); break; case 1: /* Realtek regulatory */ @@ -167,7 +167,7 @@ static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel, if (hal_data->pwrGroupCnt >= hal_data->PGMaxGroup) Hal_GetChnlGroup88E(channel, &chnlGroup); - write_val = hal_data->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf ? 8 : 0)] + + write_val = hal_data->MCSTxPowerLevelOriginalOffset[chnlGroup][index + (rf ? 8 : 0)] + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); break; case 2: /* Better regulatory */ @@ -179,14 +179,14 @@ static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel, chnlGroup = 0; if (index < 2) - pwr_diff = hal_data->TxPwrLegacyHtDiff[rf][channel-1]; + pwr_diff = hal_data->TxPwrLegacyHtDiff[rf][channel - 1]; else if (hal_data->CurrentChannelBW == HT_CHANNEL_WIDTH_20) - pwr_diff = hal_data->TxPwrHt20Diff[rf][channel-1]; + pwr_diff = hal_data->TxPwrHt20Diff[rf][channel - 1]; if (hal_data->CurrentChannelBW == HT_CHANNEL_WIDTH_40) - customer_pwr_limit = hal_data->PwrGroupHT40[rf][channel-1]; + customer_pwr_limit = hal_data->PwrGroupHT40[rf][channel - 1]; else - customer_pwr_limit = hal_data->PwrGroupHT20[rf][channel-1]; + customer_pwr_limit = hal_data->PwrGroupHT20[rf][channel - 1]; if (pwr_diff >= customer_pwr_limit) pwr_diff = 0; @@ -200,9 +200,9 @@ static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel, if (pwr_diff_limit[i] > pwr_diff) pwr_diff_limit[i] = pwr_diff; } - customer_limit = (pwr_diff_limit[3]<<24) | - (pwr_diff_limit[2]<<16) | - (pwr_diff_limit[1]<<8) | + customer_limit = (pwr_diff_limit[3] << 24) | + (pwr_diff_limit[2] << 16) | + (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]); write_val = customer_limit + ((index < 2) ? 
powerbase0[rf] : powerbase1[rf]); break; @@ -221,7 +221,7 @@ static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel, else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2) write_val = 0x00000000; - *(out_val+rf) = write_val; + *(out_val + rf) = write_val; } } @@ -240,12 +240,12 @@ static void write_ofdm_pwr_reg(struct adapter *adapt, u8 index, u32 *pvalue) for (rf = 0; rf < 2; rf++) { write_val = pvalue[rf]; for (i = 0; i < 4; i++) { - pwr_val[i] = (u8)((write_val & (0x7f<<(i*8)))>>(i*8)); + pwr_val[i] = (u8)((write_val & (0x7f << (i * 8))) >> (i * 8)); if (pwr_val[i] > RF6052_MAX_TX_PWR) pwr_val[i] = RF6052_MAX_TX_PWR; } - write_val = (pwr_val[3]<<24) | (pwr_val[2]<<16) | - (pwr_val[1]<<8) | pwr_val[0]; + write_val = (pwr_val[3] << 24) | (pwr_val[2] << 16) | + (pwr_val[1] << 8) | pwr_val[0]; if (rf == 0) regoffset = regoffset_a[index]; diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c index 47b1bf5a6143..0b20e62f9a68 100644 --- a/drivers/staging/rtl8188eu/hal/rf_cfg.c +++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c @@ -15,7 +15,7 @@ static bool check_condition(struct adapter *adapt, const u32 condition) u32 _board = odm->BoardType; u32 _platform = odm->SupportPlatform; u32 _interface = odm->SupportInterface; - u32 cond = condition; + u32 cond; if (condition == 0xCDCDCDCD) return true; @@ -143,7 +143,7 @@ static u32 Array_RadioA_1T_8188E[] = { #define READ_NEXT_PAIR(v1, v2, i) \ do { \ i += 2; v1 = array[i]; \ - v2 = array[i+1]; \ + v2 = array[i + 1]; \ } while (0) #define RFREG_OFFSET_MASK 0xfffff @@ -190,7 +190,7 @@ static bool rtl88e_phy_config_rf_with_headerfile(struct adapter *adapt) for (i = 0; i < array_len; i += 2) { u32 v1 = array[i]; - u32 v2 = array[i+1]; + u32 v2 = array[i + 1]; if (v1 < 0xCDCDCDCD) { rtl8188e_config_rf_reg(adapt, v1, v2); diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c index 7646167a0b36..371e746915dd 100644 --- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c +++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c @@ -113,24 +113,24 @@ void rtw_hal_add_ra_tid(struct adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi_le struct odm_dm_struct *odmpriv = &pAdapter->HalData->odmpriv; u8 macid, init_rate, raid, shortGIrate = false; - macid = arg&0x1f; + macid = arg & 0x1f; - raid = (bitmap>>28) & 0x0f; + raid = (bitmap >> 28) & 0x0f; bitmap &= 0x0fffffff; if (rssi_level != DM_RATR_STA_INIT) bitmap = ODM_Get_Rate_Bitmap(odmpriv, macid, bitmap, rssi_level); - bitmap |= ((raid<<28)&0xf0000000); + bitmap |= ((raid << 28) & 0xf0000000); - init_rate = get_highest_rate_idx(bitmap&0x0fffffff)&0x3f; + init_rate = get_highest_rate_idx(bitmap & 0x0fffffff) & 0x3f; shortGIrate = (arg & BIT(5)) ? 
true : false; if (shortGIrate) init_rate |= BIT(6); - raid = (bitmap>>28) & 0x0f; + raid = (bitmap >> 28) & 0x0f; bitmap &= 0x0fffffff; @@ -172,7 +172,7 @@ void rtl8188e_set_FwPwrMode_cmd(struct adapter *adapt, u8 Mode) break; } - H2CSetPwrMode.SmartPS_RLBM = (((pwrpriv->smart_ps<<4)&0xf0) | (RLBM & 0x0f)); + H2CSetPwrMode.SmartPS_RLBM = (((pwrpriv->smart_ps << 4) & 0xf0) | (RLBM & 0x0f)); H2CSetPwrMode.AwakeInterval = 1; @@ -239,9 +239,9 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength) pframe += 2; pktlen += 2; - if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) { + if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) { pktlen += cur_network->ie_length - sizeof(struct ndis_802_11_fixed_ie); - memcpy(pframe, cur_network->ies+sizeof(struct ndis_802_11_fixed_ie), pktlen); + memcpy(pframe, cur_network->ies + sizeof(struct ndis_802_11_fixed_ie), pktlen); goto _ConstructBeacon; } @@ -258,7 +258,7 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength) /* DS parameter set */ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&(cur_network->Configuration.DSConfig), &pktlen); - if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) { + if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) { u32 ATIMWindow; /* IBSS Parameter Set... */ ATIMWindow = 0; @@ -473,7 +473,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished) /* 3 (2) ps-poll *1 page */ RsvdPageLoc.LocPsPoll = PageNum; ConstructPSPoll(adapt, &ReservedPagePacket[BufIndex], &PSPollLength); - rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], PSPollLength, true, false); + rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex - TxDescLen], PSPollLength, true, false); PageNeed = (u8)PageNum_128(TxDescLen + PSPollLength); PageNum += PageNeed; @@ -483,7 +483,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished) /* 3 (3) null data * 1 page */ RsvdPageLoc.LocNullData = PageNum; ConstructNullFunctionData(adapt, &ReservedPagePacket[BufIndex], &NullDataLength, pnetwork->MacAddress, false, 0, 0, false); - rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], NullDataLength, false, false); + rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex - TxDescLen], NullDataLength, false, false); PageNeed = (u8)PageNum_128(TxDescLen + NullDataLength); PageNum += PageNeed; @@ -493,7 +493,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished) /* 3 (4) probe response * 1page */ RsvdPageLoc.LocProbeRsp = PageNum; ConstructProbeRsp(adapt, &ReservedPagePacket[BufIndex], &ProbeRspLength, pnetwork->MacAddress, false); - rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], ProbeRspLength, false, false); + rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex - TxDescLen], ProbeRspLength, false, false); PageNeed = (u8)PageNum_128(TxDescLen + ProbeRspLength); PageNum += PageNeed; @@ -504,7 +504,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished) RsvdPageLoc.LocQosNull = PageNum; ConstructNullFunctionData(adapt, &ReservedPagePacket[BufIndex], &QosNullLength, pnetwork->MacAddress, true, 0, 0, false); - rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], QosNullLength, false, false); + rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex - TxDescLen], QosNullLength, false, false); PageNeed = (u8)PageNum_128(TxDescLen + QosNullLength); PageNum += PageNeed; @@ -546,17 +546,17 @@ void 
rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus) if (mstatus == 1) { /* We should set AID, correct TSF, HW seq enable before set JoinBssReport to Fw in 88/92C. */ /* Suggested by filen. Added by tynli. */ - usb_write16(adapt, REG_BCN_PSR_RPT, (0xC000|pmlmeinfo->aid)); + usb_write16(adapt, REG_BCN_PSR_RPT, (0xC000 | pmlmeinfo->aid)); /* Do not set TSF again here or vWiFi beacon DMA INT will not work. */ /* Set REG_CR bit 8. DMA beacon by SW. */ haldata->RegCR_1 |= BIT(0); - usb_write8(adapt, REG_CR+1, haldata->RegCR_1); + usb_write8(adapt, REG_CR + 1, haldata->RegCR_1); /* Disable Hw protection for a time which revserd for Hw sending beacon. */ /* Fix download reserved page packet fail that access collision with the protection time. */ /* 2010.05.11. Added by tynli. */ - usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL)&(~BIT(3))); + usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL) & (~BIT(3))); usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL) | BIT(4)); if (haldata->RegFwHwTxQCtrl & BIT(6)) { @@ -565,7 +565,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus) } /* Set FWHW_TXQ_CTRL 0x422[6]=0 to tell Hw the packet is not a real beacon frame. */ - usb_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl&(~BIT(6)))); + usb_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl & (~BIT(6)))); haldata->RegFwHwTxQCtrl &= (~BIT(6)); /* Clear beacon valid check bit. */ @@ -582,7 +582,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus) /* check rsvd page download OK. */ rtw_hal_get_hwreg(adapt, HW_VAR_BCN_VALID, (u8 *)(&bcn_valid)); poll++; - } while (!bcn_valid && (poll%10) != 0 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped); + } while (!bcn_valid && (poll % 10) != 0 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped); } while (!bcn_valid && DLBcnCount <= 100 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped); if (adapt->bSurpriseRemoved || adapt->bDriverStopped) @@ -600,7 +600,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus) /* Enable Bcn */ usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL) | BIT(3)); - usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL)&(~BIT(4))); + usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL) & (~BIT(4))); /* To make sure that if there exists an adapter which would like to send beacon. */ /* If exists, the origianl value of 0x422[6] will be 1, we should check this to */ @@ -608,7 +608,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus) /* the beacon cannot be sent by HW. */ /* 2010.06.23. Added by tynli. */ if (bSendBeacon) { - usb_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl | BIT(6))); + usb_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl | BIT(6))); haldata->RegFwHwTxQCtrl |= BIT(6); } @@ -621,6 +621,6 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus) /* Do not enable HW DMA BCN or it will cause Pcie interface hang by timing issue. 2011.11.24. by tynli. */ /* Clear CR[8] or beacon packet will not be send to TxBuf anymore. 
*/ haldata->RegCR_1 &= (~BIT(0)); - usb_write8(adapt, REG_CR+1, haldata->RegCR_1); + usb_write8(adapt, REG_CR + 1, haldata->RegCR_1); } } diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c index 57ae0e83dd3e..740004d71a15 100644 --- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c +++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c @@ -22,7 +22,7 @@ void iol_mode_enable(struct adapter *padapter, u8 enable) if (enable) { /* Enable initial offload */ reg_0xf0 = usb_read8(padapter, REG_SYS_CFG); - usb_write8(padapter, REG_SYS_CFG, reg_0xf0|SW_OFFLOAD_EN); + usb_write8(padapter, REG_SYS_CFG, reg_0xf0 | SW_OFFLOAD_EN); if (!padapter->bFWReady) { DBG_88E("bFWReady == false call reset 8051...\n"); @@ -42,9 +42,9 @@ s32 iol_execute(struct adapter *padapter, u8 control) u8 reg_0x88 = 0; unsigned long start = 0; - control = control&0x0f; + control = control & 0x0f; reg_0x88 = usb_read8(padapter, REG_HMEBOX_E0); - usb_write8(padapter, REG_HMEBOX_E0, reg_0x88|control); + usb_write8(padapter, REG_HMEBOX_E0, reg_0x88 | control); start = jiffies; while ((reg_0x88 = usb_read8(padapter, REG_HMEBOX_E0)) & control && @@ -54,7 +54,7 @@ s32 iol_execute(struct adapter *padapter, u8 control) reg_0x88 = usb_read8(padapter, REG_HMEBOX_E0); status = (reg_0x88 & control) ? _FAIL : _SUCCESS; - if (reg_0x88 & control<<4) + if (reg_0x88 & control << 4) status = _FAIL; return status; } @@ -64,7 +64,7 @@ static s32 iol_InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy) s32 rst = _SUCCESS; iol_mode_enable(padapter, 1); - usb_write8(padapter, REG_TDECTRL+1, txpktbuf_bndy); + usb_write8(padapter, REG_TDECTRL + 1, txpktbuf_bndy); rst = iol_execute(padapter, CMD_INIT_LLT); iol_mode_enable(padapter, 0); return rst; @@ -92,9 +92,9 @@ void _8051Reset88E(struct adapter *padapter) { u8 u1bTmp; - u1bTmp = usb_read8(padapter, REG_SYS_FUNC_EN+1); - usb_write8(padapter, REG_SYS_FUNC_EN+1, u1bTmp&(~BIT(2))); - usb_write8(padapter, REG_SYS_FUNC_EN+1, u1bTmp|(BIT(2))); + u1bTmp = usb_read8(padapter, REG_SYS_FUNC_EN + 1); + usb_write8(padapter, REG_SYS_FUNC_EN + 1, u1bTmp & (~BIT(2))); + usb_write8(padapter, REG_SYS_FUNC_EN + 1, u1bTmp | (BIT(2))); DBG_88E("=====> _8051Reset88E(): 8051 reset success .\n"); } @@ -122,7 +122,7 @@ void rtw_hal_read_chip_version(struct adapter *padapter) value32 = usb_read32(padapter, REG_SYS_CFG); ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP); ChipVersion.VendorType = ((value32 & VENDOR_ID) ? 
CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC); - ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK)>>CHIP_VER_RTL_SHIFT; /* IC version (CUT) */ + ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK) >> CHIP_VER_RTL_SHIFT; /* IC version (CUT) */ dump_chip_info(ChipVersion); @@ -163,10 +163,10 @@ void rtw_hal_notch_filter(struct adapter *adapter, bool enable) { if (enable) { DBG_88E("Enable notch filter\n"); - usb_write8(adapter, rOFDM0_RxDSP+1, usb_read8(adapter, rOFDM0_RxDSP+1) | BIT(1)); + usb_write8(adapter, rOFDM0_RxDSP + 1, usb_read8(adapter, rOFDM0_RxDSP + 1) | BIT(1)); } else { DBG_88E("Disable notch filter\n"); - usb_write8(adapter, rOFDM0_RxDSP+1, usb_read8(adapter, rOFDM0_RxDSP+1) & ~BIT(1)); + usb_write8(adapter, rOFDM0_RxDSP + 1, usb_read8(adapter, rOFDM0_RxDSP + 1) & ~BIT(1)); } } @@ -308,7 +308,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G, if (pwrInfo24G->IndexCCK_Base[rfPath][group] == 0xFF) pwrInfo24G->IndexCCK_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX; } - for (group = 0; group < MAX_CHNL_GROUP_24G-1; group++) { + for (group = 0; group < MAX_CHNL_GROUP_24G - 1; group++) { pwrInfo24G->IndexBW40_Base[rfPath][group] = PROMContent[eeAddr++]; if (pwrInfo24G->IndexBW40_Base[rfPath][group] == 0xFF) pwrInfo24G->IndexBW40_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX; @@ -319,7 +319,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G, if (PROMContent[eeAddr] == 0xFF) { pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_24G_HT20_DIFF; } else { - pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4; + pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0xf0) >> 4; if (pwrInfo24G->BW20_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */ pwrInfo24G->BW20_Diff[rfPath][TxCount] |= 0xF0; } @@ -327,7 +327,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G, if (PROMContent[eeAddr] == 0xFF) { pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_24G_OFDM_DIFF; } else { - pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f); + pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0x0f); if (pwrInfo24G->OFDM_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */ pwrInfo24G->OFDM_Diff[rfPath][TxCount] |= 0xF0; } @@ -337,7 +337,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G, if (PROMContent[eeAddr] == 0xFF) { pwrInfo24G->BW40_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF; } else { - pwrInfo24G->BW40_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4; + pwrInfo24G->BW40_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0xf0) >> 4; if (pwrInfo24G->BW40_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */ pwrInfo24G->BW40_Diff[rfPath][TxCount] |= 0xF0; } @@ -345,7 +345,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G, if (PROMContent[eeAddr] == 0xFF) { pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF; } else { - pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f); + pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0x0f); if (pwrInfo24G->BW20_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */ pwrInfo24G->BW20_Diff[rfPath][TxCount] |= 0xF0; } @@ -354,7 +354,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G, if (PROMContent[eeAddr] == 0xFF) { pwrInfo24G->OFDM_Diff[rfPath][TxCount] = 
EEPROM_DEFAULT_DIFF; } else { - pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4; + pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0xf0) >> 4; if (pwrInfo24G->OFDM_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */ pwrInfo24G->OFDM_Diff[rfPath][TxCount] |= 0xF0; } @@ -362,7 +362,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G, if (PROMContent[eeAddr] == 0xFF) { pwrInfo24G->CCK_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF; } else { - pwrInfo24G->CCK_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f); + pwrInfo24G->CCK_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0x0f); if (pwrInfo24G->CCK_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */ pwrInfo24G->CCK_Diff[rfPath][TxCount] |= 0xF0; } @@ -450,9 +450,9 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto /* 2010/10/19 MH Add Regulator recognize for CU. */ if (!AutoLoadFail) { - pHalData->EEPROMRegulatory = (PROMContent[EEPROM_RF_BOARD_OPTION_88E]&0x7); /* bit0~2 */ + pHalData->EEPROMRegulatory = (PROMContent[EEPROM_RF_BOARD_OPTION_88E] & 0x7); /* bit0~2 */ if (PROMContent[EEPROM_RF_BOARD_OPTION_88E] == 0xFF) - pHalData->EEPROMRegulatory = (EEPROM_DEFAULT_BOARD_OPTION&0x7); /* bit0~2 */ + pHalData->EEPROMRegulatory = (EEPROM_DEFAULT_BOARD_OPTION & 0x7); /* bit0~2 */ } else { pHalData->EEPROMRegulatory = 0; } @@ -532,9 +532,9 @@ void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool if (!AutoLoadFail) { /* Antenna Diversity setting. */ if (registry_par->antdiv_cfg == 2) { /* 2:By EFUSE */ - pHalData->AntDivCfg = (PROMContent[EEPROM_RF_BOARD_OPTION_88E]&0x18)>>3; + pHalData->AntDivCfg = (PROMContent[EEPROM_RF_BOARD_OPTION_88E] & 0x18) >> 3; if (PROMContent[EEPROM_RF_BOARD_OPTION_88E] == 0xFF) - pHalData->AntDivCfg = (EEPROM_DEFAULT_BOARD_OPTION&0x18)>>3; + pHalData->AntDivCfg = (EEPROM_DEFAULT_BOARD_OPTION & 0x18) >> 3; } else { pHalData->AntDivCfg = registry_par->antdiv_cfg; /* 0:OFF , 1:ON, 2:By EFUSE */ } diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c index 0a900827c4fc..7d0135fde795 100644 --- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c +++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c @@ -182,7 +182,7 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe, rtl8188e_process_phy_info(padapter, precvframe); } } else if (pkt_info.bPacketToSelf || pkt_info.bPacketBeacon) { - if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) { + if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)) { if (psta) precvframe->psta = psta; } diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c index 2808f2b119bf..7d315bd438d4 100644 --- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c +++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c @@ -58,12 +58,12 @@ void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u /* offset 0 */ ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); /* own, bFirstSeg, bLastSeg; */ - ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000); /* 32 bytes for TX Desc */ + ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE + OFFSET_SZ) << OFFSET_SHT) & 0x00ff0000); /* 32 bytes for TX Desc */ - ptxdesc->txdw0 |= cpu_to_le32(BufferLen&0x0000ffff); /* Buffer size + command header */ + ptxdesc->txdw0 |= cpu_to_le32(BufferLen & 
0x0000ffff); /* Buffer size + command header */ /* offset 4 */ - ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT<<QSEL_SHT)&0x00001f00); /* Fixed queue of Mgnt queue */ + ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT << QSEL_SHT) & 0x00001f00); /* Fixed queue of Mgnt queue */ /* Set NAVUSEHDR to prevent Ps-poll AId filed to be changed to error vlaue by Hw. */ if (ispspoll) { @@ -91,16 +91,16 @@ static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxd /* SEC_TYPE : 0:NO_ENC,1:WEP40/TKIP,2:WAPI,3:AES */ case _WEP40_: case _WEP104_: - ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000); + ptxdesc->txdw1 |= cpu_to_le32((0x01 << SEC_TYPE_SHT) & 0x00c00000); ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT); break; case _TKIP_: case _TKIP_WTMIC_: - ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000); + ptxdesc->txdw1 |= cpu_to_le32((0x01 << SEC_TYPE_SHT) & 0x00c00000); ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT); break; case _AES_: - ptxdesc->txdw1 |= cpu_to_le32((0x03<<SEC_TYPE_SHT)&0x00c00000); + ptxdesc->txdw1 |= cpu_to_le32((0x03 << SEC_TYPE_SHT) & 0x00c00000); ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT); break; case _NO_PRIVACY_: @@ -127,7 +127,7 @@ static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw) *pdw |= cpu_to_le32(HW_RTS_EN); /* Set RTS BW */ if (pattrib->ht_en) { - *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(27)) : 0; + *pdw |= (pattrib->bwmode & HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(27)) : 0; if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER) *pdw |= cpu_to_le32((0x01 << 28) & 0x30000000); @@ -144,7 +144,7 @@ static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw) static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw) { if (pattrib->ht_en) { - *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(25)) : 0; + *pdw |= (pattrib->bwmode & HT_CHANNEL_WIDTH_40) ? 
cpu_to_le32(BIT(25)) : 0; if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER) *pdw |= cpu_to_le32((0x01 << DATA_SC_SHT) & 0x003f0000); @@ -171,7 +171,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag if (adapt->registrypriv.mp_mode == 0) { if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) { - ptxdesc = (struct tx_desc *)(pmem+PACKET_OFFSET_SZ); + ptxdesc = (struct tx_desc *)(pmem + PACKET_OFFSET_SZ); pull = 1; } } @@ -263,11 +263,11 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag ptxdesc->txdw4 |= cpu_to_le32(BIT(24));/* DATA_SHORT */ ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate)); } - } else if ((pxmitframe->frame_tag&0x0f) == MGNT_FRAMETAG) { + } else if ((pxmitframe->frame_tag & 0x0f) == MGNT_FRAMETAG) { /* offset 4 */ ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3f); - qsel = (uint)(pattrib->qsel&0x0000001f); + qsel = (uint)(pattrib->qsel & 0x0000001f); ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00); ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000f0000); @@ -278,7 +278,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag ptxdesc->txdw2 |= cpu_to_le32(BIT(19)); /* offset 12 */ - ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0FFF0000); + ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000); /* offset 20 */ ptxdesc->txdw5 |= cpu_to_le32(RTY_LMT_EN);/* retry limit enable */ @@ -288,7 +288,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag ptxdesc->txdw5 |= cpu_to_le32(0x00300000);/* retry limit = 12 */ ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate)); - } else if ((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) { + } else if ((pxmitframe->frame_tag & 0x0f) == TXAGG_FRAMETAG) { DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n"); } else { DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag); @@ -301,7 +301,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag /* offset 8 */ /* offset 12 */ - ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0fff0000); + ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0fff0000); /* offset 20 */ ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate)); @@ -466,7 +466,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, /* 3 2. 
aggregate same priority and same DA(AP or STA) frames */ pfirstframe = pxmitframe; - len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset*PACKET_OFFSET_SZ); + len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset * PACKET_OFFSET_SZ); pbuf_tail = len; pbuf = round_up(pbuf_tail, 8); @@ -517,7 +517,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, pxmitframe->agg_num = 0; /* not first frame of aggregation */ pxmitframe->pkt_offset = 0; /* not first frame of aggregation, no need to reserve offset */ - len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset*PACKET_OFFSET_SZ); + len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ); if (round_up(pbuf + len, 8) > MAX_XMITBUF_SZ) { pxmitframe->agg_num = 1; diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h index ba7e15fbde72..b9f11ef327e7 100644 --- a/drivers/staging/rtl8188eu/include/rtw_xmit.h +++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h @@ -112,7 +112,7 @@ struct pkt_attrib { u32 last_txcmdsz; u8 nr_frags; u8 encrypt; /* when 0 indicate no encrypt. when non-zero, - * indicate the encrypt algorith + * indicate the encrypt algorithm */ u8 iv_len; u8 icv_len; diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index 9b6ea86d1dcf..9a89791720e0 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c @@ -193,12 +193,12 @@ static char *translate_scan(struct adapter *padapter, /*Add basic and extended rates */ max_rate = 0; p = custom; - p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): "); + p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): "); while (pnetwork->network.SupportedRates[i] != 0) { rate = pnetwork->network.SupportedRates[i]&0x7F; if (rate > max_rate) max_rate = rate; - p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), + p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), "%d%s ", rate >> 1, (rate & 1) ? 
".5" : ""); i++; } @@ -2009,21 +2009,16 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p) struct ieee_param *param; uint ret = 0; - if (p->length < sizeof(struct ieee_param) || !p->pointer) { - ret = -EINVAL; - goto out; - } + if (!p->pointer || p->length != sizeof(struct ieee_param)) + return -EINVAL; param = (struct ieee_param *)rtw_malloc(p->length); - if (!param) { - ret = -ENOMEM; - goto out; - } + if (!param) + return -ENOMEM; if (copy_from_user(param, p->pointer, p->length)) { kfree(param); - ret = -EFAULT; - goto out; + return -EFAULT; } switch (param->cmd) { @@ -2054,9 +2049,6 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p) ret = -EFAULT; kfree(param); - -out: - return ret; } @@ -2791,26 +2783,19 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p) * so, we just check hw_init_completed */ - if (!padapter->hw_init_completed) { - ret = -EPERM; - goto out; - } + if (!padapter->hw_init_completed) + return -EPERM; - if (!p->pointer) { - ret = -EINVAL; - goto out; - } + if (!p->pointer || p->length != sizeof(struct ieee_param)) + return -EINVAL; param = (struct ieee_param *)rtw_malloc(p->length); - if (!param) { - ret = -ENOMEM; - goto out; - } + if (!param) + return -ENOMEM; if (copy_from_user(param, p->pointer, p->length)) { kfree(param); - ret = -EFAULT; - goto out; + return -EFAULT; } switch (param->cmd) { @@ -2865,7 +2850,6 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p) if (ret == 0 && copy_to_user(p->pointer, param, p->length)) ret = -EFAULT; kfree(param); -out: return ret; } #endif diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c index 69d4b1d66b6f..4d6d0347ab8e 100644 --- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c +++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c @@ -26,18 +26,17 @@ void _rtw_init_queue(struct __queue *pqueue) struct net_device *rtw_alloc_etherdev_with_old_priv(void *old_priv) { - struct net_device *pnetdev; + struct net_device *netdev; struct rtw_netdev_priv_indicator *pnpi; - pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4); - if (!pnetdev) - goto RETURN; + netdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4); + if (!netdev) + return NULL; - pnpi = netdev_priv(pnetdev); + pnpi = netdev_priv(netdev); pnpi->priv = old_priv; -RETURN: - return pnetdev; + return netdev; } void rtw_free_netdev(struct net_device *netdev) @@ -45,18 +44,15 @@ void rtw_free_netdev(struct net_device *netdev) struct rtw_netdev_priv_indicator *pnpi; if (!netdev) - goto RETURN; + return; pnpi = netdev_priv(netdev); if (!pnpi->priv) - goto RETURN; + return; vfree(pnpi->priv); free_netdev(netdev); - -RETURN: - return; } void rtw_buf_free(u8 **buf, u32 *buf_len) diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index b5d42f411dd8..f7f09c0d273f 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -32,12 +32,14 @@ static const struct usb_device_id rtw_usb_id_tbl[] = { /****** 8188EUS ********/ {USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */ {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ + {USB_DEVICE(0x0B05, 0x18F0)}, /* ASUS USB-N10 Nano B1 */ {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ {USB_DEVICE(0x2001, 0x331B)}, /* 
D-Link DWA-121 rev B1 */ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */ + {USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ {} /* Terminating entry */ diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c index 980b850d729a..ddcd7885d190 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c @@ -304,7 +304,7 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev) u16 i, usValue, IC_Version; u16 EEPROMId; - RT_TRACE(COMP_INIT, "====> _rtl92e_read_eeprom_info\n"); + RT_TRACE(COMP_INIT, "====> %s\n", __func__); EEPROMId = rtl92e_eeprom_read(dev, 0); if (EEPROMId != RTL8190_EEPROM_ID) { @@ -1354,8 +1354,8 @@ static u8 _rtl92e_rate_hw_to_mgn(bool bIsHT, u8 rate) default: RT_TRACE(COMP_RECV, - "_rtl92e_rate_hw_to_mgn(): Non supportedRate [%x], bIsHT = %d!!!\n", - rate, bIsHT); + "%s: Non supportedRate [%x], bIsHT = %d!!!\n", + __func__, rate, bIsHT); break; } @@ -1415,8 +1415,8 @@ static u8 _rtl92e_rate_hw_to_mgn(bool bIsHT, u8 rate) default: RT_TRACE(COMP_RECV, - "_rtl92e_rate_hw_to_mgn(): Non supported Rate [%x], bIsHT = %d!!!\n", - rate, bIsHT); + "%s: Non supported Rate [%x], bIsHT = %d!!!\n", + __func__, rate, bIsHT); break; } } diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c index 7d78f16efc1d..411138102948 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c @@ -1124,14 +1124,14 @@ static void _rtl92e_cck_tx_power_track_bw_switch_thermal(struct net_device *dev) priv->Record_CCK_20Mindex = 6; priv->CCK_index = priv->Record_CCK_20Mindex; RT_TRACE(COMP_POWER_TRACKING, - "20MHz, _rtl92e_cck_tx_power_track_bw_switch_thermal(),CCK_index = %d\n", + "20MHz, %s,CCK_index = %d\n", __func__, priv->CCK_index); break; case HT_CHANNEL_WIDTH_20_40: priv->CCK_index = priv->Record_CCK_40Mindex; RT_TRACE(COMP_POWER_TRACKING, - "40MHz, _rtl92e_cck_tx_power_track_bw_switch_thermal(), CCK_index = %d\n", + "40MHz, %s, CCK_index = %d\n", __func__, priv->CCK_index); break; } @@ -1155,7 +1155,7 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev) u8 regBwOpMode; RT_TRACE(COMP_SWBW, - "==>_rtl92e_set_bw_mode_work_item() Switch to %s bandwidth\n", + "==>%s Switch to %s bandwidth\n", __func__, priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ? "20MHz" : "40MHz"); @@ -1416,15 +1416,14 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev, if (priv->SetRFPowerStateInProgress) return false; - RT_TRACE(COMP_PS, "===========> _rtl92e_set_rf_power_state()!\n"); + RT_TRACE(COMP_PS, "===========> %s!\n", __func__); priv->SetRFPowerStateInProgress = true; switch (priv->rf_chip) { case RF_8256: switch (eRFPowerState) { case eRfOn: - RT_TRACE(COMP_PS, - "_rtl92e_set_rf_power_state() eRfOn!\n"); + RT_TRACE(COMP_PS, "%s eRfOn!\n", __func__); if ((priv->rtllib->eRFPowerState == eRfOff) && RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) { bool rtstatus; @@ -1490,10 +1489,8 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev, } if (i >= MAX_DOZE_WAITING_TIMES_9x) { - RT_TRACE(COMP_POWER, - "\n\n\n TimeOut!! _rtl92e_set_rf_power_state(): eRfOff: %d times TcbBusyQueue[%d] != 0 !!!\n", - MAX_DOZE_WAITING_TIMES_9x, - QueueID); + RT_TRACE(COMP_POWER, "\n\n\n TimeOut!! 
%s: eRfOff: %d times TcbBusyQueue[%d] != 0 !!!\n", + __func__, MAX_DOZE_WAITING_TIMES_9x, QueueID); break; } } @@ -1501,8 +1498,7 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev, break; case eRfOff: - RT_TRACE(COMP_PS, - "_rtl92e_set_rf_power_state() eRfOff/Sleep !\n"); + RT_TRACE(COMP_PS, "%s eRfOff/Sleep !\n", __func__); for (QueueID = 0, i = 0; QueueID < MAX_TX_QUEUE; ) { ring = &priv->tx_ring[QueueID]; @@ -1567,9 +1563,7 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev, } priv->SetRFPowerStateInProgress = false; - RT_TRACE(COMP_PS, - "<=========== _rtl92e_set_rf_power_state() bResult = %d!\n", - bResult); + RT_TRACE(COMP_PS, "<=========== %s bResult = %d!\n", __func__, bResult); return bResult; } @@ -1581,21 +1575,17 @@ bool rtl92e_set_rf_power_state(struct net_device *dev, bool bResult = false; RT_TRACE(COMP_PS, - "---------> rtl92e_set_rf_power_state(): eRFPowerState(%d)\n", - eRFPowerState); + "---------> %s: eRFPowerState(%d)\n", __func__, eRFPowerState); if (eRFPowerState == priv->rtllib->eRFPowerState && priv->bHwRfOffAction == 0) { - RT_TRACE(COMP_PS, - "<--------- rtl92e_set_rf_power_state(): discard the request for eRFPowerState(%d) is the same.\n", - eRFPowerState); + RT_TRACE(COMP_PS, "<--------- %s: discard the request for eRFPowerState(%d) is the same.\n", + __func__, eRFPowerState); return bResult; } bResult = _rtl92e_set_rf_power_state(dev, eRFPowerState); - RT_TRACE(COMP_PS, - "<--------- rtl92e_set_rf_power_state(): bResult(%d)\n", - bResult); + RT_TRACE(COMP_PS, "<--------- %s: bResult(%d)\n", __func__, bResult); return bResult; } diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c index 627ea1029509..c8506517cc8d 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c @@ -108,8 +108,8 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex, } RT_TRACE(COMP_SEC, - "====>to rtl92e_set_key(), dev:%p, EntryNo:%d, KeyIndex:%d,KeyType:%d, MacAddr %pM\n", - dev, EntryNo, KeyIndex, KeyType, MacAddr); + "====>to %s, dev:%p, EntryNo:%d, KeyIndex:%d,KeyType:%d, MacAddr %pM\n", + __func__, dev, EntryNo, KeyIndex, KeyType, MacAddr); if (DefaultKey) usConfig |= BIT15 | (KeyType<<2); @@ -163,7 +163,7 @@ void rtl92e_cam_restore(struct net_device *dev) 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - RT_TRACE(COMP_SEC, "rtl92e_cam_restore:\n"); + RT_TRACE(COMP_SEC, "%s:\n", __func__); if ((priv->rtllib->pairwise_key_type == KEY_TYPE_WEP40) || diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index 11183b9f757a..d3664e508cbe 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -145,21 +145,21 @@ bool rtl92e_set_rf_state(struct net_device *dev, unsigned long flag; RT_TRACE((COMP_PS | COMP_RF), - "===>rtl92e_set_rf_state(): StateToSet(%d)\n", StateToSet); + "===>%s: StateToSet(%d)\n", __func__, StateToSet); while (true) { spin_lock_irqsave(&priv->rf_ps_lock, flag); if (priv->RFChangeInProgress) { spin_unlock_irqrestore(&priv->rf_ps_lock, flag); RT_TRACE((COMP_PS | COMP_RF), - "rtl92e_set_rf_state(): RF Change in progress! Wait to set..StateToSet(%d).\n", - StateToSet); + "%s: RF Change in progress! 
Wait to set..StateToSet(%d).\n", + __func__, StateToSet); while (priv->RFChangeInProgress) { RFWaitCounter++; RT_TRACE((COMP_PS | COMP_RF), - "rtl92e_set_rf_state(): Wait 1 ms (%d times)...\n", - RFWaitCounter); + "%s: Wait 1 ms (%d times)...\n", + __func__, RFWaitCounter); mdelay(1); if (RFWaitCounter > 100) { @@ -195,8 +195,8 @@ bool rtl92e_set_rf_state(struct net_device *dev, bConnectBySSID = true; } else { RT_TRACE((COMP_PS | COMP_RF), - "rtl92e_set_rf_state - eRfon reject pMgntInfo->RfOffReason= 0x%x, ChangeSource=0x%X\n", - priv->rtllib->RfOffReason, ChangeSource); + "%s - eRfon reject pMgntInfo->RfOffReason= 0x%x, ChangeSource=0x%X\n", + __func__, priv->rtllib->RfOffReason, ChangeSource); } break; @@ -232,8 +232,8 @@ bool rtl92e_set_rf_state(struct net_device *dev, if (bActionAllowed) { RT_TRACE((COMP_PS | COMP_RF), - "rtl92e_set_rf_state(): Action is allowed.... StateToSet(%d), RfOffReason(%#X)\n", - StateToSet, priv->rtllib->RfOffReason); + "%s: Action is allowed.... StateToSet(%d), RfOffReason(%#X)\n", + __func__, StateToSet, priv->rtllib->RfOffReason); PHY_SetRFPowerState(dev, StateToSet); if (StateToSet == eRfOn) { @@ -245,15 +245,15 @@ bool rtl92e_set_rf_state(struct net_device *dev, } } else { RT_TRACE((COMP_PS | COMP_RF), - "rtl92e_set_rf_state(): Action is rejected.... StateToSet(%d), ChangeSource(%#X), RfOffReason(%#X)\n", - StateToSet, ChangeSource, priv->rtllib->RfOffReason); + "%s: Action is rejected.... StateToSet(%d), ChangeSource(%#X), RfOffReason(%#X)\n", + __func__, StateToSet, ChangeSource, priv->rtllib->RfOffReason); } spin_lock_irqsave(&priv->rf_ps_lock, flag); priv->RFChangeInProgress = false; spin_unlock_irqrestore(&priv->rf_ps_lock, flag); - RT_TRACE((COMP_PS | COMP_RF), "<===rtl92e_set_rf_state()\n"); + RT_TRACE((COMP_PS | COMP_RF), "<===%s\n", __func__); return bActionAllowed; } @@ -732,7 +732,7 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset) struct r8192_priv *priv = rtllib_priv(dev); struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) (&priv->rtllib->PowerSaveControl); - bool init_status = true; + bool init_status; priv->bDriverIsGoingToUnload = false; priv->bdisable_nic = false; diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c index 816d31c1d5c7..2d5e4a0330c6 100644 --- a/drivers/staging/rtl8192e/rtl819x_BAProc.c +++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c @@ -119,8 +119,8 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst, } #ifdef VERBOSE_DEBUG - print_hex_dump_bytes("rtllib_ADDBA(): ", DUMP_PREFIX_NONE, skb->data, - skb->len); + print_hex_dump_bytes("%s: ", DUMP_PREFIX_NONE, skb->data, + __func__, skb->len); #endif return skb; } @@ -170,8 +170,8 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst, tag += 2; #ifdef VERBOSE_DEBUG - print_hex_dump_bytes("rtllib_DELBA(): ", DUMP_PREFIX_NONE, skb->data, - skb->len); + print_hex_dump_bytes("%s: ", DUMP_PREFIX_NONE, skb->data, + __func__, skb->len); #endif return skb; } @@ -235,7 +235,7 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb) } #ifdef VERBOSE_DEBUG - print_hex_dump_bytes("rtllib_rx_ADDBAReq(): ", DUMP_PREFIX_NONE, + print_hex_dump_bytes("%s: ", DUMP_PREFIX_NONE, __func__, skb->data, skb->len); #endif @@ -433,8 +433,8 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb) } #ifdef VERBOSE_DEBUG - print_hex_dump_bytes("rtllib_rx_DELBA(): ", DUMP_PREFIX_NONE, skb->data, - skb->len); + print_hex_dump_bytes("%s: ", 
DUMP_PREFIX_NONE, skb->data, + __func__, skb->len); #endif delba = (struct rtllib_hdr_3addr *)skb->data; dst = (u8 *)(&delba->addr2[0]); diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c index f02263af9624..d83d72594312 100644 --- a/drivers/staging/rtl8192e/rtl819x_HTProc.c +++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c @@ -545,7 +545,7 @@ void HTOnAssocRsp(struct rtllib_device *ieee) #ifdef VERBOSE_DEBUG - print_hex_dump_bytes("HTOnAssocRsp(): ", DUMP_PREFIX_NONE, + print_hex_dump_bytes("%s: ", __func__, DUMP_PREFIX_NONE, pPeerHTCap, sizeof(struct ht_capab_ele)); #endif HTSetConnectBwMode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth), diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c index 672bf0987943..47b2669a3a8e 100644 --- a/drivers/staging/rtl8192e/rtl819x_TSProc.c +++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c @@ -440,7 +440,7 @@ void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr) { struct ts_common_info *pTS, *pTmpTS; - netdev_info(ieee->dev, "===========>RemovePeerTS, %pM\n", Addr); + netdev_info(ieee->dev, "===========>%s, %pM\n", __func__, Addr); list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) { if (memcmp(pTS->Addr, Addr, 6) == 0) { diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h index 328f410daa03..b84f00b8d18b 100644 --- a/drivers/staging/rtl8192e/rtllib.h +++ b/drivers/staging/rtl8192e/rtllib.h @@ -728,14 +728,14 @@ struct rtllib_pspoll_hdr { struct rtllib_hdr { __le16 frame_ctl; __le16 duration_id; - u8 payload[0]; + u8 payload[]; } __packed; struct rtllib_hdr_1addr { __le16 frame_ctl; __le16 duration_id; u8 addr1[ETH_ALEN]; - u8 payload[0]; + u8 payload[]; } __packed; struct rtllib_hdr_2addr { @@ -743,7 +743,7 @@ struct rtllib_hdr_2addr { __le16 duration_id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; - u8 payload[0]; + u8 payload[]; } __packed; struct rtllib_hdr_3addr { @@ -753,7 +753,7 @@ struct rtllib_hdr_3addr { u8 addr2[ETH_ALEN]; u8 addr3[ETH_ALEN]; __le16 seq_ctl; - u8 payload[0]; + u8 payload[]; } __packed; struct rtllib_hdr_4addr { @@ -764,7 +764,7 @@ struct rtllib_hdr_4addr { u8 addr3[ETH_ALEN]; __le16 seq_ctl; u8 addr4[ETH_ALEN]; - u8 payload[0]; + u8 payload[]; } __packed; struct rtllib_hdr_3addrqos { @@ -775,7 +775,7 @@ struct rtllib_hdr_3addrqos { u8 addr3[ETH_ALEN]; __le16 seq_ctl; __le16 qos_ctl; - u8 payload[0]; + u8 payload[]; } __packed; struct rtllib_hdr_4addrqos { @@ -787,13 +787,13 @@ struct rtllib_hdr_4addrqos { __le16 seq_ctl; u8 addr4[ETH_ALEN]; __le16 qos_ctl; - u8 payload[0]; + u8 payload[]; } __packed; struct rtllib_info_element { u8 id; u8 len; - u8 data[0]; + u8 data[]; } __packed; struct rtllib_authentication { @@ -802,7 +802,7 @@ struct rtllib_authentication { __le16 transaction; __le16 status; /*challenge*/ - struct rtllib_info_element info_element[0]; + struct rtllib_info_element info_element[]; } __packed; struct rtllib_disauth { @@ -818,7 +818,7 @@ struct rtllib_disassoc { struct rtllib_probe_request { struct rtllib_hdr_3addr header; /* SSID, supported rates */ - struct rtllib_info_element info_element[0]; + struct rtllib_info_element info_element[]; } __packed; struct rtllib_probe_response { @@ -829,7 +829,7 @@ struct rtllib_probe_response { /* SSID, supported rates, FH params, DS params, * CF params, IBSS params, TIM (if beacon), RSN */ - struct rtllib_info_element info_element[0]; + struct rtllib_info_element info_element[]; } __packed; /* Alias beacon for probe_response */ 
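The recurring "[0]" to "[]" hunks above convert old zero-length arrays into C99 flexible array members; the struct size and the allocation pattern do not change, the declaration simply moves to the standard form. A minimal standalone sketch of the idiom, using hypothetical names that are not taken from this driver:

	#include <stdlib.h>
	#include <string.h>

	struct blob {
		unsigned short len;
		unsigned char data[];	/* flexible array member, previously written as data[0] */
	};

	static struct blob *blob_new(const void *src, unsigned short len)
	{
		/* the flexible member adds nothing to sizeof(*b); allocate header plus payload */
		struct blob *b = malloc(sizeof(*b) + len);

		if (!b)
			return NULL;
		b->len = len;
		memcpy(b->data, src, len);
		return b;
	}
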
@@ -840,7 +840,7 @@ struct rtllib_assoc_request_frame { __le16 capability; __le16 listen_interval; /* SSID, supported rates, RSN */ - struct rtllib_info_element info_element[0]; + struct rtllib_info_element info_element[]; } __packed; struct rtllib_assoc_response_frame { @@ -848,7 +848,7 @@ struct rtllib_assoc_response_frame { __le16 capability; __le16 status; __le16 aid; - struct rtllib_info_element info_element[0]; /* supported rates */ + struct rtllib_info_element info_element[]; /* supported rates */ } __packed; struct rtllib_txb { @@ -859,7 +859,7 @@ struct rtllib_txb { u16 reserved; __le16 frag_size; __le16 payload_size; - struct sk_buff *fragments[0]; + struct sk_buff *fragments[]; }; #define MAX_SUBFRAME_COUNT 64 @@ -1792,7 +1792,7 @@ struct rtllib_device { /* This must be the last item so that it points to the data * allocated beyond this structure by alloc_rtllib */ - u8 priv[0]; + u8 priv[]; }; #define IEEE_A (1<<0) diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index 0bae0a0a4cbe..d31b5e1c8df4 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -2092,7 +2092,7 @@ int rtllib_parse_info_param(struct rtllib_device *ieee, MAX_RATES_LENGTH); for (i = 0; i < network->rates_len; i++) { network->rates[i] = info_element->data[i]; - p += snprintf(p, sizeof(rates_str) - + p += scnprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]); if (rtllib_is_ofdm_rate @@ -2120,7 +2120,7 @@ int rtllib_parse_info_param(struct rtllib_device *ieee, MAX_RATES_EX_LENGTH); for (i = 0; i < network->rates_ex_len; i++) { network->rates_ex[i] = info_element->data[i]; - p += snprintf(p, sizeof(rates_str) - + p += scnprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates_ex[i]); if (rtllib_is_ofdm_rate diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c index 8cddb2e12dc4..79d7ad7c0a4a 100644 --- a/drivers/staging/rtl8192e/rtllib_tx.c +++ b/drivers/staging/rtl8192e/rtllib_tx.c @@ -241,7 +241,7 @@ static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu) return 0; #ifdef VERBOSE_DEBUG - print_hex_dump_bytes("rtllib_classify(): ", DUMP_PREFIX_NONE, skb->data, + print_hex_dump_bytes("%s: ", __func__, DUMP_PREFIX_NONE, skb->data, skb->len); #endif ip = ip_hdr(skb); diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c index beb40967936a..7e7df50164fb 100644 --- a/drivers/staging/rtl8192e/rtllib_wx.c +++ b/drivers/staging/rtl8192e/rtllib_wx.c @@ -114,7 +114,7 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee, /* Add basic and extended rates */ max_rate = 0; p = custom; - p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): "); + p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): "); for (i = 0, j = 0; i < network->rates_len;) { if (j < network->rates_ex_len && ((network->rates_ex[j] & 0x7F) < @@ -124,12 +124,12 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee, rate = network->rates[i++] & 0x7F; if (rate > max_rate) max_rate = rate; - p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), + p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), "%d%s ", rate >> 1, (rate & 1) ? ".5" : ""); } for (; j < network->rates_ex_len; j++) { rate = network->rates_ex[j] & 0x7F; - p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), + p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), "%d%s ", rate >> 1, (rate & 1) ? 
".5" : ""); if (rate > max_rate) max_rate = rate; @@ -226,7 +226,7 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee, */ iwe.cmd = IWEVCUSTOM; p = custom; - p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), + p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100)); iwe.u.data.length = p - custom; diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h index 9576b647f6b1..39f4ddd86796 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h @@ -886,14 +886,14 @@ enum ieee80211_mfie { struct rtl_80211_hdr { __le16 frame_ctl; __le16 duration_id; - u8 payload[0]; + u8 payload[]; } __packed; struct rtl_80211_hdr_1addr { __le16 frame_ctl; __le16 duration_id; u8 addr1[ETH_ALEN]; - u8 payload[0]; + u8 payload[]; } __packed; struct rtl_80211_hdr_2addr { @@ -901,7 +901,7 @@ struct rtl_80211_hdr_2addr { __le16 duration_id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; - u8 payload[0]; + u8 payload[]; } __packed; struct rtl_80211_hdr_3addr { @@ -911,7 +911,7 @@ struct rtl_80211_hdr_3addr { u8 addr2[ETH_ALEN]; u8 addr3[ETH_ALEN]; __le16 seq_ctl; - u8 payload[0]; + u8 payload[]; } __packed; struct rtl_80211_hdr_4addr { @@ -922,7 +922,7 @@ struct rtl_80211_hdr_4addr { u8 addr3[ETH_ALEN]; __le16 seq_ctl; u8 addr4[ETH_ALEN]; - u8 payload[0]; + u8 payload[]; } __packed; struct rtl_80211_hdr_3addrqos { @@ -951,7 +951,7 @@ struct rtl_80211_hdr_4addrqos { struct ieee80211_info_element { u8 id; u8 len; - u8 data[0]; + u8 data[]; } __packed; struct ieee80211_authentication { @@ -960,7 +960,7 @@ struct ieee80211_authentication { __le16 transaction; __le16 status; /*challenge*/ - struct ieee80211_info_element info_element[0]; + struct ieee80211_info_element info_element[]; } __packed; struct ieee80211_disassoc { @@ -971,7 +971,7 @@ struct ieee80211_disassoc { struct ieee80211_probe_request { struct rtl_80211_hdr_3addr header; /* SSID, supported rates */ - struct ieee80211_info_element info_element[0]; + struct ieee80211_info_element info_element[]; } __packed; struct ieee80211_probe_response { @@ -982,7 +982,7 @@ struct ieee80211_probe_response { /* SSID, supported rates, FH params, DS params, * CF params, IBSS params, TIM (if beacon), RSN */ - struct ieee80211_info_element info_element[0]; + struct ieee80211_info_element info_element[]; } __packed; /* Alias beacon for probe_response */ @@ -993,7 +993,7 @@ struct ieee80211_assoc_request_frame { __le16 capability; __le16 listen_interval; /* SSID, supported rates, RSN */ - struct ieee80211_info_element info_element[0]; + struct ieee80211_info_element info_element[]; } __packed; struct ieee80211_reassoc_request_frame { @@ -1002,7 +1002,7 @@ struct ieee80211_reassoc_request_frame { __le16 listen_interval; u8 current_ap[ETH_ALEN]; /* SSID, supported rates, RSN */ - struct ieee80211_info_element info_element[0]; + struct ieee80211_info_element info_element[]; } __packed; struct ieee80211_assoc_response_frame { @@ -1010,7 +1010,7 @@ struct ieee80211_assoc_response_frame { __le16 capability; __le16 status; __le16 aid; - struct ieee80211_info_element info_element[0]; /* supported rates */ + struct ieee80211_info_element info_element[]; /* supported rates */ } __packed; struct ieee80211_txb { @@ -1021,7 +1021,7 @@ struct ieee80211_txb { u16 reserved; __le16 frag_size; __le16 payload_size; - struct sk_buff *fragments[0]; + struct sk_buff *fragments[]; }; #define MAX_TX_AGG_COUNT 16 @@ 
-2007,7 +2007,7 @@ struct ieee80211_device { /* This must be the last item so that it points to the data * allocated beyond this structure by alloc_ieee80211 */ - u8 priv[0]; + u8 priv[]; }; #define IEEE_A (1<<0) diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c index 6f4710171151..ffe624ed0c0c 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c @@ -388,20 +388,20 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) keyidx = pos[3]; if (!(keyidx & BIT(5))) { if (net_ratelimit()) { - printk(KERN_DEBUG "TKIP: received packet without ExtIV" + netdev_dbg(skb->dev, "TKIP: received packet without ExtIV" " flag from %pM\n", hdr->addr2); } return -2; } keyidx >>= 6; if (tkey->key_idx != keyidx) { - printk(KERN_DEBUG "TKIP: RX tkey->key_idx=%d frame " + netdev_dbg(skb->dev, "TKIP: RX tkey->key_idx=%d frame " "keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv); return -6; } if (!tkey->key_set) { if (net_ratelimit()) { - printk(KERN_DEBUG "TKIP: received packet from %pM" + netdev_dbg(skb->dev, "TKIP: received packet from %pM" " with keyid=%d that does not have a configured" " key\n", hdr->addr2, keyidx); } @@ -417,7 +417,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) if (iv32 < tkey->rx_iv32 || (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) { if (net_ratelimit()) { - printk(KERN_DEBUG "TKIP: replay detected: STA=%pM" + netdev_dbg(skb->dev, "TKIP: replay detected: STA=%pM" " previous TSC %08x%04x received TSC " "%08x%04x\n", hdr->addr2, tkey->rx_iv32, tkey->rx_iv16, iv32, iv16); @@ -445,7 +445,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) skcipher_request_zero(req); if (err) { if (net_ratelimit()) { - printk(KERN_DEBUG ": TKIP: failed to decrypt " + netdev_dbg(skb->dev, "TKIP: failed to decrypt " "received packet from %pM\n", hdr->addr2); } @@ -468,7 +468,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) tkey->rx_phase1_done = 0; } if (net_ratelimit()) { - printk(KERN_DEBUG "TKIP: ICV error detected: STA=" + netdev_dbg(skb->dev, "TKIP: ICV error detected: STA=" "%pM\n", hdr->addr2); } tkey->dot11RSNAStatsTKIPICVErrors++; @@ -559,7 +559,7 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *pri hdr = (struct rtl_80211_hdr_4addr *)skb->data; if (skb_tailroom(skb) < 8 || skb->len < hdr_len) { - printk(KERN_DEBUG "Invalid packet for Michael MIC add " + netdev_dbg(skb->dev, "Invalid packet for Michael MIC add " "(tailroom=%d hdr_len=%d skb->len=%d)\n", skb_tailroom(skb), hdr_len, skb->len); return -1; @@ -628,10 +628,9 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx, struct rtl_80211_hdr_4addr *hdr; hdr = (struct rtl_80211_hdr_4addr *)skb->data; - printk(KERN_DEBUG "%s: Michael MIC verification failed for " + netdev_dbg(skb->dev, "Michael MIC verification failed for " "MSDU from %pM keyidx=%d\n", - skb->dev ? 
skb->dev->name : "N/A", hdr->addr2, - keyidx); + hdr->addr2, keyidx); if (skb->dev) ieee80211_michael_mic_failure(skb->dev, hdr, keyidx); tkey->dot11RSNAStatsTKIPLocalMICFailures++; diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c index aa9dab8a4ae8..a5a1b14f5a40 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c @@ -68,8 +68,7 @@ static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee) sizeof(struct ieee80211_network), GFP_KERNEL); if (!ieee->networks) { - printk(KERN_WARNING "%s: Out of memory allocating beacons\n", - ieee->dev->name); + netdev_warn(ieee->dev, "Out of memory allocating beacons\n"); return -ENOMEM; } diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c index 90692db81b71..d8eb907ff301 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c @@ -131,7 +131,7 @@ static void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p) *tag++ = 0x00; *tag_p = tag; - printk(KERN_ALERT "This is enable turbo mode IE process\n"); + netdev_alert(ieee->dev, "This is enable turbo mode IE process\n"); } #endif @@ -1270,14 +1270,15 @@ static void ieee80211_associate_step2(struct ieee80211_device *ieee) static void ieee80211_associate_complete_wq(struct work_struct *work) { struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq); - printk(KERN_INFO "Associated successfully\n"); + + netdev_info(ieee->dev, "Associated successfully\n"); if (ieee80211_is_54g(&ieee->current_network) && (ieee->modulation & IEEE80211_OFDM_MODULATION)) { ieee->rate = 108; - printk(KERN_INFO"Using G rates:%d\n", ieee->rate); + netdev_info(ieee->dev, "Using G rates:%d\n", ieee->rate); } else { ieee->rate = 22; - printk(KERN_INFO"Using B rates:%d\n", ieee->rate); + netdev_info(ieee->dev, "Using B rates:%d\n", ieee->rate); } if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) { printk("Successfully associated, ht enabled\n"); @@ -1391,12 +1392,13 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee strncpy(ieee->current_network.ssid, tmp_ssid, IW_ESSID_MAX_SIZE); ieee->current_network.ssid_len = tmp_ssid_len; - printk(KERN_INFO"Linking with %s,channel:%d, qos:%d, myHT:%d, networkHT:%d\n", - ieee->current_network.ssid, - ieee->current_network.channel, - ieee->current_network.qos_data.supported, - ieee->pHTInfo->bEnableHT, - ieee->current_network.bssht.bdSupportHT); + netdev_info(ieee->dev, + "Linking with %s,channel:%d, qos:%d, myHT:%d, networkHT:%d\n", + ieee->current_network.ssid, + ieee->current_network.channel, + ieee->current_network.qos_data.supported, + ieee->pHTInfo->bEnableHT, + ieee->current_network.bssht.bdSupportHT); //ieee->pHTInfo->IOTAction = 0; HTResetIOTSetting(ieee->pHTInfo); @@ -1421,11 +1423,13 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee (ieee->modulation & IEEE80211_OFDM_MODULATION)) { ieee->rate = 108; ieee->SetWirelessMode(ieee->dev, IEEE_G); - printk(KERN_INFO"Using G rates\n"); + netdev_info(ieee->dev, + "Using G rates\n"); } else { ieee->rate = 22; ieee->SetWirelessMode(ieee->dev, IEEE_B); - printk(KERN_INFO"Using B rates\n"); + netdev_info(ieee->dev, + "Using B rates\n"); } memset(ieee->dot11HTOperationalRateSet, 0, 16); //HTSetConnectBwMode(ieee, 
HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT); @@ -1622,7 +1626,7 @@ ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb) if (assoc_rq_parse(skb, dest) != -1) ieee80211_resp_to_assoc_rq(ieee, dest); - printk(KERN_INFO"New client associated: %pM\n", dest); + netdev_info(ieee->dev, "New client associated: %pM\n", dest); //FIXME } diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c index b1baaa18b129..f434a26cdb2f 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c @@ -459,8 +459,8 @@ int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee, else ieee->raw_tx = 0; - printk(KERN_INFO"raw TX is %s\n", - ieee->raw_tx ? "enabled" : "disabled"); + netdev_info(ieee->dev, "raw TX is %s\n", + ieee->raw_tx ? "enabled" : "disabled"); if (ieee->iw_mode == IW_MODE_MONITOR) { if (prev == 0 && ieee->raw_tx) { diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c index f0b6b8372f91..0ee054d82832 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c @@ -180,9 +180,8 @@ int ieee80211_encrypt_fragment( struct rtl_80211_hdr_3addrqos *header; header = (struct rtl_80211_hdr_3addrqos *)frag->data; - printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " - "TX packet to %pM\n", - ieee->dev->name, header->addr1); + netdev_dbg(ieee->dev, "TKIP countermeasures: dropped " + "TX packet to %pM\n", header->addr1); } return -1; } @@ -204,8 +203,8 @@ int ieee80211_encrypt_fragment( atomic_dec(&crypt->refcnt); if (res < 0) { - printk(KERN_INFO "%s: Encryption failed: len=%d.\n", - ieee->dev->name, frag->len); + netdev_info(ieee->dev, "Encryption failed: len=%d.\n", + frag->len); ieee->ieee_stats.tx_discards++; return -1; } @@ -557,16 +556,15 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) */ if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)) || ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) { - printk(KERN_WARNING "%s: No xmit handler.\n", - ieee->dev->name); + netdev_warn(ieee->dev, "No xmit handler.\n"); goto success; } if (likely(ieee->raw_tx == 0)) { if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) { - printk(KERN_WARNING "%s: skb too small (%d).\n", - ieee->dev->name, skb->len); + netdev_warn(ieee->dev, "skb too small (%d).\n", + skb->len); goto success; } @@ -685,8 +683,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) */ txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC); if (unlikely(!txb)) { - printk(KERN_WARNING "%s: Could not allocate TXB\n", - ieee->dev->name); + netdev_warn(ieee->dev, "Could not allocate TXB\n"); goto failed; } txb->encrypted = encrypt; @@ -779,15 +776,14 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) } } else { if (unlikely(skb->len < sizeof(struct rtl_80211_hdr_3addr))) { - printk(KERN_WARNING "%s: skb too small (%d).\n", - ieee->dev->name, skb->len); + netdev_warn(ieee->dev, "skb too small (%d).\n", + skb->len); goto success; } txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC); if (!txb) { - printk(KERN_WARNING "%s: Could not allocate TXB\n", - ieee->dev->name); + netdev_warn(ieee->dev, "Could not allocate TXB\n"); goto failed; } diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 
b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c index 33c596f9ec96..22373c0afebc 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c @@ -356,9 +356,8 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, kfree(new_crypt); new_crypt = NULL; - printk(KERN_WARNING "%s: could not initialize WEP: " - "load module ieee80211_crypt_wep\n", - dev->name); + netdev_warn(dev, "could not initialize WEP: " + "load module ieee80211_crypt_wep\n"); return -EOPNOTSUPP; } *crypt = new_crypt; @@ -436,7 +435,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, if (ieee->reset_on_keychange && ieee->iw_mode != IW_MODE_INFRA && ieee->reset_port && ieee->reset_port(dev)) { - printk(KERN_DEBUG "%s: reset_port failed\n", dev->name); + netdev_dbg(ieee->dev, "reset_port failed\n"); return -EINVAL; } return 0; diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c index 5cee1031a27c..3aabb401b15a 100644 --- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c +++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c @@ -380,7 +380,7 @@ bool GetTs( } IEEE80211_DEBUG(IEEE80211_DL_TS, "to init current TS, UP:%d, Dir:%d, addr:%pM\n", UP, Dir, Addr); - // Prepare TS Info releated field + // Prepare TS Info related field pTSInfo->uc_traffic_type = 0; // Traffic type: WMM is reserved in this field pTSInfo->uc_tsid = UP; // TSID pTSInfo->uc_direction = Dir; // Direction: if there is DirectLink, this need additional consideration. diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 89dd1fb0b38d..fcfb9024a83f 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -613,13 +613,13 @@ static void rtl8192_proc_init_one(struct net_device *dev) if (!dir) return; - proc_create_single("stats-rx", S_IFREG | S_IRUGO, dir, + proc_create_single("stats-rx", S_IFREG | 0444, dir, proc_get_stats_rx); - proc_create_single("stats-tx", S_IFREG | S_IRUGO, dir, + proc_create_single("stats-tx", S_IFREG | 0444, dir, proc_get_stats_tx); - proc_create_single("stats-ap", S_IFREG | S_IRUGO, dir, + proc_create_single("stats-ap", S_IFREG | 0444, dir, proc_get_stats_ap); - proc_create_single("registers", S_IFREG | S_IRUGO, dir, + proc_create_single("registers", S_IFREG | 0444, dir, proc_get_registers); } @@ -4877,50 +4877,50 @@ void EnableHWSecurityConfig8192(struct net_device *dev) write_nic_byte(dev, SECR, SECR_value); } -void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType, - u8 *MacAddr, u8 DefaultKey, u32 *KeyContent) +void setKey(struct net_device *dev, u8 entryno, u8 keyindex, u16 keytype, + u8 *macaddr, u8 defaultkey, u32 *keycontent) { - u32 TargetCommand = 0; - u32 TargetContent = 0; - u16 usConfig = 0; + u32 target_command = 0; + u32 target_content = 0; + u16 us_config = 0; u8 i; - if (EntryNo >= TOTAL_CAM_ENTRY) - RT_TRACE(COMP_ERR, "cam entry exceeds in setKey()\n"); + if (entryno >= TOTAL_CAM_ENTRY) + RT_TRACE(COMP_ERR, "cam entry exceeds in %s\n", __func__); RT_TRACE(COMP_SEC, - "====>to setKey(), dev:%p, EntryNo:%d, KeyIndex:%d, KeyType:%d, MacAddr%pM\n", - dev, EntryNo, KeyIndex, KeyType, MacAddr); + "====>to %s, dev:%p, EntryNo:%d, KeyIndex:%d, KeyType:%d, MacAddr%pM\n", + __func__, dev, entryno, keyindex, keytype, macaddr); - if (DefaultKey) - usConfig |= BIT(15) | (KeyType << 2); + if (defaultkey) + us_config |= BIT(15) | (keytype << 2); else - usConfig |= BIT(15) | (KeyType 
<< 2) | KeyIndex; + us_config |= BIT(15) | (keytype << 2) | keyindex; for (i = 0; i < CAM_CONTENT_COUNT; i++) { - TargetCommand = i + CAM_CONTENT_COUNT * EntryNo; - TargetCommand |= BIT(31) | BIT(16); + target_command = i + CAM_CONTENT_COUNT * entryno; + target_command |= BIT(31) | BIT(16); if (i == 0) { /* MAC|Config */ - TargetContent = (u32)(*(MacAddr + 0)) << 16 | - (u32)(*(MacAddr + 1)) << 24 | - (u32)usConfig; + target_content = (u32)(*(macaddr + 0)) << 16 | + (u32)(*(macaddr + 1)) << 24 | + (u32)us_config; - write_nic_dword(dev, WCAMI, TargetContent); - write_nic_dword(dev, RWCAM, TargetCommand); + write_nic_dword(dev, WCAMI, target_content); + write_nic_dword(dev, RWCAM, target_command); } else if (i == 1) { /* MAC */ - TargetContent = (u32)(*(MacAddr + 2)) | - (u32)(*(MacAddr + 3)) << 8 | - (u32)(*(MacAddr + 4)) << 16 | - (u32)(*(MacAddr + 5)) << 24; - write_nic_dword(dev, WCAMI, TargetContent); - write_nic_dword(dev, RWCAM, TargetCommand); + target_content = (u32)(*(macaddr + 2)) | + (u32)(*(macaddr + 3)) << 8 | + (u32)(*(macaddr + 4)) << 16 | + (u32)(*(macaddr + 5)) << 24; + write_nic_dword(dev, WCAMI, target_content); + write_nic_dword(dev, RWCAM, target_command); } else { /* Key Material */ - if (KeyContent) { + if (keycontent) { write_nic_dword(dev, WCAMI, - *(KeyContent + i - 2)); - write_nic_dword(dev, RWCAM, TargetCommand); + *(keycontent + i - 2)); + write_nic_dword(dev, RWCAM, target_command); } } } diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c index 0118edb0b9ab..100532598781 100644 --- a/drivers/staging/rtl8192u/r8192U_wx.c +++ b/drivers/staging/rtl8192u/r8192U_wx.c @@ -588,7 +588,7 @@ static int r8192_wx_set_enc(struct net_device *dev, hwkey); /* KeyContent */ } else { - printk("wrong type in WEP, not WEP40 and WEP104\n"); + netdev_warn(dev, "wrong type in WEP, not WEP40 and WEP104\n"); } } diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c index 555e52522be6..37b99cf4b35f 100644 --- a/drivers/staging/rtl8192u/r819xU_phy.c +++ b/drivers/staging/rtl8192u/r819xU_phy.c @@ -1531,7 +1531,7 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev) rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1); rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1); rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand, - priv->nCur40MhzPrimeSC>>1); + priv->nCur40MhzPrimeSC >> 1); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0); rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00, priv->nCur40MhzPrimeSC); diff --git a/drivers/staging/rtl8712/Kconfig b/drivers/staging/rtl8712/Kconfig index b6dcda77db1f..c62747c90968 100644 --- a/drivers/staging/rtl8712/Kconfig +++ b/drivers/staging/rtl8712/Kconfig @@ -6,13 +6,16 @@ config R8712U select WEXT_PRIV select FW_LOADER help - This option adds the Realtek RTL8712 USB device such as the D-Link DWA-130. + This option adds the Realtek RTL8712 USB device such as the + D-Link DWA-130. + If built as a module, it will be called r8712u. config R8712_TX_AGGR bool "Realtek RTL8712U Transmit Aggregation code" depends on R8712U && BROKEN help - This option provides transmit aggregation for the Realtek RTL8712 USB device. + This option provides transmit aggregation for the Realtek + RTL8712 USB device. 
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h index 8098f6905554..dabaa8fd34fb 100644 --- a/drivers/staging/rtl8712/ieee80211.h +++ b/drivers/staging/rtl8712/ieee80211.h @@ -535,7 +535,7 @@ struct ieee80211_info_element_hdr { struct ieee80211_info_element { u8 id; u8 len; - u8 data[0]; + u8 data[]; } __packed; /* @@ -597,7 +597,7 @@ struct ieee80211_txb { u16 reserved; u16 frag_size; u16 payload_size; - struct sk_buff *fragments[0]; + struct sk_buff *fragments[]; }; /* SWEEP TABLE ENTRIES NUMBER*/ diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h index 98d7fbfce1a5..254182a6ce8e 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.h +++ b/drivers/staging/rtl8712/rtl871x_cmd.h @@ -478,7 +478,7 @@ struct drvint_cmd_parm { unsigned char *pbuf; }; -/*------------------- Below are used for RF/BB tunning ---------------------*/ +/*------------------- Below are used for RF/BB tuning ---------------------*/ struct setantenna_parm { u8 tx_antset; diff --git a/drivers/staging/rtl8712/rtl871x_mp.c b/drivers/staging/rtl8712/rtl871x_mp.c index 1a39a96b726f..24020257bc58 100644 --- a/drivers/staging/rtl8712/rtl871x_mp.c +++ b/drivers/staging/rtl8712/rtl871x_mp.c @@ -44,9 +44,9 @@ static int init_mp_priv(struct mp_priv *pmp_priv) pmp_priv->pallocated_mp_xmitframe_buf = kmalloc(NR_MP_XMITFRAME * sizeof(struct mp_xmit_frame) + 4, GFP_ATOMIC); - if (!pmp_priv->pallocated_mp_xmitframe_buf) { + if (!pmp_priv->pallocated_mp_xmitframe_buf) return -ENOMEM; - } + pmp_priv->pmp_xmtframe_buf = pmp_priv->pallocated_mp_xmitframe_buf + 4 - ((addr_t)(pmp_priv->pallocated_mp_xmitframe_buf) & 3); diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h index 64e2ae436625..59fa6664d868 100644 --- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h +++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h @@ -48,7 +48,7 @@ struct eeprom_rw_param { struct EFUSE_ACCESS_STRUCT { u16 start_addr; u16 cnts; - u8 data[0]; + u8 data[]; }; struct burst_rw_reg { @@ -324,7 +324,7 @@ struct mp_ioctl_handler { struct mp_ioctl_param { unsigned int subcode; unsigned int len; - unsigned char data[0]; + unsigned char data[]; }; #define GEN_MP_IOCTL_SUBCODE(code) _MP_IOCTL_ ## code ## _CMD_ diff --git a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h index d479f739ff08..ca5072e11e22 100644 --- a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h +++ b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h @@ -30,7 +30,7 @@ /*--------------------------Define Parameters-------------------------------*/ /*============================================================ - * 8192S Regsiter offset definition + * 8192S Register offset definition *============================================================ * * diff --git a/drivers/staging/rtl8712/rtl871x_recv.h b/drivers/staging/rtl8712/rtl871x_recv.h index 0146a774e19d..e93f356ed2b0 100644 --- a/drivers/staging/rtl8712/rtl871x_recv.h +++ b/drivers/staging/rtl8712/rtl871x_recv.h @@ -53,7 +53,7 @@ struct rx_pkt_attrib { u8 privacy; /* in frame_ctrl field */ u8 bdecrypted; int hdrlen; /* the WLAN Header Len */ - int encrypt; /* 0 no encrypt. != 0 encrypt algorith */ + int encrypt; /* 0 no encrypt. 
!= 0 encrypt algorithm */ int iv_len; int icv_len; int priority; @@ -105,7 +105,7 @@ struct recv_priv { u8 *precv_buf; /* 4 alignment */ struct __queue free_recv_buf_queue; u32 free_recv_buf_queue_cnt; - /* For the phy informatiom */ + /* For the phy information */ s8 rssi; u8 signal; u8 noise; diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c index 7117d16a30f9..a76e81330756 100644 --- a/drivers/staging/rtl8723bs/core/rtw_ap.c +++ b/drivers/staging/rtl8723bs/core/rtw_ap.c @@ -2417,7 +2417,7 @@ void stop_ap_mode(struct adapter *padapter) paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list); plist = get_next(plist); - if (paclnode->valid == true) { + if (paclnode->valid) { paclnode->valid = false; list_del_init(&paclnode->list); diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c index 13a9b54b4561..efb5135ad743 100644 --- a/drivers/staging/rtl8723bs/core/rtw_cmd.c +++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c @@ -194,14 +194,16 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv) pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((SIZE_PTR)(pcmdpriv->rsp_allocated_buf) & 3); - pcmdpriv->cmd_issued_cnt = pcmdpriv->cmd_done_cnt = pcmdpriv->rsp_cnt = 0; + pcmdpriv->cmd_issued_cnt = 0; + pcmdpriv->cmd_done_cnt = 0; + pcmdpriv->rsp_cnt = 0; mutex_init(&pcmdpriv->sctx_mutex); exit: return res; } -static void c2h_wk_callback(_workitem *work); +static void c2h_wk_callback(_workitem * work); int rtw_init_evt_priv(struct evt_priv *pevtpriv) { /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */ @@ -360,11 +362,7 @@ exit: struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv) { - struct cmd_obj *cmd_obj; - - cmd_obj = _rtw_dequeue_cmd(&pcmdpriv->cmd_queue); - - return cmd_obj; + return _rtw_dequeue_cmd(&pcmdpriv->cmd_queue); } void rtw_free_cmd_obj(struct cmd_obj *pcmd) @@ -520,7 +518,7 @@ post_process: rtw_free_cmd_obj(pcmd); } else { /* todo: !!! 
fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */ - pcmd_callback(pcmd->padapter, pcmd);/* need conider that free cmd_obj in rtw_cmd_callback */ + pcmd_callback(pcmd->padapter, pcmd);/* need consider that free cmd_obj in rtw_cmd_callback */ } } else { RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("%s: cmdcode = 0x%x callback not defined!\n", __func__, pcmd->cmdcode)); @@ -989,7 +987,7 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_ memcpy(&psetstakey_para->key, &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, 16); } - /* jeff: set this becasue at least sw key is ready */ + /* jeff: set this because at least sw key is ready */ padapter->securitypriv.busetkipkey = true; if (enqueue) { @@ -2138,7 +2136,8 @@ void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj * goto exit; } - psta->aid = psta->mac_id = passocsta_rsp->cam_id; + psta->aid = passocsta_rsp->cam_id; + psta->mac_id = passocsta_rsp->cam_id; spin_lock_bh(&pmlmepriv->lock); diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c index 3b8848182221..8794638468e6 100644 --- a/drivers/staging/rtl8723bs/core/rtw_efuse.c +++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c @@ -43,9 +43,8 @@ Efuse_Read1ByteFromFakeContent( u16 Offset, u8 *Value) { - if (Offset >= EFUSE_MAX_HW_SIZE) { + if (Offset >= EFUSE_MAX_HW_SIZE) return false; - } /* DbgPrint("Read fake content, offset = %d\n", Offset); */ if (fakeEfuseBank == 0) *Value = fakeEfuseContent[Offset]; @@ -65,14 +64,12 @@ Efuse_Write1ByteToFakeContent( u16 Offset, u8 Value) { - if (Offset >= EFUSE_MAX_HW_SIZE) { + if (Offset >= EFUSE_MAX_HW_SIZE) return false; - } if (fakeEfuseBank == 0) fakeEfuseContent[Offset] = Value; - else { + else fakeBTEfuseContent[fakeEfuseBank-1][Offset] = Value; - } return true; } @@ -244,10 +241,8 @@ u16 Address) while (!(Bytetemp & 0x80)) { Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3); k++; - if (k == 1000) { - k = 0; + if (k == 1000) break; - } } return rtw_read8(Adapter, EFUSE_CTRL); } else @@ -271,8 +266,7 @@ bool bPseudoTest) /* DBG_871X("===> EFUSE_OneByteRead() start, 0x34 = 0x%X\n", rtw_read32(padapter, EFUSE_TEST)); */ if (bPseudoTest) { - bResult = Efuse_Read1ByteFromFakeContent(padapter, addr, data); - return bResult; + return Efuse_Read1ByteFromFakeContent(padapter, addr, data); } /* <20130121, Kordan> For SMIC EFUSE specificatoin. 
*/ @@ -324,8 +318,7 @@ bool bPseudoTest) /* DBG_871X("===> EFUSE_OneByteWrite() start, 0x34 = 0x%X\n", rtw_read32(padapter, EFUSE_TEST)); */ if (bPseudoTest) { - bResult = Efuse_Write1ByteToFakeContent(padapter, addr, data); - return bResult; + return Efuse_Write1ByteToFakeContent(padapter, addr, data); } diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c index 6018d877a8a6..ca98274ae390 100644 --- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c +++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c @@ -217,7 +217,7 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, u * rtw_ies_remove_ie - Find matching IEs and remove * @ies: Address of IEs to search * @ies_len: Pointer of length of ies, will update to new length - * @offset: The offset to start scarch + * @offset: The offset to start search * @eid: Element ID to match * @oui: OUI to match * @oui_len: OUI length diff --git a/drivers/staging/rtl8723bs/core/rtw_io.c b/drivers/staging/rtl8723bs/core/rtw_io.c index 57168578663a..c3f63f903547 100644 --- a/drivers/staging/rtl8723bs/core/rtw_io.c +++ b/drivers/staging/rtl8723bs/core/rtw_io.c @@ -37,7 +37,6 @@ jackson@realtek.com.tw u8 _rtw_read8(struct adapter *adapter, u32 addr) { - u8 r_val; /* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */ struct io_priv *pio_priv = &adapter->iopriv; struct intf_hdl *pintfhdl = &(pio_priv->intf); @@ -45,8 +44,7 @@ u8 _rtw_read8(struct adapter *adapter, u32 addr) _read8 = pintfhdl->io_ops._read8; - r_val = _read8(pintfhdl, addr); - return r_val; + return _read8(pintfhdl, addr); } u16 _rtw_read16(struct adapter *adapter, u32 addr) @@ -142,13 +140,10 @@ u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem) u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem); struct io_priv *pio_priv = &adapter->iopriv; struct intf_hdl *pintfhdl = &(pio_priv->intf); - u32 ret; _write_port = pintfhdl->io_ops._write_port; - ret = _write_port(pintfhdl, addr, cnt, pmem); - - return ret; + return _write_port(pintfhdl, addr, cnt, pmem); } int rtw_init_io_priv(struct adapter *padapter, void (*set_intf_ops)(struct adapter *padapter, struct _io_ops *pops)) diff --git a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c index eb08569db5ea..8b5f6a66bfb8 100644 --- a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c +++ b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c @@ -439,7 +439,7 @@ u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter, if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) { if (check_fwstate(pmlmepriv, _FW_LINKED) == true) { - rtw_indicate_disconnect(padapter); /* will clr Linked_state; before this function, we must have chked whether issue dis-assoc_cmd or not */ + rtw_indicate_disconnect(padapter); /* will clr Linked_state; before this function, we must have checked whether issue dis-assoc_cmd or not */ } } diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c index 71fcb466019a..d7a58af76ea0 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c @@ -2772,16 +2772,7 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe /* maybe needs check if ap supports rx ampdu. 
*/ if (!(phtpriv->ampdu_enable) && pregistrypriv->ampdu_enable == 1) { - if (pregistrypriv->wifi_spec == 1) { - /* remove this part because testbed AP should disable RX AMPDU */ - /* phtpriv->ampdu_enable = false; */ - phtpriv->ampdu_enable = true; - } else { - phtpriv->ampdu_enable = true; - } - } else if (pregistrypriv->ampdu_enable == 2) { - /* remove this part because testbed AP should disable RX AMPDU */ - /* phtpriv->ampdu_enable = true; */ + phtpriv->ampdu_enable = true; } /* check Max Rx A-MPDU Size */ diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c index c642825ca8ef..8f9da1d49343 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c @@ -219,6 +219,7 @@ static RT_CHANNEL_PLAN_MAP RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03, 0x02}; int rtw_ch_set_search_ch(RT_CHANNEL_INFO *ch_set, const u32 ch) { int i; + for (i = 0; ch_set[i].ChannelNum != 0; i++) { if (ch == ch_set[i].ChannelNum) break; @@ -2184,6 +2185,7 @@ unsigned int OnAction_sa_query(struct adapter *padapter, union recv_frame *precv } if (0) { int pp; + printk("pattrib->pktlen = %d =>", pattrib->pkt_len); for (pp = 0; pp < pattrib->pkt_len; pp++) printk(" %02x ", pframe[pp]); @@ -2486,6 +2488,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms) /* DBG_871X("ie len =%d\n", cur_network->IELength); */ { int len_diff; + memcpy(pframe, cur_network->IEs, cur_network->IELength); len_diff = update_hidden_ssid( pframe+_BEACON_IE_OFFSET_ @@ -2500,6 +2503,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms) u8 *wps_ie; uint wps_ielen; u8 sr = 0; + wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_, pattrib->pktlen-sizeof(struct ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen); if (wps_ie && wps_ielen > 0) { @@ -2700,6 +2704,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p if (ssid_ie && cur_network->Ssid.SsidLength) { uint remainder_ielen; u8 *remainder_ie; + remainder_ie = ssid_ie+2; remainder_ielen = (pframe-remainder_ie); @@ -4280,6 +4285,7 @@ void site_survey(struct adapter *padapter) { struct rtw_ieee80211_channel *ch; + if (pmlmeext->sitesurvey_res.channel_idx < pmlmeext->sitesurvey_res.ch_num) { ch = &pmlmeext->sitesurvey_res.ch[pmlmeext->sitesurvey_res.channel_idx]; survey_channel = ch->hw_value; @@ -4323,6 +4329,7 @@ void site_survey(struct adapter *padapter) if (ScanType == SCAN_ACTIVE) { /* obey the channel plan setting... */ { int i; + for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) { if (pmlmeext->sitesurvey_res.ssid[i].SsidLength) { /* IOT issue, When wifi_spec is not set, send one probe req without WPS IE. 
*/ @@ -4351,6 +4358,7 @@ void site_survey(struct adapter *padapter) #if defined(CONFIG_SIGNAL_DISPLAY_DBM) && defined(CONFIG_BACKGROUND_NOISE_MONITOR) { struct noise_info info; + info.bPauseDIG = false; info.IGIValue = 0; info.max_time = channel_scan_time_ms/2;/* ms */ @@ -4518,6 +4526,7 @@ u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, str p = rtw_get_ie(bssid->IEs + ie_offset, _HT_ADD_INFO_IE_, &len, bssid->IELength - ie_offset); if (p) { struct HT_info_element *HT_info = (struct HT_info_element *)(p + 2); + bssid->Configuration.DSConfig = HT_info->primary_channel; } else { /* use current channel */ bssid->Configuration.DSConfig = rtw_get_oper_ch(padapter); @@ -4551,6 +4560,7 @@ u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, str p = rtw_get_ie(bssid->IEs + ie_offset, _HT_CAPABILITY_IE_, &len, bssid->IELength - ie_offset); if (p && len > 0) { struct HT_caps_element *pHT_caps; + pHT_caps = (struct HT_caps_element *)(p + 2); if (le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info) & BIT(14)) @@ -4584,6 +4594,7 @@ void start_create_ibss(struct adapter *padapter) struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network)); + pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig; pmlmeinfo->bcn_interval = get_beacon_interval(pnetwork); @@ -5388,6 +5399,7 @@ static void rtw_mlmeext_disconnect(struct adapter *padapter) /* wakeup macid after disconnect. */ { struct sta_info *psta; + psta = rtw_get_stainfo(&padapter->stapriv, get_my_bssid(pnetwork)); if (psta) rtw_hal_macid_wakeup(padapter, psta->mac_id); @@ -5425,6 +5437,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res) struct sta_priv *pstapriv = &padapter->stapriv; u8 join_type; struct sta_info *psta; + if (join_res < 0) { join_type = 1; rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type)); @@ -6047,6 +6060,7 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf) if (pmlmeinfo->state == WIFI_FW_AP_STATE) { struct wlan_bssid_ex *network = &padapter->mlmepriv.cur_network.network; + start_bss_network(padapter, (u8 *)network); return H2C_SUCCESS; } @@ -6810,6 +6824,7 @@ u8 set_chplan_hdl(struct adapter *padapter, unsigned char *pbuf) if ((padapter->rtw_wdev != NULL) && (padapter->rtw_wdev->wiphy)) { struct regulatory_request request; + request.initiator = NL80211_REGDOM_SET_BY_DRIVER; rtw_reg_notifier(padapter->rtw_wdev->wiphy, &request); } diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c index 30137f0bd984..6ac9184d59a6 100644 --- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c +++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c @@ -1193,7 +1193,7 @@ inline void rtw_set_ips_deny(struct adapter *padapter, u32 ms) /* * rtw_pwr_wakeup - Wake the NIC up from: 1)IPS. 
2)USB autosuspend * @adapter: pointer to struct adapter structure -* @ips_deffer_ms: the ms wiil prevent from falling into IPS after wakeup +* @ips_deffer_ms: the ms will prevent from falling into IPS after wakeup * Return _SUCCESS or _FAIL */ @@ -1390,10 +1390,5 @@ void rtw_ps_deny_cancel(struct adapter *padapter, enum PS_DENY_REASON reason) */ u32 rtw_ps_deny_get(struct adapter *padapter) { - u32 deny; - - - deny = adapter_to_pwrctl(padapter)->ps_deny; - - return deny; + return adapter_to_pwrctl(padapter)->ps_deny; } diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c index 7fa8c84cf5f4..5245098b9ecf 100644 --- a/drivers/staging/rtl8723bs/core/rtw_recv.c +++ b/drivers/staging/rtl8723bs/core/rtw_recv.c @@ -1179,7 +1179,7 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_ /* DBG_871X("after handling ps-poll, tim =%x\n", pstapriv->tim_bitmap); */ - /* upate BCN for TIM IE */ + /* update BCN for TIM IE */ /* update_BCNTIM(padapter); */ update_beacon(padapter, _TIM_IE_, NULL, true); } @@ -1205,7 +1205,7 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_ pstapriv->tim_bitmap &= ~BIT(psta->aid); - /* upate BCN for TIM IE */ + /* update BCN for TIM IE */ /* update_BCNTIM(padapter); */ update_beacon(padapter, _TIM_IE_, NULL, true); } @@ -1953,7 +1953,7 @@ static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe) for (i = 0; i < nr_subframes; i++) { sub_pkt = subframes[i]; - /* Indicat the packets to upper layer */ + /* Indicate the packets to upper layer */ if (sub_pkt) { rtw_os_recv_indicate_pkt(padapter, sub_pkt, &prframe->u.hdr.attrib); } @@ -2606,14 +2606,14 @@ static void rtw_signal_stat_timer_hdl(struct timer_list *t) if (recvpriv->signal_strength_data.update_req == 0) {/* update_req is clear, means we got rx */ avg_signal_strength = recvpriv->signal_strength_data.avg_val; num_signal_strength = recvpriv->signal_strength_data.total_num; - /* after avg_vals are accquired, we can re-stat the signal values */ + /* after avg_vals are acquired, we can re-stat the signal values */ recvpriv->signal_strength_data.update_req = 1; } if (recvpriv->signal_qual_data.update_req == 0) {/* update_req is clear, means we got rx */ avg_signal_qual = recvpriv->signal_qual_data.avg_val; num_signal_qual = recvpriv->signal_qual_data.total_num; - /* after avg_vals are accquired, we can re-stat the signal values */ + /* after avg_vals are acquired, we can re-stat the signal values */ recvpriv->signal_qual_data.update_req = 1; } diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c index 9c4607114cea..5ebf691bd743 100644 --- a/drivers/staging/rtl8723bs/core/rtw_security.c +++ b/drivers/staging/rtl8723bs/core/rtw_security.c @@ -434,7 +434,6 @@ void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_cod rtw_secmicappend(&micdata, &header[16], 6); else rtw_secmicappend(&micdata, &header[10], 6); - } rtw_secmicappend(&micdata, &priority[0], 4); @@ -723,7 +722,6 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe) TKIP_SW_ENC_CNT_INC(psecuritypriv, pattrib->ra); } - } return res; } @@ -829,11 +827,9 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe) RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo == NULL!!!\n", __func__)); res = _FAIL; } - } exit: return res; - } @@ -1219,7 +1215,6 @@ static void construct_mic_header2( if (!qc_exists && a4_exists) { for (i = 0; i < 6; i++) 
mic_header2[8+i] = mpdu[24+i]; /* A4 */ - } if (qc_exists && !a4_exists) { @@ -1234,7 +1229,6 @@ static void construct_mic_header2( mic_header2[14] = mpdu[30] & 0x0f; mic_header2[15] = mpdu[31] & 0x00; } - } /************************************************/ @@ -1413,7 +1407,6 @@ static sint aes_cipher(u8 *key, uint hdrlen, } bitwise_xor(aes_out, padded_buffer, chain_buffer); aes128k128d(key, chain_buffer, aes_out); - } for (j = 0 ; j < 8; j++) @@ -1719,7 +1712,6 @@ static sint aes_decipher(u8 *key, uint hdrlen, } bitwise_xor(aes_out, padded_buffer, chain_buffer); aes128k128d(key, chain_buffer, aes_out); - } for (j = 0; j < 8; j++) diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c index 9590e6f351c1..110338dbe372 100644 --- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c @@ -326,20 +326,20 @@ inline void rtw_set_oper_ch(struct adapter *adapter, u8 ch) dvobj->on_oper_ch_time = jiffies; #ifdef DBG_CH_SWITCH - cnt += snprintf(msg+cnt, len-cnt, "switch to ch %3u", ch); + cnt += scnprintf(msg+cnt, len-cnt, "switch to ch %3u", ch); for (i = 0; i < dvobj->iface_nums; i++) { struct adapter *iface = dvobj->padapters[i]; - cnt += snprintf(msg+cnt, len-cnt, " ["ADPT_FMT":", ADPT_ARG(iface)); + cnt += scnprintf(msg+cnt, len-cnt, " ["ADPT_FMT":", ADPT_ARG(iface)); if (iface->mlmeextpriv.cur_channel == ch) - cnt += snprintf(msg+cnt, len-cnt, "C"); + cnt += scnprintf(msg+cnt, len-cnt, "C"); else - cnt += snprintf(msg+cnt, len-cnt, "_"); + cnt += scnprintf(msg+cnt, len-cnt, "_"); if (iface->wdinfo.listen_channel == ch && !rtw_p2p_chk_state(&iface->wdinfo, P2P_STATE_NONE)) - cnt += snprintf(msg+cnt, len-cnt, "L"); + cnt += scnprintf(msg+cnt, len-cnt, "L"); else - cnt += snprintf(msg+cnt, len-cnt, "_"); - cnt += snprintf(msg+cnt, len-cnt, "]"); + cnt += scnprintf(msg+cnt, len-cnt, "_"); + cnt += scnprintf(msg+cnt, len-cnt, "]"); } DBG_871X(FUNC_ADPT_FMT" %s\n", FUNC_ADPT_ARG(adapter), msg); @@ -1503,7 +1503,7 @@ void update_beacon_info(struct adapter *padapter, u8 *pframe, uint pkt_len, stru switch (pIE->ElementID) { case _VENDOR_SPECIFIC_IE_: - /* to update WMM paramter set while receiving beacon */ + /* to update WMM parameter set while receiving beacon */ if (!memcmp(pIE->data, WMM_PARA_OUI, 6) && pIE->Length == WLAN_WMM_LEN) /* WMM */ if (WMM_param_handler(padapter, pIE)) report_wmm_edca_update(padapter); diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c index fdb585ff5925..571353404a95 100644 --- a/drivers/staging/rtl8723bs/core/rtw_xmit.c +++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c @@ -1180,7 +1180,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram mpdu_len -= pattrib->icv_len; if (bmcst) { - /* don't do fragment to broadcat/multicast packets */ + /* don't do fragment to broadcast/multicast packets */ mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen); } else { mem_sz = _rtw_pktfile_read(&pktfile, pframe, mpdu_len); @@ -2303,7 +2303,7 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr pstapriv->tim_bitmap |= BIT(psta->aid); if (update_tim) - /* upate BCN for TIM IE */ + /* update BCN for TIM IE */ update_beacon(padapter, _TIM_IE_, NULL, true); } diff --git a/drivers/staging/rtl8723bs/hal/Hal8723BReg.h b/drivers/staging/rtl8723bs/hal/Hal8723BReg.h index ce02457922b7..b9aca99478db 100644 --- a/drivers/staging/rtl8723bs/hal/Hal8723BReg.h +++ 
b/drivers/staging/rtl8723bs/hal/Hal8723BReg.h @@ -415,13 +415,13 @@ #define IMR_BCNDMAINT3_8723B BIT23 /* Beacon DMA Interrupt 3 */ #define IMR_BCNDMAINT2_8723B BIT22 /* Beacon DMA Interrupt 2 */ #define IMR_BCNDMAINT1_8723B BIT21 /* Beacon DMA Interrupt 1 */ -#define IMR_BCNDOK7_8723B BIT20 /* Beacon Queue DMA OK Interrup 7 */ -#define IMR_BCNDOK6_8723B BIT19 /* Beacon Queue DMA OK Interrup 6 */ -#define IMR_BCNDOK5_8723B BIT18 /* Beacon Queue DMA OK Interrup 5 */ -#define IMR_BCNDOK4_8723B BIT17 /* Beacon Queue DMA OK Interrup 4 */ -#define IMR_BCNDOK3_8723B BIT16 /* Beacon Queue DMA OK Interrup 3 */ -#define IMR_BCNDOK2_8723B BIT15 /* Beacon Queue DMA OK Interrup 2 */ -#define IMR_BCNDOK1_8723B BIT14 /* Beacon Queue DMA OK Interrup 1 */ +#define IMR_BCNDOK7_8723B BIT20 /* Beacon Queue DMA OK Interrupt 7 */ +#define IMR_BCNDOK6_8723B BIT19 /* Beacon Queue DMA OK Interrupt 6 */ +#define IMR_BCNDOK5_8723B BIT18 /* Beacon Queue DMA OK Interrupt 5 */ +#define IMR_BCNDOK4_8723B BIT17 /* Beacon Queue DMA OK Interrupt 4 */ +#define IMR_BCNDOK3_8723B BIT16 /* Beacon Queue DMA OK Interrupt 3 */ +#define IMR_BCNDOK2_8723B BIT15 /* Beacon Queue DMA OK Interrupt 2 */ +#define IMR_BCNDOK1_8723B BIT14 /* Beacon Queue DMA OK Interrupt 1 */ #define IMR_ATIMEND_E_8723B BIT13 /* ATIM Window End Extension for Win7 */ #define IMR_TXERR_8723B BIT11 /* Tx Error Flag Interrupt Status, write 1 clear. */ #define IMR_RXERR_8723B BIT10 /* Rx Error Flag INT Status, Write 1 clear */ diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c index dd349c506da8..c60e8c58d9cc 100644 --- a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c +++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c @@ -1807,7 +1807,7 @@ static void halbtc8723b1ant_TdmaDurationAdjustForAcl( result = 0; WaitCount = 0; } else { - /* accquire the BT TRx retry count from BT_Info byte2 */ + /* acquire the BT TRx retry count from BT_Info byte2 */ retryCount = pCoexSta->btRetryCnt; btInfoExt = pCoexSta->btInfoExt; /* BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount)); */ diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c index 02da0a883594..2779dba92bab 100644 --- a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c +++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c @@ -1610,8 +1610,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust( HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(13); else if (maxInterval == 2) HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(14); - else if (maxInterval == 3) - HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(15); else HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(15); } else { @@ -1619,8 +1617,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust( HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(9); else if (maxInterval == 2) HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(10); - else if (maxInterval == 3) - HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(11); else HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(11); } @@ -1630,8 +1626,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust( HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(5); else if (maxInterval == 2) HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(6); - else if (maxInterval == 3) - HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(7); else HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(7); } else { @@ -1639,8 +1633,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust( HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(1); else if (maxInterval == 2) HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(2); - else if (maxInterval == 3) - 
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(3); else HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(3); } @@ -1654,7 +1646,7 @@ static void halbtc8723b2ant_TdmaDurationAdjust( result = 0; WaitCount = 0; } else { - /* accquire the BT TRx retry count from BT_Info byte2 */ + /* acquire the BT TRx retry count from BT_Info byte2 */ retryCount = pCoexSta->btRetryCnt; BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount)); BTC_PRINT( diff --git a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h index 7150d54d49ab..c758d143c57f 100644 --- a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h +++ b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h @@ -44,14 +44,14 @@ #define BTC_ANT_WIFI_AT_CPL_MAIN 0 #define BTC_ANT_WIFI_AT_CPL_AUX 1 -typedef enum _BTC_POWERSAVE_TYPE{ +typedef enum _BTC_POWERSAVE_TYPE { BTC_PS_WIFI_NATIVE = 0, /* wifi original power save behavior */ BTC_PS_LPS_ON = 1, BTC_PS_LPS_OFF = 2, BTC_PS_MAX } BTC_POWERSAVE_TYPE, *PBTC_POWERSAVE_TYPE; -typedef enum _BTC_BT_REG_TYPE{ +typedef enum _BTC_BT_REG_TYPE { BTC_BT_REG_RF = 0, BTC_BT_REG_MODEM = 1, BTC_BT_REG_BLUEWIZE = 2, @@ -60,7 +60,7 @@ typedef enum _BTC_BT_REG_TYPE{ BTC_BT_REG_MAX } BTC_BT_REG_TYPE, *PBTC_BT_REG_TYPE; -typedef enum _BTC_CHIP_INTERFACE{ +typedef enum _BTC_CHIP_INTERFACE { BTC_INTF_UNKNOWN = 0, BTC_INTF_PCI = 1, BTC_INTF_USB = 2, diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf.c b/drivers/staging/rtl8723bs/hal/HalPhyRf.c index 357802db9aed..7b435840746d 100644 --- a/drivers/staging/rtl8723bs/hal/HalPhyRf.c +++ b/drivers/staging/rtl8723bs/hal/HalPhyRf.c @@ -92,7 +92,7 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter) u8 *deltaSwingTableIdx_TUP_B; u8 *deltaSwingTableIdx_TDOWN_B; - /* 4 2. Initilization (7 steps in total) */ + /* 4 2. Initialization (7 steps in total) */ ConfigureTxpowerTrack(pDM_Odm, &c); @@ -213,7 +213,7 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter) /* 3 7. If necessary, move the index of swing table to adjust Tx power. */ if (delta > 0 && pDM_Odm->RFCalibrateInfo.TxPowerTrackControl) { - /* delta" here is used to record the absolute value of differrence. */ + /* delta" here is used to record the absolute value of difference. */ delta = ThermalValue > pHalData->EEPROMThermalMeter ? (ThermalValue - pHalData->EEPROMThermalMeter) : diff --git a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c index c24156873fed..3b34a516075f 100644 --- a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c +++ b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c @@ -118,7 +118,7 @@ u8 HalPwrSeqCmdParsing( &GET_PWR_CFG_MASK(PwrCfgCmd) ); - /* Write the value back to sytem register */ + /* Write the value back to system register */ rtw_write8(padapter, offset, value); } break; diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c index 109bd85b0cd8..02676da5c166 100644 --- a/drivers/staging/rtl8723bs/hal/hal_com.c +++ b/drivers/staging/rtl8723bs/hal/hal_com.c @@ -961,10 +961,7 @@ exit: u8 rtw_get_mgntframe_raid(struct adapter *adapter, unsigned char network_type) { - - u8 raid; - raid = (network_type & WIRELESS_11B) ? RATEID_IDX_B : RATEID_IDX_G; - return raid; + return (network_type & WIRELESS_11B) ? 
RATEID_IDX_B : RATEID_IDX_G; } void rtw_hal_update_sta_rate_mask(struct adapter *padapter, struct sta_info *psta) diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c index eb7de3617d83..767e2a784f78 100644 --- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c +++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c @@ -1498,9 +1498,7 @@ s8 PHY_GetTxPowerByRate( return value; } - value = pHalData->TxPwrByRateOffset[Band][RFPath][TxNum][rateIndex]; - - return value; + return pHalData->TxPwrByRateOffset[Band][RFPath][TxNum][rateIndex]; } diff --git a/drivers/staging/rtl8723bs/hal/hal_intf.c b/drivers/staging/rtl8723bs/hal/hal_intf.c index 7d8f21f32fb9..23df729acb7b 100644 --- a/drivers/staging/rtl8723bs/hal/hal_intf.c +++ b/drivers/staging/rtl8723bs/hal/hal_intf.c @@ -365,7 +365,7 @@ void rtw_hal_dm_watchdog_in_lps(struct adapter *padapter) { if (adapter_to_pwrctl(padapter)->bFwCurrentInPSMode == true) { if (padapter->HalFunc.hal_dm_watchdog_in_lps) - padapter->HalFunc.hal_dm_watchdog_in_lps(padapter); /* this fuction caller is in interrupt context */ + padapter->HalFunc.hal_dm_watchdog_in_lps(padapter); /* this function caller is in interrupt context */ } } diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h index fba3b9e1491b..b77d1fe33a28 100644 --- a/drivers/staging/rtl8723bs/hal/odm.h +++ b/drivers/staging/rtl8723bs/hal/odm.h @@ -849,7 +849,7 @@ typedef struct _ODM_PATH_DIVERSITY_ { u32 PathB_Cnt[ODM_ASSOCIATE_ENTRY_NUM]; } PATHDIV_T, *pPATHDIV_T; -typedef enum _BASEBAND_CONFIG_PHY_REG_PG_VALUE_TYPE{ +typedef enum _BASEBAND_CONFIG_PHY_REG_PG_VALUE_TYPE { PHY_REG_PG_RELATIVE_VALUE = 0, PHY_REG_PG_EXACT_VALUE = 1 } PHY_REG_PG_TYPE; diff --git a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c index 95edd148ac24..3ea1972545e5 100644 --- a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c +++ b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c @@ -40,16 +40,11 @@ static void odm_SetCrystalCap(void *pDM_VOID, u8 CrystalCap) static u8 odm_GetDefaultCrytaltalCap(void *pDM_VOID) { PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; - u8 CrystalCap = 0x20; struct adapter *Adapter = pDM_Odm->Adapter; struct hal_com_data *pHalData = GET_HAL_DATA(Adapter); - CrystalCap = pHalData->CrystalCap; - - CrystalCap = CrystalCap & 0x3f; - - return CrystalCap; + return pHalData->CrystalCap & 0x3f; } static void odm_SetATCStatus(void *pDM_VOID, bool ATCStatus) @@ -303,7 +298,7 @@ void ODM_CfoTracking(void *pDM_VOID) void ODM_ParsingCFO(void *pDM_VOID, void *pPktinfo_VOID, s8 *pcfotail) { PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; - struct odm_packet_info *pPktinfo = (struct odm_packet_info *)pPktinfo_VOID; + struct odm_packet_info *pPktinfo = pPktinfo_VOID; PCFO_TRACKING pCfoTrack = &pDM_Odm->DM_CfoTrack; u8 i; diff --git a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c index 71919a3d81ab..9c190b1024d8 100644 --- a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c +++ b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c @@ -103,10 +103,10 @@ static void odm_RxPhyStatus92CSeries_Parsing( pDM_Odm->PhyDbgInfo.NumQryPhyStatusCCK++; /* */ /* (1)Hardware does not provide RSSI for CCK */ - /* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */ + /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */ /* */ - cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a ; + cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a; /* 2011.11.28 LukeLee: 
88E use different LNA & VGA gain table */ /* The RSSI formula should be modified according to the gain table */ @@ -178,7 +178,7 @@ static void odm_RxPhyStatus92CSeries_Parsing( /* */ - /* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */ + /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */ /* */ rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1)&0x7f)-110; diff --git a/drivers/staging/rtl8723bs/hal/odm_debug.h b/drivers/staging/rtl8723bs/hal/odm_debug.h index 3e58cb806c8c..a7381173d1a3 100644 --- a/drivers/staging/rtl8723bs/hal/odm_debug.h +++ b/drivers/staging/rtl8723bs/hal/odm_debug.h @@ -13,7 +13,7 @@ /* Define the debug levels */ /* */ /* 1. DBG_TRACE and DBG_LOUD are used for normal cases. */ -/* So that, they can help SW engineer to develope or trace states changed */ +/* So that, they can help SW engineer to developed or trace states changed */ /* and also help HW enginner to trace every operation to and from HW, */ /* e.g IO, Tx, Rx. */ /* */ @@ -34,7 +34,7 @@ #define ODM_DBG_SERIOUS 2 /* */ -/* Abnormal, rare, or unexpeted cases. */ +/* Abnormal, rare, or unexpected cases. */ /* For example, */ /* IRP/Packet/OID canceled, */ /* device suprisely unremoved and so on. */ diff --git a/drivers/staging/rtl8723bs/hal/odm_types.h b/drivers/staging/rtl8723bs/hal/odm_types.h index 28f42c9c3dd7..c79fc1813c3f 100644 --- a/drivers/staging/rtl8723bs/hal/odm_types.h +++ b/drivers/staging/rtl8723bs/hal/odm_types.h @@ -28,7 +28,7 @@ typedef enum _HAL_STATUS { /* */ -/* Declare for ODM spin lock defintion temporarily fro compile pass. */ +/* Declare for ODM spin lock definition temporarily from compile pass. */ /* */ typedef enum _RT_SPINLOCK_TYPE { RT_TX_SPINLOCK = 1, diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c index 71b5a50b6ef6..fd2b74003faf 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c @@ -146,7 +146,7 @@ static void ConstructBeacon(struct adapter *padapter, u8 *pframe, u32 *pLength) SetFrameSubType(pframe, WIFI_BEACON); pframe += sizeof(struct ieee80211_hdr_3addr); - pktlen = sizeof (struct ieee80211_hdr_3addr); + pktlen = sizeof(struct ieee80211_hdr_3addr); /* timestamp will be inserted by hardware */ pframe += 8; @@ -823,8 +823,11 @@ static void ConstructProbeRsp(struct adapter *padapter, u8 *pframe, u32 *pLength } #endif /* CONFIG_AP_WOWLAN */ -/* To check if reserved page content is destroyed by beacon beacuse beacon is too large. */ -/* 2010.06.23. Added by tynli. */ +/* + * To check if reserved page content is destroyed by beacon because beacon + * is too large. + */ +/* 2010.06.23. Added by tynli. */ void CheckFwRsvdPageContent(struct adapter *Adapter) { } @@ -1409,16 +1412,20 @@ void rtl8723b_set_ap_wowlan_cmd(struct adapter *padapter, u8 enable) } #endif /* CONFIG_AP_WOWLAN */ -/* */ -/* Description: Fill the reserved packets that FW will use to RSVD page. */ -/* Now we just send 4 types packet to rsvd page. */ -/* (1)Beacon, (2)Ps-poll, (3)Null data, (4)ProbeRsp. */ -/* Input: */ -/* bDLFinished - false: At the first time we will send all the packets as a large packet to Hw, */ -/* so we need to set the packet length to total lengh. */ -/* true: At the second time, we should send the first packet (default:beacon) */ -/* to Hw again and set the lengh in descriptor to the real beacon lengh. */ -/* 2009.10.15 by tynli. */ +/* + * Description: Fill the reserved packets that FW will use to RSVD page. 
+ * Now we just send 4 types packet to rsvd page. + * (1)Beacon, (2)Ps-poll, (3)Null data, (4)ProbeRsp. + * + * Input: + * + * bDLFinished - false: At the first time we will send all the packets as + * a large packet to Hw, so we need to set the packet length to total length. + * + * true: At the second time, we should send the first packet (default:beacon) + * to Hw again and set the length in descriptor to the real beacon length. + */ +/* 2009.10.15 by tynli. */ static void rtl8723b_set_FwRsvdPagePkt( struct adapter *padapter, bool bDLFinished ) @@ -1599,7 +1606,7 @@ static void rtl8723b_set_FwRsvdPagePkt( #ifdef CONFIG_GTK_OL BufIndex += (CurtPktPageNum*PageSize); - /* if the ap staion info. exists, get the kek, kck from staion info. */ + /* if the ap station info. exists, get the kek, kck from station info. */ psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv)); if (!psta) { memset(kek, 0, RTW_KEK_LEN); @@ -1652,7 +1659,7 @@ static void rtl8723b_set_FwRsvdPagePkt( TotalPacketLen = BufIndex-TxDescLen + 256; /* extension memory for FW */ #else - TotalPacketLen = BufIndex-TxDescLen + sizeof (union pn48); /* IV len */ + TotalPacketLen = BufIndex - TxDescLen + sizeof(union pn48); /* IV len */ #endif /* CONFIG_GTK_OL */ } else #endif /* CONFIG_WOWLAN */ @@ -1791,18 +1798,19 @@ error: } #ifdef CONFIG_AP_WOWLAN -/* */ -/* Description: Fill the reserved packets that FW will use to RSVD page. */ -/* Now we just send 2 types packet to rsvd page. (1)Beacon, (2)ProbeRsp. */ -/* */ -/* Input: bDLFinished */ -/* */ -/* false: At the first time we will send all the packets as a large packet to Hw, */ -/* so we need to set the packet length to total lengh. */ -/* */ -/* true: At the second time, we should send the first packet (default:beacon) */ -/* to Hw again and set the lengh in descriptor to the real beacon lengh. */ -/* 2009.10.15 by tynli. */ +/* + * Description: Fill the reserved packets that FW will use to RSVD page. + * Now we just send 2 types packet to rsvd page. (1)Beacon, (2)ProbeRsp. + * + * Input: bDLFinished + * + * false: At the first time we will send all the packets as a large packet to + * Hw, so we need to set the packet length to total length. + * + * true: At the second time, we should send the first packet (default:beacon) + * to Hw again and set the length in descriptor to the real beacon length. + */ +/* 2009.10.15 by tynli. */ static void rtl8723b_set_AP_FwRsvdPagePkt( struct adapter *padapter, bool bDLFinished ) diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c index 1e8b61443408..c3051ebaeb78 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c @@ -192,7 +192,7 @@ static inline union recv_frame *try_alloc_recvframe(struct recv_priv *precvpriv, rtw_enqueue_recvbuf_to_head(precvbuf, &precvpriv->recv_buf_pending_queue); - /* The case of can't allocte recvframe should be temporary, */ + /* The case of can't allocate recvframe should be temporary, */ /* schedule again and hope recvframe is available next time. 
*/ tasklet_schedule(&precvpriv->recv_tasklet); } @@ -480,10 +480,8 @@ initbuferror: precvpriv->precv_buf = NULL; } - if (precvpriv->pallocated_recv_buf) { - kfree(precvpriv->pallocated_recv_buf); - precvpriv->pallocated_recv_buf = NULL; - } + kfree(precvpriv->pallocated_recv_buf); + precvpriv->pallocated_recv_buf = NULL; exit: return res; @@ -518,8 +516,6 @@ void rtl8723bs_free_recv_priv(struct adapter *padapter) precvpriv->precv_buf = NULL; } - if (precvpriv->pallocated_recv_buf) { - kfree(precvpriv->pallocated_recv_buf); - precvpriv->pallocated_recv_buf = NULL; - } + kfree(precvpriv->pallocated_recv_buf); + precvpriv->pallocated_recv_buf = NULL; } diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c index b44e902ed338..44799c4a9f35 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c @@ -282,7 +282,7 @@ static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv /* check xmit_buf size enough or not */ txlen = txdesc_size + rtw_wlan_pkt_size(pxmitframe); - if( !pxmitbuf || + if (!pxmitbuf || ((_RND(pxmitbuf->len, 8) + txlen) > max_xmit_len) || (k >= (rtw_hal_sdio_max_txoqt_free_space(padapter) - 1)) ) { @@ -476,14 +476,13 @@ int rtl8723bs_xmit_thread(void *context) s32 ret; struct adapter *padapter; struct xmit_priv *pxmitpriv; - u8 thread_name[20] = "RTWHALXT"; - + u8 thread_name[20]; ret = _SUCCESS; padapter = context; pxmitpriv = &padapter->xmitpriv; - rtw_sprintf(thread_name, 20, "%s-"ADPT_FMT, thread_name, ADPT_ARG(padapter)); + rtw_sprintf(thread_name, 20, "RTWHALXT-" ADPT_FMT, ADPT_ARG(padapter)); thread_enter(thread_name); DBG_871X("start "FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter)); diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c index e813382e78a6..7853af53051d 100644 --- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c +++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c @@ -235,7 +235,7 @@ static void _InitQueueReservedPage(struct adapter *padapter) if (pHalData->OutEpQueueSel & TX_SELE_LQ) numLQ = bWiFiConfig ? WMM_NORMAL_PAGE_NUM_LPQ_8723B : NORMAL_PAGE_NUM_LPQ_8723B; - /* NOTE: This step shall be proceed before writting REG_RQPN. */ + /* NOTE: This step shall be proceed before writing REG_RQPN. */ if (pHalData->OutEpQueueSel & TX_SELE_NQ) numNQ = bWiFiConfig ? 
WMM_NORMAL_PAGE_NUM_NPQ_8723B : NORMAL_PAGE_NUM_NPQ_8723B; @@ -551,18 +551,8 @@ static void HalRxAggr8723BSdio(struct adapter *padapter) pregistrypriv = &padapter->registrypriv; - if (pregistrypriv->wifi_spec) { - /* 2010.04.27 hpfan */ - /* Adjust RxAggrTimeout to close to zero disable RxAggr, suggested by designer */ - /* Timeout value is calculated by 34 / (2^n) */ - valueDMATimeout = 0x06; - valueDMAPageCount = 0x06; - } else { - /* 20130530, Isaac@SD1 suggest 3 kinds of parameter */ - /* TX/RX Balance */ - valueDMATimeout = 0x06; - valueDMAPageCount = 0x06; - } + valueDMATimeout = 0x06; + valueDMAPageCount = 0x06; rtw_write8(padapter, REG_RXDMA_AGG_PG_TH + 1, valueDMATimeout); rtw_write8(padapter, REG_RXDMA_AGG_PG_TH, valueDMAPageCount); diff --git a/drivers/staging/rtl8723bs/include/HalVerDef.h b/drivers/staging/rtl8723bs/include/HalVerDef.h index 160f34efbfd5..c548fb126683 100644 --- a/drivers/staging/rtl8723bs/include/HalVerDef.h +++ b/drivers/staging/rtl8723bs/include/HalVerDef.h @@ -20,7 +20,7 @@ typedef enum tag_HAL_IC_Type_Definition CHIP_8821 = 7, CHIP_8723B = 8, CHIP_8192E = 9, -}HAL_IC_TYPE_E; +} HAL_IC_TYPE_E; /* HAL_CHIP_TYPE_E */ typedef enum tag_HAL_CHIP_Type_Definition @@ -28,7 +28,7 @@ typedef enum tag_HAL_CHIP_Type_Definition TEST_CHIP = 0, NORMAL_CHIP = 1, FPGA = 2, -}HAL_CHIP_TYPE_E; +} HAL_CHIP_TYPE_E; /* HAL_CUT_VERSION_E */ typedef enum tag_HAL_Cut_Version_Definition @@ -44,7 +44,7 @@ typedef enum tag_HAL_Cut_Version_Definition I_CUT_VERSION = 8, J_CUT_VERSION = 9, K_CUT_VERSION = 10, -}HAL_CUT_VERSION_E; +} HAL_CUT_VERSION_E; /* HAL_Manufacturer */ typedef enum tag_HAL_Manufacturer_Version_Definition @@ -52,7 +52,7 @@ typedef enum tag_HAL_Manufacturer_Version_Definition CHIP_VENDOR_TSMC = 0, CHIP_VENDOR_UMC = 1, CHIP_VENDOR_SMIC = 2, -}HAL_VENDOR_E; +} HAL_VENDOR_E; typedef enum tag_HAL_RF_Type_Definition { @@ -64,7 +64,7 @@ typedef enum tag_HAL_RF_Type_Definition RF_TYPE_3T3R = 5, RF_TYPE_3T4R = 6, RF_TYPE_4T4R = 7, -}HAL_RF_TYPE_E; +} HAL_RF_TYPE_E; typedef struct tag_HAL_VERSION { @@ -74,14 +74,14 @@ typedef struct tag_HAL_VERSION HAL_VENDOR_E VendorType; HAL_RF_TYPE_E RFType; u8 ROMVer; -}HAL_VERSION,*PHAL_VERSION; +} HAL_VERSION, *PHAL_VERSION; /* VERSION_8192C VersionID; */ /* HAL_VERSION VersionID; */ /* Get element */ -#define GET_CVID_IC_TYPE(version) ((HAL_IC_TYPE_E)((version).ICType) ) -#define GET_CVID_CHIP_TYPE(version) ((HAL_CHIP_TYPE_E)((version).ChipType) ) +#define GET_CVID_IC_TYPE(version) ((HAL_IC_TYPE_E)((version).ICType)) +#define GET_CVID_CHIP_TYPE(version) ((HAL_CHIP_TYPE_E)((version).ChipType)) #define GET_CVID_RF_TYPE(version) ((HAL_RF_TYPE_E)((version).RFType)) #define GET_CVID_MANUFACTUER(version) ((HAL_VENDOR_E)((version).VendorType)) #define GET_CVID_CUT_VERSION(version) ((HAL_CUT_VERSION_E)((version).CUTVersion)) @@ -93,8 +93,8 @@ typedef struct tag_HAL_VERSION /* HAL_VERSION VersionID */ /* HAL_CHIP_TYPE_E */ -#define IS_TEST_CHIP(version) ((GET_CVID_CHIP_TYPE(version) ==TEST_CHIP)? true: false) -#define IS_NORMAL_CHIP(version) ((GET_CVID_CHIP_TYPE(version) ==NORMAL_CHIP)? true: false) +#define IS_TEST_CHIP(version) ((GET_CVID_CHIP_TYPE(version) == TEST_CHIP) ? true : false) +#define IS_NORMAL_CHIP(version) ((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false) /* HAL_CUT_VERSION_E */ #define IS_A_CUT(version) ((GET_CVID_CUT_VERSION(version) == A_CUT_VERSION) ? true : false) @@ -107,13 +107,13 @@ typedef struct tag_HAL_VERSION #define IS_K_CUT(version) ((GET_CVID_CUT_VERSION(version) == K_CUT_VERSION) ? 
true : false) /* HAL_VENDOR_E */ -#define IS_CHIP_VENDOR_TSMC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC)? true: false) -#define IS_CHIP_VENDOR_UMC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_UMC)? true: false) -#define IS_CHIP_VENDOR_SMIC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_SMIC)? true: false) +#define IS_CHIP_VENDOR_TSMC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC) ? true : false) +#define IS_CHIP_VENDOR_UMC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_UMC) ? true : false) +#define IS_CHIP_VENDOR_SMIC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_SMIC) ? true : false) /* HAL_RF_TYPE_E */ -#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T1R)? true : false) -#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)? true : false) -#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)? true : false) +#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T1R) ? true : false) +#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R) ? true : false) +#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R) ? true : false) #endif diff --git a/drivers/staging/rtl8723bs/include/cmd_osdep.h b/drivers/staging/rtl8723bs/include/cmd_osdep.h index d3af9f44ad59..5506f513dc01 100644 --- a/drivers/staging/rtl8723bs/include/cmd_osdep.h +++ b/drivers/staging/rtl8723bs/include/cmd_osdep.h @@ -10,8 +10,8 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv); int rtw_init_evt_priv(struct evt_priv *pevtpriv); -extern void _rtw_free_evt_priv (struct evt_priv *pevtpriv); -extern void _rtw_free_cmd_priv (struct cmd_priv *pcmdpriv); +extern void _rtw_free_evt_priv(struct evt_priv *pevtpriv); +extern void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv); int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj); extern struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue); diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h index 6ec9087f2eb1..dba75216cbfe 100644 --- a/drivers/staging/rtl8723bs/include/drv_types.h +++ b/drivers/staging/rtl8723bs/include/drv_types.h @@ -77,7 +77,7 @@ enum _NIC_VERSION { #define SPEC_DEV_ID_RF_CONFIG_2T2R BIT(4) #define SPEC_DEV_ID_ASSIGN_IFNAME BIT(5) -struct specific_device_id{ +struct specific_device_id { u32 flags; @@ -151,8 +151,8 @@ struct registry_priv u8 lowrate_two_xmit; - u8 rf_config ; - u8 low_power ; + u8 rf_config; + u8 low_power; u8 wifi_spec;/* !turbo_mode */ @@ -498,11 +498,11 @@ enum ADAPTER_TYPE { MAX_ADAPTER = 0xFF, }; -typedef enum _DRIVER_STATE{ +typedef enum _DRIVER_STATE { DRIVER_NORMAL = 0, DRIVER_DISAPPEAR = 1, DRIVER_REPLACE_DONGLE = 2, -}DRIVER_STATE; +} DRIVER_STATE; struct adapter { int DriverState;/* for disable driver using module, use dongle to replace module. 
*/ diff --git a/drivers/staging/rtl8723bs/include/hal_com.h b/drivers/staging/rtl8723bs/include/hal_com.h index f5c3ce5da70c..a46626d0509a 100644 --- a/drivers/staging/rtl8723bs/include/hal_com.h +++ b/drivers/staging/rtl8723bs/include/hal_com.h @@ -111,54 +111,54 @@ #define DESC_RATEVHTSS4MCS9 0x53 #define HDATA_RATE(rate)\ -(rate ==DESC_RATE1M)?"CCK_1M":\ -(rate ==DESC_RATE2M)?"CCK_2M":\ -(rate ==DESC_RATE5_5M)?"CCK5_5M":\ -(rate ==DESC_RATE11M)?"CCK_11M":\ -(rate ==DESC_RATE6M)?"OFDM_6M":\ -(rate ==DESC_RATE9M)?"OFDM_9M":\ -(rate ==DESC_RATE12M)?"OFDM_12M":\ -(rate ==DESC_RATE18M)?"OFDM_18M":\ -(rate ==DESC_RATE24M)?"OFDM_24M":\ -(rate ==DESC_RATE36M)?"OFDM_36M":\ -(rate ==DESC_RATE48M)?"OFDM_48M":\ -(rate ==DESC_RATE54M)?"OFDM_54M":\ -(rate ==DESC_RATEMCS0)?"MCS0":\ -(rate ==DESC_RATEMCS1)?"MCS1":\ -(rate ==DESC_RATEMCS2)?"MCS2":\ -(rate ==DESC_RATEMCS3)?"MCS3":\ -(rate ==DESC_RATEMCS4)?"MCS4":\ -(rate ==DESC_RATEMCS5)?"MCS5":\ -(rate ==DESC_RATEMCS6)?"MCS6":\ -(rate ==DESC_RATEMCS7)?"MCS7":\ -(rate ==DESC_RATEMCS8)?"MCS8":\ -(rate ==DESC_RATEMCS9)?"MCS9":\ -(rate ==DESC_RATEMCS10)?"MCS10":\ -(rate ==DESC_RATEMCS11)?"MCS11":\ -(rate ==DESC_RATEMCS12)?"MCS12":\ -(rate ==DESC_RATEMCS13)?"MCS13":\ -(rate ==DESC_RATEMCS14)?"MCS14":\ -(rate ==DESC_RATEMCS15)?"MCS15":\ -(rate ==DESC_RATEVHTSS1MCS0)?"VHTSS1MCS0":\ -(rate ==DESC_RATEVHTSS1MCS1)?"VHTSS1MCS1":\ -(rate ==DESC_RATEVHTSS1MCS2)?"VHTSS1MCS2":\ -(rate ==DESC_RATEVHTSS1MCS3)?"VHTSS1MCS3":\ -(rate ==DESC_RATEVHTSS1MCS4)?"VHTSS1MCS4":\ -(rate ==DESC_RATEVHTSS1MCS5)?"VHTSS1MCS5":\ -(rate ==DESC_RATEVHTSS1MCS6)?"VHTSS1MCS6":\ -(rate ==DESC_RATEVHTSS1MCS7)?"VHTSS1MCS7":\ -(rate ==DESC_RATEVHTSS1MCS8)?"VHTSS1MCS8":\ -(rate ==DESC_RATEVHTSS1MCS9)?"VHTSS1MCS9":\ -(rate ==DESC_RATEVHTSS2MCS0)?"VHTSS2MCS0":\ -(rate ==DESC_RATEVHTSS2MCS1)?"VHTSS2MCS1":\ -(rate ==DESC_RATEVHTSS2MCS2)?"VHTSS2MCS2":\ -(rate ==DESC_RATEVHTSS2MCS3)?"VHTSS2MCS3":\ -(rate ==DESC_RATEVHTSS2MCS4)?"VHTSS2MCS4":\ -(rate ==DESC_RATEVHTSS2MCS5)?"VHTSS2MCS5":\ -(rate ==DESC_RATEVHTSS2MCS6)?"VHTSS2MCS6":\ -(rate ==DESC_RATEVHTSS2MCS7)?"VHTSS2MCS7":\ -(rate ==DESC_RATEVHTSS2MCS8)?"VHTSS2MCS8":\ -(rate ==DESC_RATEVHTSS2MCS9)?"VHTSS2MCS9":"UNKNOW" +(rate == DESC_RATE1M) ? "CCK_1M" : \ +(rate == DESC_RATE2M) ? "CCK_2M" : \ +(rate == DESC_RATE5_5M) ? "CCK5_5M" : \ +(rate == DESC_RATE11M) ? "CCK_11M" : \ +(rate == DESC_RATE6M) ? "OFDM_6M" : \ +(rate == DESC_RATE9M) ? "OFDM_9M" : \ +(rate == DESC_RATE12M) ? "OFDM_12M" : \ +(rate == DESC_RATE18M) ? "OFDM_18M" : \ +(rate == DESC_RATE24M) ? "OFDM_24M" : \ +(rate == DESC_RATE36M) ? "OFDM_36M" : \ +(rate == DESC_RATE48M) ? "OFDM_48M" : \ +(rate == DESC_RATE54M) ? "OFDM_54M" : \ +(rate == DESC_RATEMCS0) ? "MCS0" : \ +(rate == DESC_RATEMCS1) ? "MCS1" : \ +(rate == DESC_RATEMCS2) ? "MCS2" : \ +(rate == DESC_RATEMCS3) ? "MCS3" : \ +(rate == DESC_RATEMCS4) ? "MCS4" : \ +(rate == DESC_RATEMCS5) ? "MCS5" : \ +(rate == DESC_RATEMCS6) ? "MCS6" : \ +(rate == DESC_RATEMCS7) ? "MCS7" : \ +(rate == DESC_RATEMCS8) ? "MCS8" : \ +(rate == DESC_RATEMCS9) ? "MCS9" : \ +(rate == DESC_RATEMCS10) ? "MCS10" : \ +(rate == DESC_RATEMCS11) ? "MCS11" : \ +(rate == DESC_RATEMCS12) ? "MCS12" : \ +(rate == DESC_RATEMCS13) ? "MCS13" : \ +(rate == DESC_RATEMCS14) ? "MCS14" : \ +(rate == DESC_RATEMCS15) ? "MCS15" : \ +(rate == DESC_RATEVHTSS1MCS0) ? "VHTSS1MCS0" : \ +(rate == DESC_RATEVHTSS1MCS1) ? "VHTSS1MCS1" : \ +(rate == DESC_RATEVHTSS1MCS2) ? "VHTSS1MCS2" : \ +(rate == DESC_RATEVHTSS1MCS3) ? "VHTSS1MCS3" : \ +(rate == DESC_RATEVHTSS1MCS4) ? 
"VHTSS1MCS4" : \ +(rate == DESC_RATEVHTSS1MCS5) ? "VHTSS1MCS5" : \ +(rate == DESC_RATEVHTSS1MCS6) ? "VHTSS1MCS6" : \ +(rate == DESC_RATEVHTSS1MCS7) ? "VHTSS1MCS7" : \ +(rate == DESC_RATEVHTSS1MCS8) ? "VHTSS1MCS8" : \ +(rate == DESC_RATEVHTSS1MCS9) ? "VHTSS1MCS9" : \ +(rate == DESC_RATEVHTSS2MCS0) ? "VHTSS2MCS0" : \ +(rate == DESC_RATEVHTSS2MCS1) ? "VHTSS2MCS1" : \ +(rate == DESC_RATEVHTSS2MCS2) ? "VHTSS2MCS2" : \ +(rate == DESC_RATEVHTSS2MCS3) ? "VHTSS2MCS3" : \ +(rate == DESC_RATEVHTSS2MCS4) ? "VHTSS2MCS4" : \ +(rate == DESC_RATEVHTSS2MCS5) ? "VHTSS2MCS5" : \ +(rate == DESC_RATEVHTSS2MCS6) ? "VHTSS2MCS6" : \ +(rate == DESC_RATEVHTSS2MCS7) ? "VHTSS2MCS7" : \ +(rate == DESC_RATEVHTSS2MCS8) ? "VHTSS2MCS8" : \ +(rate == DESC_RATEVHTSS2MCS9) ? "VHTSS2MCS9" : "UNKNOW" enum{ @@ -235,7 +235,7 @@ s32 c2h_evt_read_88xx(struct adapter *adapter, u8 *buf); u8 rtw_get_mgntframe_raid(struct adapter *adapter, unsigned char network_type); void rtw_hal_update_sta_rate_mask(struct adapter *padapter, struct sta_info *psta); -void hw_var_port_switch (struct adapter *adapter); +void hw_var_port_switch(struct adapter *adapter); void SetHwReg(struct adapter *padapter, u8 variable, u8 *val); void GetHwReg(struct adapter *padapter, u8 variable, u8 *val); diff --git a/drivers/staging/rtl8723bs/include/hal_com_h2c.h b/drivers/staging/rtl8723bs/include/hal_com_h2c.h index 7dbae5e2050e..b951bc288b89 100644 --- a/drivers/staging/rtl8723bs/include/hal_com_h2c.h +++ b/drivers/staging/rtl8723bs/include/hal_com_h2c.h @@ -11,7 +11,7 @@ /* H2C CMD DEFINITION ------------------------------------------------ */ /* */ /* 88e, 8723b, 8812, 8821, 92e use the same FW code base */ -enum h2c_cmd{ +enum h2c_cmd { /* Common Class: 000 */ H2C_RSVD_PAGE = 0x00, H2C_MEDIA_STATUS_RPT = 0x01, @@ -96,9 +96,9 @@ enum h2c_cmd{ #define H2C_PROBERSP_RSVDPAGE_LEN 5 #ifdef CONFIG_WOWLAN -#define eqMacAddr(a, b) (((a)[0]==(b)[0] && (a)[1]==(b)[1] && (a)[2]==(b)[2] && (a)[3]==(b)[3] && (a)[4]==(b)[4] && (a)[5]==(b)[5]) ? 1:0) -#define cpMacAddr(des, src) ((des)[0]=(src)[0], (des)[1]=(src)[1], (des)[2]=(src)[2], (des)[3]=(src)[3], (des)[4]=(src)[4], (des)[5]=(src)[5]) -#define cpIpAddr(des, src) ((des)[0]=(src)[0], (des)[1]=(src)[1], (des)[2]=(src)[2], (des)[3]=(src)[3]) +#define eqMacAddr(a, b) (((a)[0] == (b)[0] && (a)[1] == (b)[1] && (a)[2] == (b)[2] && (a)[3] == (b)[3] && (a)[4] == (b)[4] && (a)[5] == (b)[5]) ? 1 : 0) +#define cpMacAddr(des, src) ((des)[0] = (src)[0], (des)[1] = (src)[1], (des)[2] = (src)[2], (des)[3] = (src)[3], (des)[4] = (src)[4], (des)[5] = (src)[5]) +#define cpIpAddr(des, src) ((des)[0] = (src)[0], (des)[1] = (src)[1], (des)[2] = (src)[2], (des)[3] = (src)[3]) /* */ /* ARP packet */ diff --git a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h index e9a3006a3e20..9fff4aa36546 100644 --- a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h +++ b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h @@ -177,7 +177,7 @@ u8 Channel, bool *bIn24G ); -s8 phy_get_tx_pwr_lmt (struct adapter *adapter, u32 RegPwrTblSel, +s8 phy_get_tx_pwr_lmt(struct adapter *adapter, u32 RegPwrTblSel, enum BAND_TYPE Band, enum CHANNEL_WIDTH Bandwidth, u8 RfPath, u8 DataRate, diff --git a/drivers/staging/rtl8723bs/include/hal_com_reg.h b/drivers/staging/rtl8723bs/include/hal_com_reg.h index 31a187b35810..37fa59a352d6 100644 --- a/drivers/staging/rtl8723bs/include/hal_com_reg.h +++ b/drivers/staging/rtl8723bs/include/hal_com_reg.h @@ -707,13 +707,13 @@ Default: 00b. 
/* ALL CCK Rate */ -#define RATE_ALL_CCK RATR_1M|RATR_2M|RATR_55M|RATR_11M -#define RATE_ALL_OFDM_AG RATR_6M|RATR_9M|RATR_12M|RATR_18M|RATR_24M|\ - RATR_36M|RATR_48M|RATR_54M -#define RATE_ALL_OFDM_1SS RATR_MCS0|RATR_MCS1|RATR_MCS2|RATR_MCS3 |\ - RATR_MCS4|RATR_MCS5|RATR_MCS6 |RATR_MCS7 -#define RATE_ALL_OFDM_2SS RATR_MCS8|RATR_MCS9 |RATR_MCS10|RATR_MCS11|\ - RATR_MCS12|RATR_MCS13|RATR_MCS14|RATR_MCS15 +#define RATE_ALL_CCK RATR_1M | RATR_2M | RATR_55M | RATR_11M +#define RATE_ALL_OFDM_AG RATR_6M | RATR_9M | RATR_12M | RATR_18M | RATR_24M |\ + RATR_36M | RATR_48M | RATR_54M +#define RATE_ALL_OFDM_1SS RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | RATR_MCS3 |\ + RATR_MCS4 | RATR_MCS5 | RATR_MCS6 | RATR_MCS7 +#define RATE_ALL_OFDM_2SS RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | RATR_MCS11 |\ + RATR_MCS12 | RATR_MCS13 | RATR_MCS14 | RATR_MCS15 #define RATE_BITMAP_ALL 0xFFFFF diff --git a/drivers/staging/rtl8723bs/include/hal_intf.h b/drivers/staging/rtl8723bs/include/hal_intf.h index 24926ebaf950..1de5acaef8ff 100644 --- a/drivers/staging/rtl8723bs/include/hal_intf.h +++ b/drivers/staging/rtl8723bs/include/hal_intf.h @@ -296,7 +296,7 @@ enum wowlan_subcode { WOWLAN_AP_DISABLE = 13 }; -struct wowlan_ioctl_param{ +struct wowlan_ioctl_param { unsigned int subcode; unsigned int subcode_value; unsigned int wakeup_reason; diff --git a/drivers/staging/rtl8723bs/include/hal_phy.h b/drivers/staging/rtl8723bs/include/hal_phy.h index c6b9bf139ef6..ed0caa0574e3 100644 --- a/drivers/staging/rtl8723bs/include/hal_phy.h +++ b/drivers/staging/rtl8723bs/include/hal_phy.h @@ -25,7 +25,7 @@ #define RF6052_MAX_REG_92C 0x7F #define RF6052_MAX_REG \ - (RF6052_MAX_REG_88E > RF6052_MAX_REG_92C) ? RF6052_MAX_REG_88E: RF6052_MAX_REG_92C + (RF6052_MAX_REG_88E > RF6052_MAX_REG_92C) ? RF6052_MAX_REG_88E : RF6052_MAX_REG_92C #define GET_RF6052_REAL_MAX_REG(_Adapter) RF6052_MAX_REG_92C diff --git a/drivers/staging/rtl8723bs/include/hal_phy_cfg.h b/drivers/staging/rtl8723bs/include/hal_phy_cfg.h index b40868b2e76f..419ddb0733aa 100644 --- a/drivers/staging/rtl8723bs/include/hal_phy_cfg.h +++ b/drivers/staging/rtl8723bs/include/hal_phy_cfg.h @@ -58,9 +58,9 @@ u32 Data ); /* MAC/BB/RF HAL config */ -int PHY_BBConfig8723B(struct adapter *Adapter ); +int PHY_BBConfig8723B(struct adapter *Adapter); -int PHY_RFConfig8723B(struct adapter *Adapter ); +int PHY_RFConfig8723B(struct adapter *Adapter); s32 PHY_MACConfig8723B(struct adapter *padapter); diff --git a/drivers/staging/rtl8723bs/include/hal_pwr_seq.h b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h index 130a94879805..28aca047dce6 100644 --- a/drivers/staging/rtl8723bs/include/hal_pwr_seq.h +++ b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h @@ -46,9 +46,9 @@ {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS},/*Delay 1ms*/ \ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, 0}, /*0x00[5] = 1b'0 release analog Ips to digital , 1:isolation*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT4|BIT3|BIT2), 0},/* disable SW LPS 0x04[10]= 0 and WLSUS_EN 0x04[11]= 0*/ \ - {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0 , BIT0},/* Disable USB suspend */ \ + {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/* Disable USB suspend */ \ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 
PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1 power ready*/ \ - {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0 , 0},/* Enable USB suspend */ \ + {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/* Enable USB suspend */ \ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/* release WLON reset 0x04[16]= 1*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0},/* disable HWPDN 0x04[15]= 0*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT4|BIT3), 0},/* disable WL suspend*/ \ diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h index 2110552b8e59..7243e656d589 100644 --- a/drivers/staging/rtl8723bs/include/ieee80211.h +++ b/drivers/staging/rtl8723bs/include/ieee80211.h @@ -98,8 +98,8 @@ enum { #define WPA_SELECTOR_LEN 4 -extern u8 RTW_WPA_OUI_TYPE[] ; -extern u16 RTW_WPA_VERSION ; +extern u8 RTW_WPA_OUI_TYPE[]; +extern u16 RTW_WPA_VERSION; extern u8 WPA_AUTH_KEY_MGMT_NONE[]; extern u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X[]; extern u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X[]; @@ -139,7 +139,7 @@ typedef enum _RATEID_IDX_ { RATEID_IDX_VHT_1SS = 10, } RATEID_IDX, *PRATEID_IDX; -typedef enum _RATR_TABLE_MODE{ +typedef enum _RATR_TABLE_MODE { RATR_INX_WIRELESS_NGB = 0, /* BGN 40 Mhz 2SS 1SS */ RATR_INX_WIRELESS_NG = 1, /* GN or N */ RATR_INX_WIRELESS_NB = 2, /* BGN 20 Mhz 2SS 1SS or BN */ @@ -149,7 +149,7 @@ typedef enum _RATR_TABLE_MODE{ RATR_INX_WIRELESS_B = 6, RATR_INX_WIRELESS_MC = 7, RATR_INX_WIRELESS_AC_N = 8, -}RATR_TABLE_MODE, *PRATR_TABLE_MODE; +} RATR_TABLE_MODE, *PRATR_TABLE_MODE; enum NETWORK_TYPE @@ -248,7 +248,7 @@ struct ieee_param_ex { u8 data[0]; }; -struct sta_data{ +struct sta_data { u16 aid; u16 capability; int flags; @@ -439,7 +439,7 @@ struct ieee80211_snap_hdr { #define IEEE80211_OFDM_SHIFT_MASK_A 4 -enum MGN_RATE{ +enum MGN_RATE { MGN_1M = 0x02, MGN_2M = 0x04, MGN_5_5M = 0x0B, @@ -906,7 +906,7 @@ enum rtw_ieee80211_spectrum_mgmt_actioncode { RTW_WLAN_ACTION_SPCT_EXT_CHL_SWITCH = 5, }; -enum _PUBLIC_ACTION{ +enum _PUBLIC_ACTION { ACT_PUBLIC_BSSCOEXIST = 0, /* 20/40 BSS Coexistence */ ACT_PUBLIC_DSE_ENABLE = 1, ACT_PUBLIC_DSE_DEENABLE = 2, @@ -953,7 +953,7 @@ enum rtw_ieee80211_back_parties { }; /* VHT features action code */ -enum rtw_ieee80211_vht_actioncode{ +enum rtw_ieee80211_vht_actioncode { RTW_WLAN_ACTION_VHT_COMPRESSED_BEAMFORMING = 0, RTW_WLAN_ACTION_VHT_GROUPID_MANAGEMENT = 1, RTW_WLAN_ACTION_VHT_OPMODE_NOTIFICATION = 2, @@ -1128,7 +1128,7 @@ u8 *rtw_get_ie(u8*pbuf, sint index, sint *len, sint limit); u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, uint *ielen); int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset, u8 eid, u8 *oui, u8 oui_len); -void rtw_set_supported_rate(u8 *SupportedRates, uint mode) ; +void rtw_set_supported_rate(u8 *SupportedRates, uint mode); unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit); unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit); @@ -1142,8 +1142,8 @@ void rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen); u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen); -u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 
target_attr_id , u8 *buf_attr, u32 *len_attr); -u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id , u8 *buf_content, uint *len_content); +u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_attr, u32 *len_attr); +u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_content, uint *len_content); /** * for_each_ie - iterate over continuous IEs diff --git a/drivers/staging/rtl8723bs/include/osdep_intf.h b/drivers/staging/rtl8723bs/include/osdep_intf.h index fa16139fcce6..c59c1384944b 100644 --- a/drivers/staging/rtl8723bs/include/osdep_intf.h +++ b/drivers/staging/rtl8723bs/include/osdep_intf.h @@ -50,7 +50,7 @@ void rtw_reset_drv_sw(struct adapter *padapter); void rtw_dev_unload(struct adapter *padapter); u32 rtw_start_drv_threads(struct adapter *padapter); -void rtw_stop_drv_threads (struct adapter *padapter); +void rtw_stop_drv_threads(struct adapter *padapter); void rtw_cancel_all_timer(struct adapter *padapter); int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h index a40cf7b60a69..5f681899bbec 100644 --- a/drivers/staging/rtl8723bs/include/osdep_service.h +++ b/drivers/staging/rtl8723bs/include/osdep_service.h @@ -80,7 +80,7 @@ enum mstat_f { #define mstat_tf_idx(flags) ((flags)&0xff) #define mstat_ff_idx(flags) (((flags)&0xff00) >> 8) -typedef enum mstat_status{ +typedef enum mstat_status { MSTAT_ALLOC_SUCCESS = 0, MSTAT_ALLOC_FAIL, MSTAT_FREE @@ -117,7 +117,7 @@ static inline void thread_enter(char *name) static inline void flush_signals_thread(void) { - if (signal_pending (current)) + if (signal_pending(current)) { flush_signals(current); } @@ -134,14 +134,14 @@ static inline int rtw_bug_check(void *parg1, void *parg2, void *parg3, void *par } #define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r)) -#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0: 1)) << 2) +#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0 : 1)) << 2) static inline u32 _RND4(u32 sz) { u32 val; - val = ((sz >> 2) + ((sz & 3) ? 1: 0)) << 2; + val = ((sz >> 2) + ((sz & 3) ? 1 : 0)) << 2; return val; @@ -152,7 +152,7 @@ static inline u32 _RND8(u32 sz) u32 val; - val = ((sz >> 3) + ((sz & 7) ? 1: 0)) << 3; + val = ((sz >> 3) + ((sz & 7) ? 
1 : 0)) << 3; return val; diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h index a2d9de866c4b..1710fa3eeb71 100644 --- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h +++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h @@ -80,7 +80,7 @@ static inline struct list_head *get_list_head(struct __queue *queue) static inline void _set_timer(_timer *ptimer, u32 delay_time) { - mod_timer(ptimer , (jiffies+(delay_time*HZ/1000))); + mod_timer(ptimer, (jiffies + (delay_time * HZ / 1000))); } static inline void _cancel_timer(_timer *ptimer, u8 *bcancelled) diff --git a/drivers/staging/rtl8723bs/include/recv_osdep.h b/drivers/staging/rtl8723bs/include/recv_osdep.h index 1056f615d0f9..e85aafc93f6d 100644 --- a/drivers/staging/rtl8723bs/include/recv_osdep.h +++ b/drivers/staging/rtl8723bs/include/recv_osdep.h @@ -9,7 +9,7 @@ extern sint _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter); -extern void _rtw_free_recv_priv (struct recv_priv *precvpriv); +extern void _rtw_free_recv_priv(struct recv_priv *precvpriv); extern s32 rtw_recv_entry(union recv_frame *precv_frame); @@ -19,7 +19,7 @@ extern void rtw_recv_returnpacket(_nic_hdl cnxt, _pkt *preturnedpkt); extern void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup); int rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter); -void rtw_free_recv_priv (struct recv_priv *precvpriv); +void rtw_free_recv_priv(struct recv_priv *precvpriv); void rtw_os_recv_resource_alloc(struct adapter *padapter, union recv_frame *precvframe); diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h b/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h index ecfd2d383ac2..3bfb0e9be582 100644 --- a/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h +++ b/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h @@ -11,7 +11,7 @@ /* H2C CMD DEFINITION ------------------------------------------------ */ /* */ -enum h2c_cmd_8723B{ +enum h2c_cmd_8723B { /* Common Class: 000 */ H2C_8723B_RSVD_PAGE = 0x00, H2C_8723B_MEDIA_STATUS_RPT = 0x01, diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_rf.h b/drivers/staging/rtl8723bs/include/rtl8723b_rf.h index 987b9f12a4f4..d712c6d36a08 100644 --- a/drivers/staging/rtl8723bs/include/rtl8723b_rf.h +++ b/drivers/staging/rtl8723bs/include/rtl8723b_rf.h @@ -8,7 +8,7 @@ #define __RTL8723B_RF_H__ -int PHY_RF6052_Config8723B(struct adapter *Adapter ); +int PHY_RF6052_Config8723B(struct adapter *Adapter); void PHY_RF6052SetBandwidth8723B(struct adapter *Adapter, diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h b/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h index 23a6069c3e5d..320ca65e5faa 100644 --- a/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h +++ b/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h @@ -176,7 +176,7 @@ typedef struct txdesc_8723b u32 txbf_path:1; u32 seq:12; u32 final_data_rate:8; -}TXDESC_8723B, *PTXDESC_8723B; +} TXDESC_8723B, *PTXDESC_8723B; #ifndef __INC_HAL8723BDESC_H #define __INC_HAL8723BDESC_H diff --git a/drivers/staging/rtl8723bs/include/rtw_byteorder.h b/drivers/staging/rtl8723bs/include/rtw_byteorder.h index f76fbfbed4c7..c3a65f971793 100644 --- a/drivers/staging/rtl8723bs/include/rtw_byteorder.h +++ b/drivers/staging/rtl8723bs/include/rtw_byteorder.h @@ -7,7 +7,7 @@ #ifndef _RTL871X_BYTEORDER_H_ #define _RTL871X_BYTEORDER_H_ -#if defined (__LITTLE_ENDIAN) +#if defined(__LITTLE_ENDIAN) #include <linux/byteorder/little_endian.h> #else # include 
<linux/byteorder/big_endian.h> diff --git a/drivers/staging/rtl8723bs/include/rtw_cmd.h b/drivers/staging/rtl8723bs/include/rtw_cmd.h index b83824ca2e31..3e025a652e38 100644 --- a/drivers/staging/rtl8723bs/include/rtw_cmd.h +++ b/drivers/staging/rtl8723bs/include/rtw_cmd.h @@ -75,7 +75,7 @@ do {\ INIT_LIST_HEAD(&pcmd->list);\ pcmd->cmdcode = code;\ pcmd->parmbuf = (u8 *)(pparm);\ - pcmd->cmdsz = sizeof (*pparm);\ + pcmd->cmdsz = sizeof(*pparm);\ pcmd->rsp = NULL;\ pcmd->rspsz = 0;\ } while (0) @@ -129,9 +129,9 @@ extern void rtw_free_cmd_obj(struct cmd_obj *pcmd); void rtw_stop_cmd_thread(struct adapter *adapter); int rtw_cmd_thread(void *context); -extern void rtw_free_cmd_priv (struct cmd_priv *pcmdpriv); +extern void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv); -extern void rtw_free_evt_priv (struct evt_priv *pevtpriv); +extern void rtw_free_evt_priv(struct evt_priv *pevtpriv); extern void rtw_evt_notify_isr(struct evt_priv *pevtpriv); enum rtw_drvextra_cmd_id @@ -163,10 +163,10 @@ enum LPS_CTRL_TYPE { LPS_CTRL_SCAN = 0, LPS_CTRL_JOINBSS = 1, - LPS_CTRL_CONNECT =2, - LPS_CTRL_DISCONNECT =3, - LPS_CTRL_SPECIAL_PACKET =4, - LPS_CTRL_LEAVE =5, + LPS_CTRL_CONNECT = 2, + LPS_CTRL_DISCONNECT = 3, + LPS_CTRL_SPECIAL_PACKET = 4, + LPS_CTRL_LEAVE = 5, LPS_CTRL_TRAFFIC_BUSY = 6, }; @@ -692,40 +692,40 @@ struct getratable_rsp { /* to get TX, RX retry count */ -struct gettxretrycnt_parm{ +struct gettxretrycnt_parm { unsigned int rsvd; }; -struct gettxretrycnt_rsp{ +struct gettxretrycnt_rsp { unsigned long tx_retrycnt; }; -struct getrxretrycnt_parm{ +struct getrxretrycnt_parm { unsigned int rsvd; }; -struct getrxretrycnt_rsp{ +struct getrxretrycnt_rsp { unsigned long rx_retrycnt; }; /* to get BCNOK, BCNERR count */ -struct getbcnokcnt_parm{ +struct getbcnokcnt_parm { unsigned int rsvd; }; -struct getbcnokcnt_rsp{ +struct getbcnokcnt_rsp { unsigned long bcnokcnt; }; -struct getbcnerrcnt_parm{ +struct getbcnerrcnt_parm { unsigned int rsvd; }; -struct getbcnerrcnt_rsp{ +struct getbcnerrcnt_rsp { unsigned long bcnerrcnt; }; /* to get current TX power level */ -struct getcurtxpwrlevel_parm{ +struct getcurtxpwrlevel_parm { unsigned int rsvd; }; -struct getcurtxpwrlevel_rsp{ +struct getcurtxpwrlevel_rsp { unsigned short tx_power; }; @@ -883,56 +883,56 @@ struct _cmd_callback { enum rtw_h2c_cmd { - GEN_CMD_CODE(_Read_MACREG) , /*0*/ - GEN_CMD_CODE(_Write_MACREG) , - GEN_CMD_CODE(_Read_BBREG) , - GEN_CMD_CODE(_Write_BBREG) , - GEN_CMD_CODE(_Read_RFREG) , - GEN_CMD_CODE(_Write_RFREG) , /*5*/ - GEN_CMD_CODE(_Read_EEPROM) , - GEN_CMD_CODE(_Write_EEPROM) , - GEN_CMD_CODE(_Read_EFUSE) , - GEN_CMD_CODE(_Write_EFUSE) , - - GEN_CMD_CODE(_Read_CAM) , /*10*/ - GEN_CMD_CODE(_Write_CAM) , + GEN_CMD_CODE(_Read_MACREG), /*0*/ + GEN_CMD_CODE(_Write_MACREG), + GEN_CMD_CODE(_Read_BBREG), + GEN_CMD_CODE(_Write_BBREG), + GEN_CMD_CODE(_Read_RFREG), + GEN_CMD_CODE(_Write_RFREG), /*5*/ + GEN_CMD_CODE(_Read_EEPROM), + GEN_CMD_CODE(_Write_EEPROM), + GEN_CMD_CODE(_Read_EFUSE), + GEN_CMD_CODE(_Write_EFUSE), + + GEN_CMD_CODE(_Read_CAM), /*10*/ + GEN_CMD_CODE(_Write_CAM), GEN_CMD_CODE(_setBCNITV), GEN_CMD_CODE(_setMBIDCFG), GEN_CMD_CODE(_JoinBss), /*14*/ - GEN_CMD_CODE(_DisConnect) , /*15*/ - GEN_CMD_CODE(_CreateBss) , - GEN_CMD_CODE(_SetOpMode) , + GEN_CMD_CODE(_DisConnect), /*15*/ + GEN_CMD_CODE(_CreateBss), + GEN_CMD_CODE(_SetOpMode), GEN_CMD_CODE(_SiteSurvey), /*18*/ - GEN_CMD_CODE(_SetAuth) , - - GEN_CMD_CODE(_SetKey) , /*20*/ - GEN_CMD_CODE(_SetStaKey) , - GEN_CMD_CODE(_SetAssocSta) , - GEN_CMD_CODE(_DelAssocSta) , - 
GEN_CMD_CODE(_SetStaPwrState) , - GEN_CMD_CODE(_SetBasicRate) , /*25*/ - GEN_CMD_CODE(_GetBasicRate) , - GEN_CMD_CODE(_SetDataRate) , - GEN_CMD_CODE(_GetDataRate) , - GEN_CMD_CODE(_SetPhyInfo) , - - GEN_CMD_CODE(_GetPhyInfo) , /*30*/ - GEN_CMD_CODE(_SetPhy) , - GEN_CMD_CODE(_GetPhy) , - GEN_CMD_CODE(_readRssi) , - GEN_CMD_CODE(_readGain) , - GEN_CMD_CODE(_SetAtim) , /*35*/ - GEN_CMD_CODE(_SetPwrMode) , + GEN_CMD_CODE(_SetAuth), + + GEN_CMD_CODE(_SetKey), /*20*/ + GEN_CMD_CODE(_SetStaKey), + GEN_CMD_CODE(_SetAssocSta), + GEN_CMD_CODE(_DelAssocSta), + GEN_CMD_CODE(_SetStaPwrState), + GEN_CMD_CODE(_SetBasicRate), /*25*/ + GEN_CMD_CODE(_GetBasicRate), + GEN_CMD_CODE(_SetDataRate), + GEN_CMD_CODE(_GetDataRate), + GEN_CMD_CODE(_SetPhyInfo), + + GEN_CMD_CODE(_GetPhyInfo), /*30*/ + GEN_CMD_CODE(_SetPhy), + GEN_CMD_CODE(_GetPhy), + GEN_CMD_CODE(_readRssi), + GEN_CMD_CODE(_readGain), + GEN_CMD_CODE(_SetAtim), /*35*/ + GEN_CMD_CODE(_SetPwrMode), GEN_CMD_CODE(_JoinbssRpt), - GEN_CMD_CODE(_SetRaTable) , - GEN_CMD_CODE(_GetRaTable) , + GEN_CMD_CODE(_SetRaTable), + GEN_CMD_CODE(_GetRaTable), GEN_CMD_CODE(_GetCCXReport), /*40*/ GEN_CMD_CODE(_GetDTMReport), GEN_CMD_CODE(_GetTXRateStatistics), GEN_CMD_CODE(_SetUsbSuspend), GEN_CMD_CODE(_SetH2cLbk), - GEN_CMD_CODE(_AddBAReq) , /*45*/ + GEN_CMD_CODE(_AddBAReq), /*45*/ GEN_CMD_CODE(_SetChannel), /*46*/ GEN_CMD_CODE(_SetTxPower), GEN_CMD_CODE(_SwitchAntenna), diff --git a/drivers/staging/rtl8723bs/include/rtw_debug.h b/drivers/staging/rtl8723bs/include/rtw_debug.h index 22fc5d730d7b..c90adfb87261 100644 --- a/drivers/staging/rtl8723bs/include/rtw_debug.h +++ b/drivers/staging/rtl8723bs/include/rtw_debug.h @@ -131,13 +131,13 @@ #define _MODULE_DEFINE_ _module_efuse_ #endif -#define RT_TRACE(_Comp, _Level, Fmt) do{}while (0) -#define RT_PRINT_DATA(_Comp, _Level, _TitleString, _HexData, _HexDataLen) do{}while (0) +#define RT_TRACE(_Comp, _Level, Fmt) do {} while (0) +#define RT_PRINT_DATA(_Comp, _Level, _TitleString, _HexData, _HexDataLen) do {} while (0) #define DBG_871X(x, ...) do {} while (0) #define MSG_8192C(x, ...) do {} while (0) -#define DBG_8192C(x,...) do {} while (0) -#define DBG_871X_LEVEL(x,...) do {} while (0) +#define DBG_8192C(x, ...) do {} while (0) +#define DBG_871X_LEVEL(x, ...) do {} while (0) #undef _dbgdump @@ -161,8 +161,8 @@ _dbgdump(DRIVER_PREFIX"ERROR " fmt, ##arg);\ else \ _dbgdump(DRIVER_PREFIX fmt, ##arg);\ - }\ - }while (0) + } \ + } while (0) /* without driver-defined prefix */ #undef _DBG_871X_LEVEL @@ -173,8 +173,8 @@ _dbgdump("ERROR " fmt, ##arg);\ else \ _dbgdump(fmt, ##arg);\ - }\ - }while (0) + } \ + } while (0) #define RTW_DBGDUMP NULL /* 'stream' for _dbgdump */ @@ -203,17 +203,17 @@ #undef DBG_871X #define DBG_871X(...) do {\ _dbgdump(DRIVER_PREFIX __VA_ARGS__);\ - }while (0) + } while (0) #undef MSG_8192C #define MSG_8192C(...) do {\ _dbgdump(DRIVER_PREFIX __VA_ARGS__);\ - }while (0) + } while (0) #undef DBG_8192C #define DBG_8192C(...) 
do {\ _dbgdump(DRIVER_PREFIX __VA_ARGS__);\ - }while (0) + } while (0) #endif /* defined(_dbgdump) */ #endif /* DEBUG */ @@ -227,8 +227,8 @@ if ((_Comp & GlobalDebugComponents) && (_Level <= GlobalDebugLevel)) {\ _dbgdump("%s [0x%08x,%d]", DRIVER_PREFIX, (unsigned int)_Comp, _Level);\ _dbgdump Fmt;\ - }\ - }while (0) + } \ + } while (0) #endif /* defined(_dbgdump) && defined(_MODULE_DEFINE_) */ @@ -242,7 +242,7 @@ u8 *ptr = (u8 *)_HexData; \ _dbgdump("%s", DRIVER_PREFIX); \ _dbgdump(_TitleString); \ - for (__i = 0; __i<(int)_HexDataLen; __i++) \ + for (__i = 0; __i < (int)_HexDataLen; __i++) \ { \ _dbgdump("%02X%s", ptr[__i], (((__i + 1) % 4) == 0)?" ":" "); \ if (((__i + 1) % 16) == 0) _dbgdump("\n"); \ diff --git a/drivers/staging/rtl8723bs/include/rtw_eeprom.h b/drivers/staging/rtl8723bs/include/rtw_eeprom.h index 1fcd79fa1c21..704c6461333a 100644 --- a/drivers/staging/rtl8723bs/include/rtw_eeprom.h +++ b/drivers/staging/rtl8723bs/include/rtw_eeprom.h @@ -91,7 +91,7 @@ typedef enum _RT_CUSTOMER_ID RT_CID_819x_ALPHA_Dlink = 44,/* add by ylb 20121012 for customer led for alpha */ RT_CID_WNC_NEC = 45,/* add by page for NEC */ RT_CID_DNI_BUFFALO = 46,/* add by page for NEC */ -}RT_CUSTOMER_ID, *PRT_CUSTOMER_ID; +} RT_CUSTOMER_ID, *PRT_CUSTOMER_ID; struct eeprom_priv { diff --git a/drivers/staging/rtl8723bs/include/rtw_efuse.h b/drivers/staging/rtl8723bs/include/rtw_efuse.h index 9d9a5a3e336d..4abcbbc8f513 100644 --- a/drivers/staging/rtl8723bs/include/rtw_efuse.h +++ b/drivers/staging/rtl8723bs/include/rtw_efuse.h @@ -57,15 +57,15 @@ enum _EFUSE_DEF_TYPE { #define EFUSE_MAX_WORD_UNIT 4 /*------------------------------Define structure----------------------------*/ -typedef struct PG_PKT_STRUCT_A{ +typedef struct PG_PKT_STRUCT_A { u8 offset; u8 word_en; u8 data[8]; u8 word_cnts; -}PGPKT_STRUCT,*PPGPKT_STRUCT; +} PGPKT_STRUCT, *PPGPKT_STRUCT; /*------------------------------Define structure----------------------------*/ -typedef struct _EFUSE_HAL{ +typedef struct _EFUSE_HAL { u8 fakeEfuseBank; u32 fakeEfuseUsedBytes; u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE]; @@ -82,7 +82,7 @@ typedef struct _EFUSE_HAL{ u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE]; u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN]; u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN]; -}EFUSE_HAL, *PEFUSE_HAL; +} EFUSE_HAL, *PEFUSE_HAL; /*------------------------Export global variable----------------------------*/ diff --git a/drivers/staging/rtl8723bs/include/rtw_event.h b/drivers/staging/rtl8723bs/include/rtw_event.h index ee80aa21eb10..aeaabab780e5 100644 --- a/drivers/staging/rtl8723bs/include/rtw_event.h +++ b/drivers/staging/rtl8723bs/include/rtw_event.h @@ -82,7 +82,7 @@ struct fwevent { #define C2HEVENT_SZ 32 -struct event_node{ +struct event_node { unsigned char *node; unsigned char evt_code; unsigned short evt_sz; diff --git a/drivers/staging/rtl8723bs/include/rtw_ht.h b/drivers/staging/rtl8723bs/include/rtw_ht.h index 952c804dd0fd..4c224c128327 100644 --- a/drivers/staging/rtl8723bs/include/rtw_ht.h +++ b/drivers/staging/rtl8723bs/include/rtw_ht.h @@ -38,7 +38,7 @@ struct ht_priv }; -typedef enum AGGRE_SIZE{ +typedef enum AGGRE_SIZE { HT_AGG_SIZE_8K = 0, HT_AGG_SIZE_16K = 1, HT_AGG_SIZE_32K = 2, @@ -47,9 +47,9 @@ typedef enum AGGRE_SIZE{ VHT_AGG_SIZE_256K = 5, VHT_AGG_SIZE_512K = 6, VHT_AGG_SIZE_1024K = 7, -}AGGRE_SIZE_E, *PAGGRE_SIZE_E; +} AGGRE_SIZE_E, *PAGGRE_SIZE_E; -typedef enum _RT_HT_INF0_CAP{ +typedef enum _RT_HT_INF0_CAP { RT_HT_CAP_USE_TURBO_AGGR = 0x01, RT_HT_CAP_USE_LONG_PREAMBLE = 0x02, 
RT_HT_CAP_USE_AMPDU = 0x04, @@ -58,13 +58,13 @@ typedef enum _RT_HT_INF0_CAP{ RT_HT_CAP_USE_92SE = 0x20, RT_HT_CAP_USE_88C_92C = 0x40, RT_HT_CAP_USE_AP_CLIENT_MODE = 0x80, /* AP team request to reserve this bit, by Emily */ -}RT_HT_INF0_CAPBILITY, *PRT_HT_INF0_CAPBILITY; +} RT_HT_INF0_CAPBILITY, *PRT_HT_INF0_CAPBILITY; -typedef enum _RT_HT_INF1_CAP{ +typedef enum _RT_HT_INF1_CAP { RT_HT_CAP_USE_VIDEO_CLIENT = 0x01, RT_HT_CAP_USE_JAGUAR_BCUT = 0x02, RT_HT_CAP_USE_JAGUAR_CCUT = 0x04, -}RT_HT_INF1_CAPBILITY, *PRT_HT_INF1_CAPBILITY; +} RT_HT_INF1_CAPBILITY, *PRT_HT_INF1_CAPBILITY; #define LDPC_HT_ENABLE_RX BIT0 #define LDPC_HT_ENABLE_TX BIT1 diff --git a/drivers/staging/rtl8723bs/include/rtw_io.h b/drivers/staging/rtl8723bs/include/rtw_io.h index 99d104b3647a..2581b5165d1b 100644 --- a/drivers/staging/rtl8723bs/include/rtw_io.h +++ b/drivers/staging/rtl8723bs/include/rtw_io.h @@ -168,7 +168,7 @@ struct reg_protocol_rd { u32 Byte2Access : 1; u32 Byte1Access : 1; - u32 BurstMode :1 ; + u32 BurstMode :1; u32 FixOrContinuous : 1; u32 Reserved4 : 16; @@ -224,7 +224,7 @@ struct reg_protocol_wt { u32 Byte2Access : 1; u32 Byte1Access : 1; - u32 BurstMode :1 ; + u32 BurstMode :1; u32 FixOrContinuous : 1; u32 Reserved4 : 16; @@ -259,7 +259,7 @@ struct io_queue { struct intf_hdl intf; }; -struct io_priv{ +struct io_priv { struct adapter *padapter; diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h index 362737b83c3a..14e4bce28856 100644 --- a/drivers/staging/rtl8723bs/include/rtw_mlme.h +++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h @@ -85,7 +85,7 @@ typedef enum _RT_SCAN_TYPE { SCAN_PASSIVE, SCAN_ACTIVE, SCAN_MIX, -}RT_SCAN_TYPE, *PRT_SCAN_TYPE; +} RT_SCAN_TYPE, *PRT_SCAN_TYPE; enum _BAND { GHZ24_50 = 0, @@ -135,7 +135,7 @@ struct sitesurvey_ctrl { _timer sitesurvey_ctrl_timer; }; -typedef struct _RT_LINK_DETECT_T{ +typedef struct _RT_LINK_DETECT_T { u32 NumTxOkInPeriod; u32 NumRxOkInPeriod; u32 NumRxUnicastOkInPeriod; @@ -148,62 +148,62 @@ typedef struct _RT_LINK_DETECT_T{ /* u8 TrafficBusyState; */ u8 TrafficTransitionCount; u32 LowPowerTransitionCount; -}RT_LINK_DETECT_T, *PRT_LINK_DETECT_T; +} RT_LINK_DETECT_T, *PRT_LINK_DETECT_T; struct profile_info { u8 ssidlen; - u8 ssid[ WLAN_SSID_MAXLEN ]; - u8 peermac[ ETH_ALEN ]; + u8 ssid[WLAN_SSID_MAXLEN]; + u8 peermac[ETH_ALEN]; }; -struct tx_invite_req_info{ +struct tx_invite_req_info { u8 token; u8 benable; - u8 go_ssid[ WLAN_SSID_MAXLEN ]; + u8 go_ssid[WLAN_SSID_MAXLEN]; u8 ssidlen; - u8 go_bssid[ ETH_ALEN ]; - u8 peer_macaddr[ ETH_ALEN ]; + u8 go_bssid[ETH_ALEN]; + u8 peer_macaddr[ETH_ALEN]; u8 operating_ch; /* This information will be set by using the p2p_set op_ch =x */ u8 peer_ch; /* The listen channel for peer P2P device */ }; -struct tx_invite_resp_info{ +struct tx_invite_resp_info { u8 token; /* Used to record the dialog token of p2p invitation request frame. */ }; -struct tx_provdisc_req_info{ +struct tx_provdisc_req_info { u16 wps_config_method_request; /* Used when sending the provisioning request frame */ u16 peer_channel_num[2]; /* The channel number which the receiver stands. 
*/ struct ndis_802_11_ssid ssid; - u8 peerDevAddr[ ETH_ALEN ]; /* Peer device address */ - u8 peerIFAddr[ ETH_ALEN ]; /* Peer interface address */ + u8 peerDevAddr[ETH_ALEN]; /* Peer device address */ + u8 peerIFAddr[ETH_ALEN]; /* Peer interface address */ u8 benable; /* This provision discovery request frame is trigger to send or not */ }; -struct rx_provdisc_req_info{ /* When peer device issue prov_disc_req first, we should store the following informations */ - u8 peerDevAddr[ ETH_ALEN ]; /* Peer device address */ +struct rx_provdisc_req_info { /* When peer device issue prov_disc_req first, we should store the following informations */ + u8 peerDevAddr[ETH_ALEN]; /* Peer device address */ u8 strconfig_method_desc_of_prov_disc_req[4]; /* description for the config method located in the provisioning discovery request frame. */ /* The UI must know this information to know which config method the remote p2p device is requiring. */ }; -struct tx_nego_req_info{ +struct tx_nego_req_info { u16 peer_channel_num[2]; /* The channel number which the receiver stands. */ - u8 peerDevAddr[ ETH_ALEN ]; /* Peer device address */ + u8 peerDevAddr[ETH_ALEN]; /* Peer device address */ u8 benable; /* This negoitation request frame is trigger to send or not */ }; -struct group_id_info{ - u8 go_device_addr[ ETH_ALEN ]; /* The GO's device address of this P2P group */ - u8 ssid[ WLAN_SSID_MAXLEN ]; /* The SSID of this P2P group */ +struct group_id_info { + u8 go_device_addr[ETH_ALEN]; /* The GO's device address of this P2P group */ + u8 ssid[WLAN_SSID_MAXLEN]; /* The SSID of this P2P group */ }; -struct scan_limit_info{ +struct scan_limit_info { u8 scan_op_ch_only; /* When this flag is set, the driver should just scan the operation channel */ u8 operation_ch[2]; /* Store the operation channel of invitation request frame */ }; -struct cfg80211_wifidirect_info{ +struct cfg80211_wifidirect_info { _timer remain_on_ch_timer; u8 restore_channel; struct ieee80211_channel remain_on_ch_channel; @@ -213,7 +213,7 @@ struct cfg80211_wifidirect_info{ unsigned long last_ro_ch_time; /* this will be updated at the beginning and end of ro_ch */ }; -struct wifidirect_info{ +struct wifidirect_info { struct adapter * padapter; _timer find_phase_timer; _timer restore_p2p_state_timer; @@ -225,7 +225,7 @@ struct wifidirect_info{ struct tx_provdisc_req_info tx_prov_disc_info; struct rx_provdisc_req_info rx_prov_disc_info; struct tx_invite_req_info invitereq_info; - struct profile_info profileinfo[ P2P_MAX_PERSISTENT_GROUP_NUM ]; /* Store the profile information of persistent group */ + struct profile_info profileinfo[P2P_MAX_PERSISTENT_GROUP_NUM]; /* Store the profile information of persistent group */ struct tx_invite_resp_info inviteresp_info; struct tx_nego_req_info nego_req_info; struct group_id_info groupid_info; /* Store the group id information when doing the group negotiation handshake. */ @@ -243,17 +243,17 @@ struct wifidirect_info{ u8 support_rate[8]; u8 p2p_wildcard_ssid[P2P_WILDCARD_SSID_LEN]; u8 intent; /* should only include the intent value. */ - u8 p2p_peer_interface_addr[ ETH_ALEN ]; - u8 p2p_peer_device_addr[ ETH_ALEN ]; + u8 p2p_peer_interface_addr[ETH_ALEN]; + u8 p2p_peer_device_addr[ETH_ALEN]; u8 peer_intent; /* Included the intent value and tie breaker value. 
*/ - u8 device_name[ WPS_MAX_DEVICE_NAME_LEN ]; /* Device name for displaying on searching device screen */ + u8 device_name[WPS_MAX_DEVICE_NAME_LEN]; /* Device name for displaying on searching device screen */ u8 device_name_len; u8 profileindex; /* Used to point to the index of profileinfo array */ u8 peer_operating_ch; u8 find_phase_state_exchange_cnt; u16 device_password_id_for_nego; /* The device password ID for group negotation */ u8 negotiation_dialog_token; - u8 nego_ssid[ WLAN_SSID_MAXLEN ]; /* SSID information for group negotitation */ + u8 nego_ssid[WLAN_SSID_MAXLEN]; /* SSID information for group negotitation */ u8 nego_ssidlen; u8 p2p_group_ssid[WLAN_SSID_MAXLEN]; u8 p2p_group_ssid_len; @@ -287,13 +287,13 @@ struct wifidirect_info{ u8 driver_interface; /* Indicate DRIVER_WEXT or DRIVER_CFG80211 */ }; -struct tdls_ss_record{ /* signal strength record */ +struct tdls_ss_record { /* signal strength record */ u8 macaddr[ETH_ALEN]; u8 rx_pwd_ba11; u8 is_tdls_sta; /* true: direct link sta, false: else */ }; -struct tdls_info{ +struct tdls_info { u8 ap_prohibited; u8 link_established; u8 sta_cnt; @@ -489,7 +489,7 @@ int event_thread(void *context); extern void rtw_free_network_queue(struct adapter *adapter, u8 isfreeall); extern int rtw_init_mlme_priv(struct adapter *adapter);/* (struct mlme_priv *pmlmepriv); */ -extern void rtw_free_mlme_priv (struct mlme_priv *pmlmepriv); +extern void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv); extern sint rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv); @@ -526,7 +526,7 @@ static inline void set_fwstate(struct mlme_priv *pmlmepriv, sint state) { pmlmepriv->fw_state |= state; /* FOR HW integration */ - if (_FW_UNDER_SURVEY ==state) { + if (_FW_UNDER_SURVEY == state) { pmlmepriv->bScanInProcess = true; } } @@ -535,7 +535,7 @@ static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, sint state) { pmlmepriv->fw_state &= ~state; /* FOR HW integration */ - if (_FW_UNDER_SURVEY ==state) { + if (_FW_UNDER_SURVEY == state) { pmlmepriv->bScanInProcess = false; } } diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h index 73e8ec09b6e1..6c1ed6211c7e 100644 --- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h +++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h @@ -182,7 +182,7 @@ typedef enum _RT_CHANNEL_DOMAIN /* Add new channel plan above this line =============== */ RT_CHANNEL_DOMAIN_MAX, RT_CHANNEL_DOMAIN_REALTEK_DEFINE = 0x7F, -}RT_CHANNEL_DOMAIN, *PRT_CHANNEL_DOMAIN; +} RT_CHANNEL_DOMAIN, *PRT_CHANNEL_DOMAIN; typedef enum _RT_CHANNEL_DOMAIN_2G { @@ -195,7 +195,7 @@ typedef enum _RT_CHANNEL_DOMAIN_2G RT_CHANNEL_DOMAIN_2G_NULL = 0x06, /* Add new channel plan above this line =============== */ RT_CHANNEL_DOMAIN_2G_MAX, -}RT_CHANNEL_DOMAIN_2G, *PRT_CHANNEL_DOMAIN_2G; +} RT_CHANNEL_DOMAIN_2G, *PRT_CHANNEL_DOMAIN_2G; typedef enum _RT_CHANNEL_DOMAIN_5G { @@ -237,33 +237,33 @@ typedef enum _RT_CHANNEL_DOMAIN_5G RT_CHANNEL_DOMAIN_5G_JAPAN_NO_DFS = 0x21, RT_CHANNEL_DOMAIN_5G_FCC4_NO_DFS = 0x22, RT_CHANNEL_DOMAIN_5G_MAX, -}RT_CHANNEL_DOMAIN_5G, *PRT_CHANNEL_DOMAIN_5G; +} RT_CHANNEL_DOMAIN_5G, *PRT_CHANNEL_DOMAIN_5G; -#define rtw_is_channel_plan_valid(chplan) (chplan<RT_CHANNEL_DOMAIN_MAX || chplan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE) +#define rtw_is_channel_plan_valid(chplan) (chplan < RT_CHANNEL_DOMAIN_MAX || chplan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE) typedef struct _RT_CHANNEL_PLAN { unsigned char Channel[MAX_CHANNEL_NUM]; unsigned char Len; -}RT_CHANNEL_PLAN, 
*PRT_CHANNEL_PLAN; +} RT_CHANNEL_PLAN, *PRT_CHANNEL_PLAN; typedef struct _RT_CHANNEL_PLAN_2G { unsigned char Channel[MAX_CHANNEL_NUM_2G]; unsigned char Len; -}RT_CHANNEL_PLAN_2G, *PRT_CHANNEL_PLAN_2G; +} RT_CHANNEL_PLAN_2G, *PRT_CHANNEL_PLAN_2G; typedef struct _RT_CHANNEL_PLAN_5G { unsigned char Channel[MAX_CHANNEL_NUM_5G]; unsigned char Len; -}RT_CHANNEL_PLAN_5G, *PRT_CHANNEL_PLAN_5G; +} RT_CHANNEL_PLAN_5G, *PRT_CHANNEL_PLAN_5G; typedef struct _RT_CHANNEL_PLAN_MAP { unsigned char Index2G; unsigned char Index5G; -}RT_CHANNEL_PLAN_MAP, *PRT_CHANNEL_PLAN_MAP; +} RT_CHANNEL_PLAN_MAP, *PRT_CHANNEL_PLAN_MAP; enum Associated_AP { @@ -299,7 +299,7 @@ typedef enum _HT_IOT_PEER HT_IOT_PEER_REALTEK_JAGUAR_BCUTAP = 16, HT_IOT_PEER_REALTEK_JAGUAR_CCUTAP = 17, HT_IOT_PEER_MAX = 18 -}HT_IOT_PEER_E, *PHTIOT_PEER_E; +} HT_IOT_PEER_E, *PHTIOT_PEER_E; enum SCAN_STATE @@ -353,7 +353,7 @@ struct ss_res #define WIFI_FW_ASSOC_STATE 0x00002000 #define WIFI_FW_ASSOC_SUCCESS 0x00004000 -#define WIFI_FW_LINKING_STATE (WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE | WIFI_FW_AUTH_SUCCESS |WIFI_FW_ASSOC_STATE) +#define WIFI_FW_LINKING_STATE (WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE | WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE) struct FW_Sta_Info { @@ -434,7 +434,7 @@ typedef struct _RT_CHANNEL_INFO { u8 ChannelNum; /* The channel number. */ RT_SCAN_TYPE ScanType; /* Scan type such as passive or active scan. */ -}RT_CHANNEL_INFO, *PRT_CHANNEL_INFO; +} RT_CHANNEL_INFO, *PRT_CHANNEL_INFO; int rtw_ch_set_search_ch(RT_CHANNEL_INFO *ch_set, const u32 ch); bool rtw_mlme_band_check(struct adapter *adapter, const u32 ch); @@ -537,7 +537,7 @@ struct mlme_ext_priv void init_mlme_default_rate_set(struct adapter *padapter); void init_mlme_ext_priv(struct adapter *padapter); int init_hw_mlme_ext(struct adapter *padapter); -void free_mlme_ext_priv (struct mlme_ext_priv *pmlmeext); +void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext); extern void init_mlme_ext_timer(struct adapter *padapter); extern void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta); extern struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv); @@ -571,7 +571,7 @@ void SelectChannel(struct adapter *padapter, unsigned char channel); unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval); -void read_cam(struct adapter *padapter , u8 entry, u8 *get_key); +void read_cam(struct adapter *padapter, u8 entry, u8 *get_key); /* modify HW only */ void _write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key); @@ -708,8 +708,8 @@ void linked_status_chk(struct adapter *padapter); void _linked_info_dump(struct adapter *padapter); -void survey_timer_hdl (struct timer_list *t); -void link_timer_hdl (struct timer_list *t); +void survey_timer_hdl(struct timer_list *t); +void link_timer_hdl(struct timer_list *t); void addba_timer_hdl(struct timer_list *t); void sa_query_timer_hdl(struct timer_list *t); /* void reauth_timer_hdl(struct adapter *padapter); */ @@ -802,8 +802,8 @@ struct C2HEvent_Header unsigned int rsvd; }; -void rtw_dummy_event_callback(struct adapter *adapter , u8 *pbuf); -void rtw_fwdbg_event_callback(struct adapter *adapter , u8 *pbuf); +void rtw_dummy_event_callback(struct adapter *adapter, u8 *pbuf); +void rtw_fwdbg_event_callback(struct adapter *adapter, u8 *pbuf); enum rtw_c2h_event { @@ -818,10 +818,10 @@ enum rtw_c2h_event GEN_EVT_CODE(_Survey), /*8*/ GEN_EVT_CODE(_SurveyDone), /*9*/ - GEN_EVT_CODE(_JoinBss) , /*10*/ + GEN_EVT_CODE(_JoinBss), /*10*/ GEN_EVT_CODE(_AddSTA), GEN_EVT_CODE(_DelSTA), - 
GEN_EVT_CODE(_AtimDone) , + GEN_EVT_CODE(_AtimDone), GEN_EVT_CODE(_TX_Report), GEN_EVT_CODE(_CCX_Report), /*15*/ GEN_EVT_CODE(_DTM_Report), @@ -851,7 +851,7 @@ static struct fwevent wlanevents[] = {0, NULL}, {0, NULL}, {0, &rtw_survey_event_callback}, /*8*/ - {sizeof (struct surveydone_event), &rtw_surveydone_event_callback}, /*9*/ + {sizeof(struct surveydone_event), &rtw_surveydone_event_callback}, /*9*/ {0, &rtw_joinbss_event_callback}, /*10*/ {sizeof(struct stassoc_event), &rtw_stassoc_event_callback}, diff --git a/drivers/staging/rtl8723bs/include/rtw_mp.h b/drivers/staging/rtl8723bs/include/rtw_mp.h index bb3970d58573..e5a801b40582 100644 --- a/drivers/staging/rtl8723bs/include/rtw_mp.h +++ b/drivers/staging/rtl8723bs/include/rtw_mp.h @@ -154,7 +154,7 @@ typedef struct _MPT_CONTEXT u32 mptOutLen; u8 mptOutBuf[100]; -}MPT_CONTEXT, *PMPT_CONTEXT; +} MPT_CONTEXT, *PMPT_CONTEXT; /* endif */ /* E-Fuse */ @@ -276,7 +276,7 @@ typedef struct _IOCMD_STRUCT_ { u8 cmdclass; u16 value; u8 index; -}IOCMD_STRUCT; +} IOCMD_STRUCT; struct rf_reg_param { u32 path; @@ -315,7 +315,7 @@ extern u8 mpdatarate[NumRates]; /* MP set force data rate base on the definition. */ enum MPT_RATE_INDEX { /* CCK rate. */ - MPT_RATE_1M = 0 , /* 0 */ + MPT_RATE_1M = 0, /* 0 */ MPT_RATE_2M, MPT_RATE_55M, MPT_RATE_11M, /* 3 */ @@ -473,9 +473,9 @@ void Hal_SetBandwidth(struct adapter *padapter); void Hal_SetTxPower(struct adapter *padapter); void Hal_SetCarrierSuppressionTx(struct adapter *padapter, u8 bStart); -void Hal_SetSingleToneTx (struct adapter *padapter , u8 bStart); -void Hal_SetSingleCarrierTx (struct adapter *padapter, u8 bStart); -void Hal_SetContinuousTx (struct adapter *padapter, u8 bStart); +void Hal_SetSingleToneTx(struct adapter *padapter, u8 bStart); +void Hal_SetSingleCarrierTx(struct adapter *padapter, u8 bStart); +void Hal_SetContinuousTx(struct adapter *padapter, u8 bStart); void Hal_SetBandwidth(struct adapter *padapter); void Hal_SetDataRate(struct adapter *padapter); @@ -494,8 +494,8 @@ void Hal_TriggerRFThermalMeter(struct adapter *padapter); u8 Hal_ReadRFThermalMeter(struct adapter *padapter); void Hal_SetCCKContinuousTx(struct adapter *padapter, u8 bStart); void Hal_SetOFDMContinuousTx(struct adapter *padapter, u8 bStart); -void Hal_ProSetCrystalCap (struct adapter *padapter , u32 CrystalCapVal); -void MP_PHY_SetRFPathSwitch(struct adapter *padapter , bool bMain); +void Hal_ProSetCrystalCap(struct adapter *padapter, u32 CrystalCapVal); +void MP_PHY_SetRFPathSwitch(struct adapter *padapter, bool bMain); u32 mpt_ProQueryCalTxPower(struct adapter *padapter, u8 RfPath); void MPT_PwrCtlDM(struct adapter *padapter, u32 bstart); u8 MptToMgntRate(u32 MptRateIdx); diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h index 012d8f54814f..98c3e92245b7 100644 --- a/drivers/staging/rtl8723bs/include/rtw_recv.h +++ b/drivers/staging/rtl8723bs/include/rtw_recv.h @@ -187,7 +187,7 @@ struct rx_pkt_attrib { /* These definition is used for Rx packet reordering. 
*/ -#define SN_LESS(a, b) (((a-b)&0x800)!= 0) +#define SN_LESS(a, b) (((a - b) & 0x800) != 0) #define SN_EQUAL(a, b) (a == b) /* define REORDER_WIN_SIZE 128 */ /* define REORDER_ENTRY_NUM 128 */ @@ -369,12 +369,12 @@ struct recv_frame_hdr }; -union recv_frame{ +union recv_frame { union{ struct list_head list; struct recv_frame_hdr hdr; uint mem[RECVFRAME_HDR_ALIGN>>2]; - }u; + } u; /* uint mem[MAX_RXSZ>>2]; */ @@ -388,8 +388,8 @@ enum RX_PACKET_TYPE { C2H_PACKET }; -extern union recv_frame *_rtw_alloc_recvframe (struct __queue *pfree_recv_queue); /* get a free recv_frame from pfree_recv_queue */ -extern union recv_frame *rtw_alloc_recvframe (struct __queue *pfree_recv_queue); /* get a free recv_frame from pfree_recv_queue */ +extern union recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue); /* get a free recv_frame from pfree_recv_queue */ +extern union recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue); /* get a free recv_frame from pfree_recv_queue */ extern int rtw_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_queue); #define rtw_dequeue_recvframe(queue) rtw_alloc_recvframe(queue) @@ -401,7 +401,7 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter); sint rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue); sint rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue); -struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue); +struct recv_buf *rtw_dequeue_recvbuf(struct __queue *queue); void rtw_reordering_ctrl_timeout_handler(struct timer_list *t); @@ -444,7 +444,7 @@ static inline u8 *recvframe_pull(union recv_frame *precvframe, sint sz) return NULL; } - precvframe->u.hdr.len -=sz; + precvframe->u.hdr.len -= sz; return precvframe->u.hdr.rx_data; @@ -471,7 +471,7 @@ static inline u8 *recvframe_put(union recv_frame *precvframe, sint sz) return NULL; } - precvframe->u.hdr.len +=sz; + precvframe->u.hdr.len += sz; return precvframe->u.hdr.rx_tail; @@ -497,7 +497,7 @@ static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, sint sz) return NULL; } - precvframe->u.hdr.len -=sz; + precvframe->u.hdr.len -= sz; return precvframe->u.hdr.rx_tail; diff --git a/drivers/staging/rtl8723bs/include/rtw_security.h b/drivers/staging/rtl8723bs/include/rtw_security.h index bea184452edd..aa60b6f867dd 100644 --- a/drivers/staging/rtl8723bs/include/rtw_security.h +++ b/drivers/staging/rtl8723bs/include/rtw_security.h @@ -208,7 +208,7 @@ struct sha256_state { }; #define GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst)\ -do{\ +do {\ switch (psecuritypriv->dot11AuthAlgrthm)\ {\ case dot11AuthAlgrthm_Open:\ @@ -220,18 +220,18 @@ do{\ if (bmcst)\ encry_algo = (u8)psecuritypriv->dot118021XGrpPrivacy;\ else\ - encry_algo =(u8) psta->dot118021XPrivacy;\ + encry_algo = (u8)psta->dot118021XPrivacy;\ break;\ case dot11AuthAlgrthm_WAPI:\ encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\ break;\ - }\ -}while (0) + } \ +} while (0) #define _AES_IV_LEN_ 8 #define SET_ICE_IV_LEN(iv_len, icv_len, encrypt)\ -do{\ +do {\ switch (encrypt)\ {\ case _WEP40_:\ @@ -255,19 +255,19 @@ do{\ iv_len = 0;\ icv_len = 0;\ break;\ - }\ -}while (0) + } \ +} while (0) #define GET_TKIP_PN(iv, dot11txpn)\ -do{\ - dot11txpn._byte_.TSC0 =iv[2];\ - dot11txpn._byte_.TSC1 =iv[0];\ - dot11txpn._byte_.TSC2 =iv[4];\ - dot11txpn._byte_.TSC3 =iv[5];\ - dot11txpn._byte_.TSC4 =iv[6];\ - dot11txpn._byte_.TSC5 =iv[7];\ -}while (0) +do {\ + dot11txpn._byte_.TSC0 = iv[2];\ + dot11txpn._byte_.TSC1 = iv[0];\ + dot11txpn._byte_.TSC2 = 
iv[4];\ + dot11txpn._byte_.TSC3 = iv[5];\ + dot11txpn._byte_.TSC4 = iv[6];\ + dot11txpn._byte_.TSC5 = iv[7];\ +} while (0) #define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1))) diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h index ea1396005a13..cd2be0056aa1 100644 --- a/drivers/staging/rtl8723bs/include/rtw_xmit.h +++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h @@ -40,17 +40,17 @@ #define HW_QUEUE_ENTRY 8 #define WEP_IV(pattrib_iv, dot11txpn, keyidx)\ -do{\ +do {\ pattrib_iv[0] = dot11txpn._byte_.TSC0;\ pattrib_iv[1] = dot11txpn._byte_.TSC1;\ pattrib_iv[2] = dot11txpn._byte_.TSC2;\ pattrib_iv[3] = ((keyidx & 0x3)<<6);\ - dot11txpn.val = (dot11txpn.val == 0xffffff) ? 0: (dot11txpn.val+1);\ -}while (0) + dot11txpn.val = (dot11txpn.val == 0xffffff) ? 0 : (dot11txpn.val + 1);\ +} while (0) #define TKIP_IV(pattrib_iv, dot11txpn, keyidx)\ -do{\ +do {\ pattrib_iv[0] = dot11txpn._byte_.TSC1;\ pattrib_iv[1] = (dot11txpn._byte_.TSC1 | 0x20) & 0x7f;\ pattrib_iv[2] = dot11txpn._byte_.TSC0;\ @@ -59,11 +59,11 @@ do{\ pattrib_iv[5] = dot11txpn._byte_.TSC3;\ pattrib_iv[6] = dot11txpn._byte_.TSC4;\ pattrib_iv[7] = dot11txpn._byte_.TSC5;\ - dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0: (dot11txpn.val+1);\ -}while (0) + dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : (dot11txpn.val + 1);\ +} while (0) #define AES_IV(pattrib_iv, dot11txpn, keyidx)\ -do{\ +do {\ pattrib_iv[0] = dot11txpn._byte_.TSC0;\ pattrib_iv[1] = dot11txpn._byte_.TSC1;\ pattrib_iv[2] = 0;\ @@ -72,8 +72,8 @@ do{\ pattrib_iv[5] = dot11txpn._byte_.TSC3;\ pattrib_iv[6] = dot11txpn._byte_.TSC4;\ pattrib_iv[7] = dot11txpn._byte_.TSC5;\ - dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0: (dot11txpn.val+1);\ -}while (0) + dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 
0 : (dot11txpn.val + 1);\ +} while (0) #define HWXMIT_ENTRY 4 @@ -217,7 +217,7 @@ enum { XMITBUF_CMD = 2, }; -struct submit_ctx{ +struct submit_ctx { unsigned long submit_time; /* */ u32 timeout_ms; /* <0: not synchronous, 0: wait forever, >0: up to ms waiting */ int status; /* status for operation */ @@ -274,7 +274,7 @@ struct xmit_buf u8 pg_num; u8 agg_num; -#if defined(DBG_XMIT_BUF)|| defined(DBG_XMIT_BUF_EXT) +#if defined(DBG_XMIT_BUF) || defined(DBG_XMIT_BUF_EXT) u8 no; #endif @@ -350,7 +350,7 @@ struct hw_txqueue { sint ac_tag; }; -struct agg_pkt_info{ +struct agg_pkt_info { u16 offset; u16 pkt_len; }; @@ -484,7 +484,7 @@ void rtw_init_hwxmits(struct hw_xmit *phwxmit, sint entry); s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); -void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv); +void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv); s32 rtw_alloc_hwxmits(struct adapter *padapter); diff --git a/drivers/staging/rtl8723bs/include/sta_info.h b/drivers/staging/rtl8723bs/include/sta_info.h index 3acce5630f8e..c9aa3b5097a7 100644 --- a/drivers/staging/rtl8723bs/include/sta_info.h +++ b/drivers/staging/rtl8723bs/include/sta_info.h @@ -31,13 +31,13 @@ struct wlan_acl_pool { struct __queue acl_node_q; }; -typedef struct _RSSI_STA{ +typedef struct _RSSI_STA { s32 UndecoratedSmoothedPWDB; s32 UndecoratedSmoothedCCK; s32 UndecoratedSmoothedOFDM; u64 PacketMap; u8 ValidBit; -}RSSI_STA, *PRSSI_STA; +} RSSI_STA, *PRSSI_STA; struct stainfo_stats { @@ -304,7 +304,7 @@ struct sta_info { #define STA_RX_PKTS_DIFF_ARG(sta) \ sta->sta_stats.rx_mgnt_pkts - sta->sta_stats.last_rx_mgnt_pkts \ , sta->sta_stats.rx_ctrl_pkts - sta->sta_stats.last_rx_ctrl_pkts \ - , sta->sta_stats.rx_data_pkts -sta->sta_stats.last_rx_data_pkts + , sta->sta_stats.rx_data_pkts - sta->sta_stats.last_rx_data_pkts #define STA_PKTS_FMT "(m:%llu, c:%llu, d:%llu)" @@ -374,7 +374,7 @@ int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta); struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int offset); extern struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr); -extern u32 rtw_free_stainfo(struct adapter *padapter , struct sta_info *psta); +extern u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta); extern void rtw_free_all_stainfo(struct adapter *padapter); extern struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr); extern u32 rtw_init_bcmc_stainfo(struct adapter *padapter); diff --git a/drivers/staging/rtl8723bs/include/wifi.h b/drivers/staging/rtl8723bs/include/wifi.h index 2faf83704ff0..88a6e982ce01 100644 --- a/drivers/staging/rtl8723bs/include/wifi.h +++ b/drivers/staging/rtl8723bs/include/wifi.h @@ -707,7 +707,7 @@ struct HT_caps_element unsigned char ASEL_caps; } HT_cap_element; unsigned char HT_cap[26]; - }u; + } u; } __attribute__ ((packed)); struct HT_info_element @@ -1102,7 +1102,7 @@ enum P2P_PROTO_WK_ID P2P_PRE_TX_PROVDISC_PROCESS_WK = 2, P2P_PRE_TX_NEGOREQ_PROCESS_WK = 3, P2P_PRE_TX_INVITEREQ_PROCESS_WK = 4, - P2P_AP_P2P_CH_SWITCH_PROCESS_WK =5, + P2P_AP_P2P_CH_SWITCH_PROCESS_WK = 5, P2P_RO_CH_WK = 6, }; @@ -1126,8 +1126,8 @@ enum P2P_PROTO_WK_ID #define WFD_DEVINFO_PC_TDLS 0x0080 #define WFD_DEVINFO_HDCP_SUPPORT 0x0100 -#define IP_MCAST_MAC(mac) ((mac[0]== 0x01) && (mac[1]== 0x00) && (mac[2]== 0x5e)) -#define ICMPV6_MCAST_MAC(mac) ((mac[0]== 0x33) && (mac[1]== 0x33) && (mac[2]!= 0xff)) +#define IP_MCAST_MAC(mac) ((mac[0] == 0x01) && (mac[1] == 0x00) && (mac[2] == 0x5e)) +#define 
ICMPV6_MCAST_MAC(mac) ((mac[0] == 0x33) && (mac[1] == 0x33) && (mac[2] != 0xff)) /* Regulatroy Domain */ struct regd_pair_mapping { diff --git a/drivers/staging/rtl8723bs/include/xmit_osdep.h b/drivers/staging/rtl8723bs/include/xmit_osdep.h index a61412b54ec0..e9ff274f7474 100644 --- a/drivers/staging/rtl8723bs/include/xmit_osdep.h +++ b/drivers/staging/rtl8723bs/include/xmit_osdep.h @@ -35,8 +35,8 @@ void rtw_os_xmit_resource_free(struct adapter *padapter, struct xmit_buf *pxmitb extern uint rtw_remainder_len(struct pkt_file *pfile); extern void _rtw_open_pktfile(_pkt *pkt, struct pkt_file *pfile); -extern uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen); -extern sint rtw_endofpktfile (struct pkt_file *pfile); +extern uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen); +extern sint rtw_endofpktfile(struct pkt_file *pfile); extern void rtw_os_pkt_complete(struct adapter *padapter, _pkt *pkt); extern void rtw_os_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe); diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c index 322cabb97b99..1ba85a43f05a 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c @@ -254,7 +254,7 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl /* DBG_8192C("%s\n", __func__); */ - bssinf_len = pnetwork->network.IELength+sizeof (struct ieee80211_hdr_3addr); + bssinf_len = pnetwork->network.IELength + sizeof(struct ieee80211_hdr_3addr); if (bssinf_len > MAX_BSSINFO_LEN) { DBG_871X("%s IE Length too long > %d byte\n", __func__, MAX_BSSINFO_LEN); goto exit; @@ -263,7 +263,7 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl { u16 wapi_len = 0; - if (rtw_get_wapi_ie(pnetwork->network.IEs, pnetwork->network.IELength, NULL, &wapi_len)>0) + if (rtw_get_wapi_ie(pnetwork->network.IEs, pnetwork->network.IELength, NULL, &wapi_len) > 0) { if (wapi_len > 0) { @@ -286,7 +286,7 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl wpsie = rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wpsielen); - if (wpsie && wpsielen>0) + if (wpsie && wpsielen > 0) psr = rtw_get_wps_attr_content(wpsie, wpsielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL); if (sr != 0) @@ -353,7 +353,7 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl pbuf += sizeof(struct ieee80211_hdr_3addr); - len = sizeof (struct ieee80211_hdr_3addr); + len = sizeof(struct ieee80211_hdr_3addr); memcpy(pbuf, pnetwork->network.IEs, pnetwork->network.IELength); len += pnetwork->network.IELength; @@ -402,7 +402,7 @@ int rtw_cfg80211_check_bss(struct adapter *padapter) cfg80211_put_bss(padapter->rtw_wdev->wiphy, bss); - return (bss!= NULL); + return (bss != NULL); } void rtw_cfg80211_ibss_indicate_connect(struct adapter *padapter) @@ -424,7 +424,7 @@ void rtw_cfg80211_ibss_indicate_connect(struct adapter *padapter) struct wlan_bssid_ex *pnetwork = &(padapter->mlmeextpriv.mlmext_info.network); struct wlan_network *scanned = pmlmepriv->cur_network_scanned; - if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ==true) + if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) { memcpy(&cur_network->network, pnetwork, sizeof(struct wlan_bssid_ex)); @@ -579,7 +579,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa struct 
sta_info *psta = NULL, *pbcmc_sta = NULL; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; - struct security_priv* psecuritypriv =&(padapter->securitypriv); + struct security_priv* psecuritypriv = &(padapter->securitypriv); struct sta_priv *pstapriv = &padapter->stapriv; DBG_8192C("%s\n", __func__); @@ -633,7 +633,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa DBG_8192C("r871x_set_encryption, wep_key_idx =%d, len =%d\n", wep_key_idx, wep_key_len); - if ((wep_key_idx >= WEP_KEYS) || (wep_key_len<= 0)) + if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) { ret = -EINVAL; goto exit; @@ -673,7 +673,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa } - if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) /* group key */ + if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) /* group key */ { if (param->u.crypt.set_tx == 0) /* group key */ { @@ -681,7 +681,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa { DBG_8192C("%s, set group_key, WEP\n", __func__); - memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); psecuritypriv->dot118021XGrpPrivacy = _WEP40_; if (param->u.crypt.key_len == 13) @@ -696,7 +696,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa psecuritypriv->dot118021XGrpPrivacy = _TKIP_; - memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */ /* set mic key */ @@ -712,7 +712,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa psecuritypriv->dot118021XGrpPrivacy = _AES_; - memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); } else { @@ -729,7 +729,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx); - pbcmc_sta =rtw_get_bcmc_stainfo(padapter); + pbcmc_sta = rtw_get_bcmc_stainfo(padapter); if (pbcmc_sta) { pbcmc_sta->ieee8021x_blocked = false; @@ -748,7 +748,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa { if (param->u.crypt.set_tx == 1) /* pairwise key */ { - memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 
16 : param->u.crypt.key_len)); if (strcmp(param->u.crypt.alg, "WEP") == 0) { @@ -799,7 +799,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa { if (strcmp(param->u.crypt.alg, "WEP") == 0) { - memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); psecuritypriv->dot118021XGrpPrivacy = _WEP40_; if (param->u.crypt.key_len == 13) @@ -811,7 +811,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa { psecuritypriv->dot118021XGrpPrivacy = _TKIP_; - memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */ /* set mic key */ @@ -825,7 +825,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa { psecuritypriv->dot118021XGrpPrivacy = _AES_; - memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); } else { @@ -840,7 +840,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx); - pbcmc_sta =rtw_get_bcmc_stainfo(padapter); + pbcmc_sta = rtw_get_bcmc_stainfo(padapter); if (pbcmc_sta) { pbcmc_sta->ieee8021x_blocked = false; @@ -940,7 +940,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) /* 802_1x */ { - struct sta_info * psta,*pbcmc_sta; + struct sta_info * psta, *pbcmc_sta; struct sta_priv * pstapriv = &padapter->stapriv; /* DBG_8192C("%s, : dot11AuthAlgrthm == dot11AuthAlgrthm_8021X\n", __func__); */ @@ -959,7 +959,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param psta->ieee8021x_blocked = false; - if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)|| + if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) || (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) { psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm; @@ -970,7 +970,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param DBG_8192C("%s, : param->u.crypt.set_tx == 1\n", __func__); - memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 
16 : param->u.crypt.key_len)); if (strcmp(param->u.crypt.alg, "TKIP") == 0)/* set mic key */ { @@ -978,7 +978,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8); memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8); - padapter->securitypriv.busetkipkey =false; + padapter->securitypriv.busetkipkey = false; /* _set_timer(&padapter->securitypriv.tkip_timer, 50); */ } @@ -991,21 +991,21 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param { if (strcmp(param->u.crypt.alg, "TKIP") == 0 || strcmp(param->u.crypt.alg, "CCMP") == 0) { - memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); - memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[16]), 8); - memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[24]), 8); + memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); + memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8); + memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8); padapter->securitypriv.binstallGrpkey = true; /* DEBUG_ERR((" param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */ DBG_871X(" ~~~~set sta key:groupkey\n"); padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx; - rtw_set_key(padapter,&padapter->securitypriv, param->u.crypt.idx, 1, true); + rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1, true); } else if (strcmp(param->u.crypt.alg, "BIP") == 0) { /* DBG_871X("BIP key_len =%d , index =%d @@@@@@@@@@@@@@@@@@\n", param->u.crypt.key_len, param->u.crypt.idx); */ /* save the IGTK key, length 16 bytes */ - memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 
16 : param->u.crypt.key_len)); /*DBG_871X("IGTK key below:\n"); for (no = 0;no<16;no++) printk(" %02x ", padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey[no]); @@ -1017,7 +1017,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param } } - pbcmc_sta =rtw_get_bcmc_stainfo(padapter); + pbcmc_sta = rtw_get_bcmc_stainfo(padapter); if (pbcmc_sta == NULL) { /* DEBUG_ERR(("Set OID_802_11_ADD_KEY: bcmc stainfo is null\n")); */ @@ -1028,7 +1028,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param if (strcmp(param->u.crypt.alg, "none") != 0) pbcmc_sta->ieee8021x_blocked = false; - if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)|| + if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) || (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) { pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm; @@ -1270,8 +1270,8 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy, /* for Ad-Hoc/AP mode */ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) - ||check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) - ||check_fwstate(pmlmepriv, WIFI_AP_STATE)) + || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) + || check_fwstate(pmlmepriv, WIFI_AP_STATE)) && check_fwstate(pmlmepriv, _FW_LINKED) ) { @@ -1346,7 +1346,7 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy, rtw_wdev->iftype = type; - if (rtw_set_802_11_infrastructure_mode(padapter, networkType) ==false) + if (rtw_set_802_11_infrastructure_mode(padapter, networkType) == false) { rtw_wdev->iftype = old_type; ret = -EPERM; @@ -1464,7 +1464,7 @@ static int rtw_cfg80211_set_probe_req_wpsp2pie(struct adapter *padapter, char *b DBG_8192C("%s, ielen =%d\n", __func__, len); #endif - if (len>0) + if (len > 0) { if ((wps_ie = rtw_get_wps_ie(buf, len, NULL, &wps_ielen))) { @@ -1503,8 +1503,8 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy int ret = 0; struct ndis_802_11_ssid *ssid = NULL; struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT]; - u8 survey_times =3; - u8 survey_times_for_one_ch =6; + u8 survey_times = 3; + u8 survey_times_for_one_ch = 6; struct cfg80211_ssid *ssids = request->ssids; int j = 0; bool need_indicate_scan_done = false; @@ -1556,7 +1556,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy goto check_need_indicate_scan_done; } - if (request->ie && request->ie_len>0) + if (request->ie && request->ie_len > 0) { rtw_cfg80211_set_probe_req_wpsp2pie(padapter, (u8 *)request->ie, request->ie_len); } @@ -1610,7 +1610,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy /* parsing channels, n_channels */ memset(ch, 0, sizeof(struct rtw_ieee80211_channel)*RTW_CHANNEL_SCAN_AMOUNT); - for (i = 0;i<request->n_channels && i<RTW_CHANNEL_SCAN_AMOUNT;i++) { + for (i = 0; i < request->n_channels && i < RTW_CHANNEL_SCAN_AMOUNT; i++) { #ifdef DEBUG_CFG80211 DBG_871X(FUNC_ADPT_FMT CHAN_FMT"\n", FUNC_ADPT_ARG(padapter), CHAN_ARG(request->channels[i])); #endif @@ -1620,12 +1620,12 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy spin_lock_bh(&pmlmepriv->lock); if (request->n_channels == 1) { - for (i = 1;i<survey_times_for_one_ch;i++) + for (i = 1; i < survey_times_for_one_ch; i++) memcpy(&ch[i], &ch[0], sizeof(struct rtw_ieee80211_channel)); _status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, ch, survey_times_for_one_ch); } else if (request->n_channels <= 4) { - for (j =request->n_channels-1;j>= 0;j--) - for (i = 0;i<survey_times;i++) + for (j = 
request->n_channels - 1; j >= 0; j--) + for (i = 0; i < survey_times; i++) { memcpy(&ch[j*survey_times+i], &ch[j], sizeof(struct rtw_ieee80211_channel)); } @@ -1699,7 +1699,7 @@ static int rtw_cfg80211_set_auth_type(struct security_priv *psecuritypriv, psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; - if (psecuritypriv->ndisauthtype>Ndis802_11AuthModeWPA) + if (psecuritypriv->ndisauthtype > Ndis802_11AuthModeWPA) psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X; break; @@ -1767,7 +1767,7 @@ static int rtw_cfg80211_set_cipher(struct security_priv *psecuritypriv, u32 ciph psecuritypriv->ndisencryptstatus = ndisencryptstatus; /* if (psecuritypriv->dot11PrivacyAlgrthm >= _AES_) */ - /* psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPA2PSK; */ + /* psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPA2PSK; */ } return 0; @@ -1799,7 +1799,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel int wpa_ielen = 0; int wpa2_ielen = 0; u8 *pwpa, *pwpa2; - u8 null_addr[]= {0, 0, 0, 0, 0, 0}; + u8 null_addr[] = {0, 0, 0, 0, 0, 0}; if (pie == NULL || !ielen) { /* Treat this as normal case, but need to clear WIFI_UNDER_WPS */ @@ -1818,13 +1818,13 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel goto exit; } - memcpy(buf, pie , ielen); + memcpy(buf, pie, ielen); /* dump */ { int i; DBG_8192C("set wpa_ie(length:%zu):\n", ielen); - for (i = 0;i<ielen;i =i+8) + for (i = 0; i < ielen; i = i + 8) DBG_8192C("0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7]); } @@ -1835,12 +1835,12 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel } pwpa = rtw_get_wpa_ie(buf, &wpa_ielen, ielen); - if (pwpa && wpa_ielen>0) + if (pwpa && wpa_ielen > 0) { if (rtw_parse_wpa_ie(pwpa, wpa_ielen+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) { padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X; - padapter->securitypriv.ndisauthtype =Ndis802_11AuthModeWPAPSK; + padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK; memcpy(padapter->securitypriv.supplicant_ie, &pwpa[0], wpa_ielen+2); DBG_8192C("got wpa_ie, wpa_ielen:%u\n", wpa_ielen); @@ -1848,12 +1848,12 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel } pwpa2 = rtw_get_wpa2_ie(buf, &wpa2_ielen, ielen); - if (pwpa2 && wpa2_ielen>0) + if (pwpa2 && wpa2_ielen > 0) { if (rtw_parse_wpa2_ie(pwpa2, wpa2_ielen+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) { padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X; - padapter->securitypriv.ndisauthtype =Ndis802_11AuthModeWPA2PSK; + padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK; memcpy(padapter->securitypriv.supplicant_ie, &pwpa2[0], wpa2_ielen+2); DBG_8192C("got wpa2_ie, wpa2_ielen:%u\n", wpa2_ielen); @@ -1873,7 +1873,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel { case WPA_CIPHER_NONE: padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_; - padapter->securitypriv.ndisencryptstatus =Ndis802_11EncryptionDisabled; + padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled; break; case WPA_CIPHER_WEP40: padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_; @@ -1897,7 +1897,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel { case WPA_CIPHER_NONE: padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_; - padapter->securitypriv.ndisencryptstatus 
=Ndis802_11EncryptionDisabled; + padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled; break; case WPA_CIPHER_WEP40: padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_; @@ -1924,7 +1924,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel wps_ie = rtw_get_wps_ie(buf, ielen, NULL, &wps_ielen); if (wps_ie && wps_ielen > 0) { DBG_8192C("got wps_ie, wps_ielen:%u\n", wps_ielen); - padapter->securitypriv.wps_ie_len = wps_ielen<MAX_WPS_IE_LEN?wps_ielen:MAX_WPS_IE_LEN; + padapter->securitypriv.wps_ie_len = wps_ielen < MAX_WPS_IE_LEN ? wps_ielen : MAX_WPS_IE_LEN; memcpy(padapter->securitypriv.wps_ie, wps_ie, padapter->securitypriv.wps_ie_len); set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS); } else { @@ -2027,7 +2027,7 @@ static int cfg80211_rtw_leave_ibss(struct wiphy *wiphy, struct net_device *ndev) rtw_wdev->iftype = NL80211_IFTYPE_STATION; - if (rtw_set_802_11_infrastructure_mode(padapter, Ndis802_11Infrastructure) ==false) + if (rtw_set_802_11_infrastructure_mode(padapter, Ndis802_11Infrastructure) == false) { rtw_wdev->iftype = old_type; ret = -EPERM; @@ -2185,7 +2185,7 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev, if (rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL) { - ret = -EOPNOTSUPP ; + ret = -EOPNOTSUPP; } kfree(pwep); @@ -2301,7 +2301,7 @@ static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy, u8 index, blInserted = false; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev); struct security_priv *psecuritypriv = &padapter->securitypriv; - u8 strZeroMacAddress[ ETH_ALEN ] = { 0x00 }; + u8 strZeroMacAddress[ETH_ALEN] = { 0x00 }; DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev)); @@ -2313,7 +2313,7 @@ static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy, blInserted = false; /* overwrite PMKID */ - for (index = 0 ; index<NUM_PMKID_CACHE; index++) + for (index = 0 ; index < NUM_PMKID_CACHE; index++) { if (!memcmp(psecuritypriv->PMKIDList[index].Bssid, (u8 *)pmksa->bssid, ETH_ALEN)) { /* BSSID is matched, the same AP => rewrite with new PMKID. */ @@ -2337,7 +2337,7 @@ static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy, memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, (u8 *)pmksa->pmkid, WLAN_PMKID_LEN); psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bUsed = true; - psecuritypriv->PMKIDIndex++ ; + psecuritypriv->PMKIDIndex++; if (psecuritypriv->PMKIDIndex == 16) { psecuritypriv->PMKIDIndex = 0; @@ -2357,7 +2357,7 @@ static int cfg80211_rtw_del_pmksa(struct wiphy *wiphy, DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev)); - for (index = 0 ; index<NUM_PMKID_CACHE; index++) + for (index = 0 ; index < NUM_PMKID_CACHE; index++) { if (!memcmp(psecuritypriv->PMKIDList[index].Bssid, (u8 *)pmksa->bssid, ETH_ALEN)) { /* BSSID is matched, the same AP => Remove this PMKID information and reset it. 
*/ @@ -2387,7 +2387,7 @@ static int cfg80211_rtw_flush_pmksa(struct wiphy *wiphy, DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev)); - memset(&psecuritypriv->PMKIDList[ 0 ], 0x00, sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE); + memset(&psecuritypriv->PMKIDList[0], 0x00, sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE); psecuritypriv->PMKIDIndex = 0; return 0; @@ -2575,7 +2575,7 @@ static const struct net_device_ops rtw_cfg80211_monitor_if_ops = { .ndo_start_xmit = rtw_cfg80211_monitor_if_xmit_entry, }; -static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, struct net_device **ndev) +static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, struct net_device **ndev) { int ret = 0; struct net_device* mon_ndev = NULL; @@ -2734,7 +2734,7 @@ static int rtw_add_beacon(struct adapter *adapter, const u8 *head, size_t head_l if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true) return -EINVAL; - if (head_len<24) + if (head_len < 24) return -EINVAL; pbuf = rtw_zmalloc(head_len+tail_len); @@ -3276,7 +3276,7 @@ static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap, enum ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7); } - else if ((rf_type == RF_1T2R) || (rf_type ==RF_2T2R)) + else if ((rf_type == RF_1T2R) || (rf_type == RF_2T2R)) { ht_cap->mcs.rx_mask[0] = 0xFF; ht_cap->mcs.rx_mask[1] = 0xFF; diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c index db6528a01229..5059b874080e 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c @@ -59,7 +59,7 @@ void rtw_indicate_wx_assoc_event(struct adapter *padapter) wrqu.ap_addr.sa_family = ARPHRD_ETHER; - if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ==true) + if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) memcpy(wrqu.ap_addr.sa_data, pnetwork->MacAddress, ETH_ALEN); else memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress, ETH_ALEN); @@ -86,11 +86,11 @@ static char *translate_scan(struct adapter *padapter, u32 ht_ielen = 0; char *custom = NULL; char *p; - u16 max_rate = 0, rate, ht_cap =false, vht_cap = false; + u16 max_rate = 0, rate, ht_cap = false, vht_cap = false; u32 i = 0; u8 bw_40MHz = 0, short_GI = 0; u16 mcs_rate = 0, vht_data_rate = 0; - u8 ie_offset = (pnetwork->network.Reserved[0] == 2? 0:12); + u8 ie_offset = (pnetwork->network.Reserved[0] == 2 ? 0 : 12); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); u8 ss, sq; @@ -113,11 +113,11 @@ static char *translate_scan(struct adapter *padapter, } else { p = rtw_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.IELength-12); } - if (p && ht_ielen>0) { + if (p && ht_ielen > 0) { struct rtw_ieee80211_ht_cap *pht_capie; ht_cap = true; pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2); - memcpy(&mcs_rate , pht_capie->supp_mcs_set, 2); + memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2); bw_40MHz = (le16_to_cpu(pht_capie->cap_info) & IEEE80211_HT_CAP_SUP_WIDTH) ? 1 : 0; short_GI = (le16_to_cpu(pht_capie->cap_info) & (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40)) ? 
1 : 0; } @@ -163,7 +163,7 @@ static char *translate_scan(struct adapter *padapter, cap = le16_to_cpu(le_tmp); } - if (cap & (WLAN_CAPABILITY_IBSS |WLAN_CAPABILITY_BSS)) { + if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_BSS)) { if (cap & WLAN_CAPABILITY_BSS) iwe.u.mode = IW_MODE_MASTER; else @@ -172,7 +172,7 @@ static char *translate_scan(struct adapter *padapter, start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN); } - if (pnetwork->network.Configuration.DSConfig<1 /*|| pnetwork->network.Configuration.DSConfig>14*/) + if (pnetwork->network.Configuration.DSConfig < 1 /*|| pnetwork->network.Configuration.DSConfig > 14*/) pnetwork->network.Configuration.DSConfig = 1; /* Add frequency/channel */ @@ -197,12 +197,12 @@ static char *translate_scan(struct adapter *padapter, if (!custom) return start; p = custom; - p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): "); - while (pnetwork->network.SupportedRates[i]!= 0) { + p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): "); + while (pnetwork->network.SupportedRates[i] != 0) { rate = pnetwork->network.SupportedRates[i]&0x7F; if (rate > max_rate) max_rate = rate; - p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), + p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), "%d%s ", rate >> 1, (rate & 1) ? ".5" : ""); i++; } @@ -239,7 +239,7 @@ static char *translate_scan(struct adapter *padapter, if (!buf) return start; if (wpa_len > 0) { - p =buf; + p = buf; p += sprintf(p, "wpa_ie ="); for (i = 0; i < wpa_len; i++) p += sprintf(p, "%02x", wpa_ie[i]); @@ -258,7 +258,7 @@ static char *translate_scan(struct adapter *padapter, start = iwe_stream_add_point(info, start, stop, &iwe, buf); memset(&iwe, 0, sizeof(iwe)); - iwe.cmd =IWEVGENIE; + iwe.cmd = IWEVGENIE; iwe.u.data.length = wpa_len; start = iwe_stream_add_point(info, start, stop, &iwe, wpa_ie); } @@ -274,7 +274,7 @@ static char *translate_scan(struct adapter *padapter, start = iwe_stream_add_point(info, start, stop, &iwe, buf); memset(&iwe, 0, sizeof(iwe)); - iwe.cmd =IWEVGENIE; + iwe.cmd = IWEVGENIE; iwe.u.data.length = rsn_len; start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie); } @@ -298,13 +298,13 @@ static char *translate_scan(struct adapter *padapter, } while (cnt < total_ielen) { - if (rtw_is_wps_ie(&ie_ptr[cnt], &wps_ielen) && (wps_ielen>2)) { + if (rtw_is_wps_ie(&ie_ptr[cnt], &wps_ielen) && (wps_ielen > 2)) { wpsie_ptr = &ie_ptr[cnt]; - iwe.cmd =IWEVGENIE; + iwe.cmd = IWEVGENIE; iwe.u.data.length = (u16)wps_ielen; start = iwe_stream_add_point(info, start, stop, &iwe, wpsie_ptr); } - cnt+=ie_ptr[cnt+1]+2; /* goto next */ + cnt += ie_ptr[cnt + 1] + 2; /* goto next */ } } @@ -352,8 +352,8 @@ static char *translate_scan(struct adapter *padapter, #if defined(CONFIG_SIGNAL_DISPLAY_DBM) && defined(CONFIG_BACKGROUND_NOISE_MONITOR) { s16 tmp_noise = 0; - rtw_hal_get_odm_var(padapter, HAL_ODM_NOISE_MONITOR,&(pnetwork->network.Configuration.DSConfig), &(tmp_noise)); - iwe.u.qual.noise = tmp_noise ; + rtw_hal_get_odm_var(padapter, HAL_ODM_NOISE_MONITOR, &(pnetwork->network.Configuration.DSConfig), &(tmp_noise)); + iwe.u.qual.noise = tmp_noise; } #else iwe.u.qual.noise = 0; /* noise level */ @@ -500,7 +500,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, DBG_871X("wep, set_tx = 1\n"); if (rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL) - ret = -EOPNOTSUPP ; + ret = -EOPNOTSUPP; } else { DBG_871X("wep, set_tx = 0\n"); @@ -508,12 +508,12 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param 
*param, /* psecuritypriv->dot11PrivacyKeyIndex =keyid", but can rtw_set_key to fw/cam */ if (wep_key_idx >= WEP_KEYS) { - ret = -EOPNOTSUPP ; + ret = -EOPNOTSUPP; goto exit; } memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength); - psecuritypriv->dot11DefKeylen[wep_key_idx]=pwep->KeyLength; + psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength; rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0, true); } @@ -533,20 +533,20 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, if (strcmp(param->u.crypt.alg, "none") != 0) psta->ieee8021x_blocked = false; - if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)|| + if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) || (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) { psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm; } if (param->u.crypt.set_tx == 1) { /* pairwise key */ - memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */ /* DEBUG_ERR(("\nset key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */ memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8); memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8); - padapter->securitypriv.busetkipkey =false; + padapter->securitypriv.busetkipkey = false; /* _set_timer(&padapter->securitypriv.tkip_timer, 50); */ } @@ -556,11 +556,11 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, rtw_setstakey_cmd(padapter, psta, true, true); } else { /* group key */ if (strcmp(param->u.crypt.alg, "TKIP") == 0 || strcmp(param->u.crypt.alg, "CCMP") == 0) { - memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 
16 : param->u.crypt.key_len)); /* only TKIP group key need to install this */ if (param->u.crypt.key_len > 16) { - memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[16]), 8); - memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[24]), 8); + memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8); + memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8); } padapter->securitypriv.binstallGrpkey = true; /* DEBUG_ERR((" param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */ @@ -568,11 +568,11 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx; - rtw_set_key(padapter,&padapter->securitypriv, param->u.crypt.idx, 1, true); + rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1, true); } else if (strcmp(param->u.crypt.alg, "BIP") == 0) { /* printk("BIP key_len =%d , index =%d @@@@@@@@@@@@@@@@@@\n", param->u.crypt.key_len, param->u.crypt.idx); */ /* save the IGTK key, length 16 bytes */ - memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); /*printk("IGTK key below:\n"); for (no = 0;no<16;no++) printk(" %02x ", padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey[no]); @@ -584,7 +584,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, } } - pbcmc_sta =rtw_get_bcmc_stainfo(padapter); + pbcmc_sta = rtw_get_bcmc_stainfo(padapter); if (pbcmc_sta == NULL) { /* DEBUG_ERR(("Set OID_802_11_ADD_KEY: bcmc stainfo is null\n")); */ } else { @@ -592,7 +592,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, if (strcmp(param->u.crypt.alg, "none") != 0) pbcmc_sta->ieee8021x_blocked = false; - if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)|| + if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) || (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) { pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm; } @@ -613,7 +613,7 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie u8 *buf = NULL; int group_cipher = 0, pairwise_cipher = 0; int ret = 0; - u8 null_addr[]= {0, 0, 0, 0, 0, 0}; + u8 null_addr[] = {0, 0, 0, 0, 0, 0}; if ((ielen > MAX_WPA_IE_LEN) || (pie == NULL)) { _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS); @@ -630,13 +630,13 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie goto exit; } - memcpy(buf, pie , ielen); + memcpy(buf, pie, ielen); /* dump */ { int i; DBG_871X("\n wpa_ie(length:%d):\n", ielen); - for (i = 0;i<ielen;i =i+8) + for (i = 0; i < ielen; i = i + 8) DBG_871X("0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7]); } @@ -648,13 +648,13 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie if (rtw_parse_wpa_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) { padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X; - padapter->securitypriv.ndisauthtype 
=Ndis802_11AuthModeWPAPSK; + padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK; memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen); } if (rtw_parse_wpa2_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) { padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X; - padapter->securitypriv.ndisauthtype =Ndis802_11AuthModeWPA2PSK; + padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK; memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen); } @@ -666,7 +666,7 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie switch (group_cipher) { case WPA_CIPHER_NONE: padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_; - padapter->securitypriv.ndisencryptstatus =Ndis802_11EncryptionDisabled; + padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled; break; case WPA_CIPHER_WEP40: padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_; @@ -689,7 +689,7 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie switch (pairwise_cipher) { case WPA_CIPHER_NONE: padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_; - padapter->securitypriv.ndisencryptstatus =Ndis802_11EncryptionDisabled; + padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled; break; case WPA_CIPHER_WEP40: padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_; @@ -712,7 +712,7 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS); {/* set wps_ie */ u16 cnt = 0; - u8 eid, wps_oui[4]={0x0, 0x50, 0xf2, 0x04}; + u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04}; while (cnt < ielen) { eid = buf[cnt]; @@ -762,7 +762,7 @@ static int rtw_wx_get_name(struct net_device *dev, struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); u32 ht_ielen = 0; char *p; - u8 ht_cap =false, vht_cap =false; + u8 ht_cap = false, vht_cap = false; struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network; NDIS_802_11_RATES_EX* prates = NULL; @@ -772,7 +772,7 @@ static int rtw_wx_get_name(struct net_device *dev, if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true) { /* parsing HT_CAP_IE */ p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12); - if (p && ht_ielen>0) + if (p && ht_ielen > 0) ht_cap = true; prates = &pcur_bss->SupportedRates; @@ -846,7 +846,7 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); - enum NDIS_802_11_NETWORK_INFRASTRUCTURE networkType ; + enum NDIS_802_11_NETWORK_INFRASTRUCTURE networkType; int ret = 0; if (_FAIL == rtw_pwr_wakeup(padapter)) { @@ -878,7 +878,7 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a, DBG_871X("set_mode = IW_MODE_INFRA\n"); break; - default : + default: ret = -EINVAL; RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("\n Mode: %s is not supported \n", iw_operation_mode[wrqu->mode])); goto exit; @@ -895,7 +895,7 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a, } */ - if (rtw_set_802_11_infrastructure_mode(padapter, networkType) ==false) { + if (rtw_set_802_11_infrastructure_mode(padapter, networkType) == false) { ret = -EPERM; goto exit; @@ -939,8 +939,8 @@ static int rtw_wx_set_pmkid(struct net_device *dev, int intReturn = false; struct security_priv *psecuritypriv = 
&padapter->securitypriv; struct iw_pmksa* pPMK = (struct iw_pmksa *)extra; - u8 strZeroMacAddress[ ETH_ALEN ] = { 0x00 }; - u8 strIssueBssid[ ETH_ALEN ] = { 0x00 }; + u8 strZeroMacAddress[ETH_ALEN] = { 0x00 }; + u8 strIssueBssid[ETH_ALEN] = { 0x00 }; /* There are the BSSID information in the bssid.sa_data array. @@ -960,13 +960,13 @@ static int rtw_wx_set_pmkid(struct net_device *dev, blInserted = false; /* overwrite PMKID */ - for (j = 0 ; j<NUM_PMKID_CACHE; j++) { + for (j = 0; j < NUM_PMKID_CACHE; j++) { if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN)) { /* BSSID is matched, the same AP => rewrite with new PMKID. */ DBG_871X("[rtw_wx_set_pmkid] BSSID exists in the PMKList.\n"); memcpy(psecuritypriv->PMKIDList[j].PMKID, pPMK->pmkid, IW_PMKID_LEN); - psecuritypriv->PMKIDList[ j ].bUsed = true; + psecuritypriv->PMKIDList[j].bUsed = true; psecuritypriv->PMKIDIndex = j+1; blInserted = true; break; @@ -981,25 +981,25 @@ static int rtw_wx_set_pmkid(struct net_device *dev, memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].Bssid, strIssueBssid, ETH_ALEN); memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, pPMK->pmkid, IW_PMKID_LEN); - psecuritypriv->PMKIDList[ psecuritypriv->PMKIDIndex ].bUsed = true; - psecuritypriv->PMKIDIndex++ ; + psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bUsed = true; + psecuritypriv->PMKIDIndex++; if (psecuritypriv->PMKIDIndex == 16) psecuritypriv->PMKIDIndex = 0; } } else if (pPMK->cmd == IW_PMKSA_REMOVE) { DBG_871X("[rtw_wx_set_pmkid] IW_PMKSA_REMOVE!\n"); intReturn = true; - for (j = 0 ; j<NUM_PMKID_CACHE; j++) { + for (j = 0; j < NUM_PMKID_CACHE; j++) { if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN)) { /* BSSID is matched, the same AP => Remove this PMKID information and reset it. */ eth_zero_addr(psecuritypriv->PMKIDList[j].Bssid); - psecuritypriv->PMKIDList[ j ].bUsed = false; + psecuritypriv->PMKIDList[j].bUsed = false; break; } } } else if (pPMK->cmd == IW_PMKSA_FLUSH) { DBG_871X("[rtw_wx_set_pmkid] IW_PMKSA_FLUSH!\n"); - memset(&psecuritypriv->PMKIDList[ 0 ], 0x00, sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE); + memset(&psecuritypriv->PMKIDList[0], 0x00, sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE); psecuritypriv->PMKIDIndex = 0; intReturn = true; } @@ -1093,7 +1093,7 @@ static int rtw_wx_get_range(struct net_device *dev, /* Commented by Albert 2009/10/13 */ /* The following code will proivde the security capability to network manager. */ /* If the driver doesn't provide this capability to network manager, */ -/* the WPA/WPA2 routers can't be choosen in the network manager. */ +/* the WPA/WPA2 routers can't be chosen in the network manager. 
*/ /* #define IW_SCAN_CAPA_NONE 0x00 @@ -1106,11 +1106,11 @@ static int rtw_wx_get_range(struct net_device *dev, #define IW_SCAN_CAPA_TIME 0x40 */ - range->enc_capa = IW_ENC_CAPA_WPA|IW_ENC_CAPA_WPA2| - IW_ENC_CAPA_CIPHER_TKIP|IW_ENC_CAPA_CIPHER_CCMP; + range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | + IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; - range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE |IW_SCAN_CAPA_BSSID| - IW_SCAN_CAPA_CHANNEL|IW_SCAN_CAPA_MODE|IW_SCAN_CAPA_RATE; + range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE | IW_SCAN_CAPA_BSSID | + IW_SCAN_CAPA_CHANNEL | IW_SCAN_CAPA_MODE | IW_SCAN_CAPA_RATE; return 0; } @@ -1287,7 +1287,7 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, goto exit; } - if (!padapter->hw_init_completed ) { + if (!padapter->hw_init_completed) { ret = -1; goto exit; } @@ -1330,7 +1330,7 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, } else if (wrqu->data.length >= WEXT_CSCAN_HEADER_SIZE && !memcmp(extra, WEXT_CSCAN_HEADER, WEXT_CSCAN_HEADER_SIZE)) { - int len = wrqu->data.length -WEXT_CSCAN_HEADER_SIZE; + int len = wrqu->data.length - WEXT_CSCAN_HEADER_SIZE; char *pos = extra+WEXT_CSCAN_HEADER_SIZE; char section; char sec_len; @@ -1339,7 +1339,7 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, /* DBG_871X("%s COMBO_SCAN header is recognized\n", __func__); */ while (len >= 1) { - section = *(pos++); len-= 1; + section = *(pos++); len -= 1; switch (section) { case WEXT_CSCAN_SSID_SECTION: @@ -1349,9 +1349,9 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, break; } - sec_len = *(pos++); len-= 1; + sec_len = *(pos++); len -= 1; - if (sec_len>0 && sec_len<=len) { + if (sec_len > 0 && sec_len <= len) { ssid[ssid_index].SsidLength = sec_len; memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength); /* DBG_871X("%s COMBO_SCAN with specific ssid:%s, %d\n", __func__ */ @@ -1359,29 +1359,29 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, ssid_index++; } - pos+=sec_len; len-=sec_len; + pos += sec_len; len -= sec_len; break; case WEXT_CSCAN_CHANNEL_SECTION: /* DBG_871X("WEXT_CSCAN_CHANNEL_SECTION\n"); */ - pos+= 1; len-= 1; + pos += 1; len -= 1; break; case WEXT_CSCAN_ACTV_DWELL_SECTION: /* DBG_871X("WEXT_CSCAN_ACTV_DWELL_SECTION\n"); */ - pos+=2; len-=2; + pos += 2; len -= 2; break; case WEXT_CSCAN_PASV_DWELL_SECTION: /* DBG_871X("WEXT_CSCAN_PASV_DWELL_SECTION\n"); */ - pos+=2; len-=2; + pos += 2; len -= 2; break; case WEXT_CSCAN_HOME_DWELL_SECTION: /* DBG_871X("WEXT_CSCAN_HOME_DWELL_SECTION\n"); */ - pos+=2; len-=2; + pos += 2; len -= 2; break; case WEXT_CSCAN_TYPE_SECTION: /* DBG_871X("WEXT_CSCAN_TYPE_SECTION\n"); */ - pos+= 1; len-= 1; + pos += 1; len -= 1; break; default: /* DBG_871X("Unknown CSCAN section %c\n", section); */ @@ -1391,7 +1391,7 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, } - /* jeff: it has still some scan paramater to parse, we only do this now... */ + /* jeff: it has still some scan parameter to parse, we only do this now... 
*/ _status = rtw_set_802_11_bssid_list_scan(padapter, ssid, RTW_SSID_SCAN_AMOUNT); } else { @@ -1463,7 +1463,7 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a, && rtw_mlme_band_check(padapter, pnetwork->network.Configuration.DSConfig) == true && true == rtw_validate_ssid(&(pnetwork->network.Ssid))) { - ev =translate_scan(padapter, a, pnetwork, ev, stop); + ev = translate_scan(padapter, a, pnetwork, ev, stop); } plist = get_next(plist); @@ -1481,7 +1481,7 @@ exit: DBG_871X("DBG_IOCTL %s:%d return %d\n", __func__, __LINE__, ret); #endif - return ret ; + return ret; } @@ -1570,7 +1570,7 @@ static int rtw_wx_set_essid(struct net_device *dev, pnetwork->network.Ssid.Ssid)); if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength)) && - (pnetwork->network.Ssid.SsidLength ==ndis_ssid.SsidLength)) { + (pnetwork->network.Ssid.SsidLength == ndis_ssid.SsidLength)) { RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_wx_set_essid: find match, set infra mode\n")); @@ -1651,7 +1651,7 @@ static int rtw_wx_set_rate(struct net_device *dev, u32 target_rate = wrqu->bitrate.value; u32 fixed = wrqu->bitrate.fixed; u32 ratevalue = 0; - u8 mpdatarate[NumRates]={11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff}; + u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff}; RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_set_rate\n")); RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("target_rate = %d, fixed = %d\n", target_rate, fixed)); @@ -1706,8 +1706,8 @@ static int rtw_wx_set_rate(struct net_device *dev, set_rate: - for (i = 0; i<NumRates; i++) { - if (ratevalue ==mpdatarate[i]) { + for (i = 0; i < NumRates; i++) { + if (ratevalue == mpdatarate[i]) { datarates[i] = mpdatarate[i]; if (fixed == 0) break; @@ -1855,7 +1855,7 @@ static int rtw_wx_set_enc(struct net_device *dev, padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_; padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */ authmode = Ndis802_11AuthModeOpen; - padapter->securitypriv.ndisauthtype =authmode; + padapter->securitypriv.ndisauthtype = authmode; goto exit; } @@ -1881,7 +1881,7 @@ static int rtw_wx_set_enc(struct net_device *dev, padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_; padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_; authmode = Ndis802_11AuthModeOpen; - padapter->securitypriv.ndisauthtype =authmode; + padapter->securitypriv.ndisauthtype = authmode; } else if (erq->flags & IW_ENCODE_RESTRICTED) { DBG_871X("rtw_wx_set_enc():IW_ENCODE_RESTRICTED\n"); padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled; @@ -1891,7 +1891,7 @@ static int rtw_wx_set_enc(struct net_device *dev, padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_; padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_; authmode = Ndis802_11AuthModeShared; - padapter->securitypriv.ndisauthtype =authmode; + padapter->securitypriv.ndisauthtype = authmode; } else { DBG_871X("rtw_wx_set_enc():erq->flags = 0x%x\n", erq->flags); @@ -1900,7 +1900,7 @@ static int rtw_wx_set_enc(struct net_device *dev, padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_; padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_; authmode = Ndis802_11AuthModeOpen; - padapter->securitypriv.ndisauthtype =authmode; + padapter->securitypriv.ndisauthtype = authmode; } wep.KeyIndex = key; @@ -1909,7 +1909,7 @@ static int rtw_wx_set_enc(struct net_device *dev, wep.Length = wep.KeyLength + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial); } else { - wep.KeyLength = 0 ; + 
wep.KeyLength = 0; if (keyindex_provided == 1) { /* set key_id only, no given KeyMaterial(erq->length == 0). */ padapter->securitypriv.dot11PrivacyKeyIndex = key; @@ -2093,7 +2093,7 @@ static int rtw_wx_set_auth(struct net_device *dev, padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_; padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_; padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */ - padapter->securitypriv.ndisauthtype =Ndis802_11AuthModeOpen; + padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen; } break; @@ -2181,7 +2181,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev, param->u.crypt.set_tx = 0; } - param->u.crypt.idx = (pencoding->flags&0x00FF) -1 ; + param->u.crypt.idx = (pencoding->flags & 0x00FF) - 1; if (pext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) memcpy(param->u.crypt.seq, pext->rx_seq, 8); @@ -2459,7 +2459,7 @@ static int rtw_get_ap_info(struct net_device *dev, /* pdata->length = 0;? */ pdata->flags = 0; - if (pdata->length>=32) { + if (pdata->length >= 32) { if (copy_from_user(data, pdata->pointer, 32)) { ret = -EINVAL; goto exit; @@ -2492,13 +2492,13 @@ static int rtw_get_ap_info(struct net_device *dev, DBG_871X("BSSID:" MAC_FMT "\n", MAC_ARG(bssid)); pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12); - if (pbuf && (wpa_ielen>0)) { + if (pbuf && (wpa_ielen > 0)) { pdata->flags = 1; break; } pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12); - if (pbuf && (wpa_ielen>0)) { + if (pbuf && (wpa_ielen > 0)) { pdata->flags = 2; break; } @@ -2510,7 +2510,7 @@ static int rtw_get_ap_info(struct net_device *dev, spin_unlock_bh(&(pmlmepriv->scanned_queue.lock)); - if (pdata->length>=34) { + if (pdata->length >= 34) { if (copy_to_user((u8 __force __user *)pdata->pointer+32, (u8 *)&pdata->flags, 1)) { ret = -EINVAL; goto exit; @@ -2541,7 +2541,7 @@ static int rtw_set_pid(struct net_device *dev, selector = *pdata; if (selector < 3 && selector >= 0) { padapter->pid[selector] = *(pdata+1); - DBG_871X("%s set pid[%d]=%d\n", __func__, selector , padapter->pid[selector]); + DBG_871X("%s set pid[%d]=%d\n", __func__, selector, padapter->pid[selector]); } else DBG_871X("%s selector %d error\n", __func__, selector); @@ -2563,7 +2563,7 @@ static int rtw_wps_start(struct net_device *dev, u32 u32wps_start = 0; unsigned int uintRet = 0; - if ((true == padapter->bDriverStopped) ||(true ==padapter->bSurpriseRemoved) || (NULL == pdata)) { + if ((true == padapter->bDriverStopped) || (true == padapter->bSurpriseRemoved) || (NULL == pdata)) { ret = -EINVAL; goto exit; } @@ -2733,8 +2733,8 @@ static int rtw_dbg_port(struct net_device *dev, break; case 0x01: /* dbg mode */ padapter->recvpriv.is_signal_dbg = 1; - extra_arg = extra_arg>100?100:extra_arg; - padapter->recvpriv.signal_strength_dbg =extra_arg; + extra_arg = extra_arg > 100 ? 
100 : extra_arg; + padapter->recvpriv.signal_strength_dbg = extra_arg; break; } break; @@ -2806,7 +2806,7 @@ static int rtw_dbg_port(struct net_device *dev, DBG_871X("ampdu_enable = %d\n", psta->htpriv.ampdu_enable); DBG_871X("agg_enable_bitmap =%x, candidate_tid_bitmap =%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap); - for (i = 0;i<16;i++) { + for (i = 0; i < 16; i++) { preorder_ctrl = &psta->recvreorder_ctrl[i]; if (preorder_ctrl->enable) DBG_871X("tid =%d, indicate_seq =%d\n", i, preorder_ctrl->indicate_seq); @@ -2845,7 +2845,7 @@ static int rtw_dbg_port(struct net_device *dev, spin_lock_bh(&pstapriv->sta_hash_lock); - for (i = 0; i< NUM_STA; i++) { + for (i = 0; i < NUM_STA; i++) { phead = &(pstapriv->sta_hash[i]); plist = get_next(phead); @@ -2872,7 +2872,7 @@ static int rtw_dbg_port(struct net_device *dev, - for (j = 0;j<16;j++) { + for (j = 0; j < 16; j++) { preorder_ctrl = &psta->recvreorder_ctrl[j]; if (preorder_ctrl->enable) DBG_871X("tid =%d, indicate_seq =%d\n", j, preorder_ctrl->indicate_seq); @@ -2900,7 +2900,7 @@ static int rtw_dbg_port(struct net_device *dev, DBG_871X("enable driver ctrl vcs = %d\n", extra_arg); padapter->driver_vcs_en = 1; - if (extra_arg>2) + if (extra_arg > 2) padapter->driver_vcs_type = 1; else padapter->driver_vcs_type = extra_arg; @@ -3048,7 +3048,7 @@ static int rtw_dbg_port(struct net_device *dev, if (max_rx_rate < 0xc) { /* max_rx_rate < MSC0 -> B or G -> disable HT */ pregistrypriv->ht_enable = 0; - for (i = 0; i<NumRates; i++) { + for (i = 0; i < NumRates; i++) { if (pmlmeext->datarate[i] > max_rx_rate) pmlmeext->datarate[i] = 0xff; } @@ -3057,7 +3057,7 @@ static int rtw_dbg_port(struct net_device *dev, else if (max_rx_rate < 0x1c) { /* mcs0~mcs15 */ u32 mcs_bitmap = 0x0; - for (i = 0; i<((max_rx_rate+1)-0xc); i++) + for (i = 0; i < ((max_rx_rate + 1) - 0xc); i++) mcs_bitmap |= BIT(i); set_mcs_rate_by_mask(pmlmeext->default_supported_mcs_set, mcs_bitmap); @@ -3129,9 +3129,9 @@ static int rtw_dbg_port(struct net_device *dev, */ int value; - DBG_871X("Set GPIO Direction! arg = %d , extra_arg =%d\n", arg , extra_arg); + DBG_871X("Set GPIO Direction! arg = %d , extra_arg =%d\n", arg, extra_arg); value = rtw_config_gpio(dev, arg, extra_arg); - DBG_871X("Set GPIO Direction %s\n", (value ==-1)?"Fail!!!":"Success"); + DBG_871X("Set GPIO Direction %s\n", (value == -1) ? "Fail!!!" : "Success"); break; } case 0x27: /* Set GPIO output direction value */ @@ -3142,15 +3142,15 @@ static int rtw_dbg_port(struct net_device *dev, */ int value; - DBG_871X("Set GPIO Value! arg = %d , extra_arg =%d\n", arg , extra_arg); + DBG_871X("Set GPIO Value! arg = %d , extra_arg =%d\n", arg, extra_arg); value = rtw_set_gpio_output_value(dev, arg, extra_arg); - DBG_871X("Set GPIO Value %s\n", (value ==-1)?"Fail!!!":"Success"); + DBG_871X("Set GPIO Value %s\n", (value == -1) ? "Fail!!!" 
: "Success"); break; } #endif case 0xaa: { - if ((extra_arg & 0x7F)> 0x3F) extra_arg = 0xFF; + if ((extra_arg & 0x7F) > 0x3F) extra_arg = 0xFF; DBG_871X("chang data rate to :0x%02x\n", extra_arg); padapter->fix_rate = extra_arg; } @@ -3161,7 +3161,7 @@ static int rtw_dbg_port(struct net_device *dev, mac_reg_dump(RTW_DBGDUMP, padapter); else if (extra_arg == 1) bb_reg_dump(RTW_DBGDUMP, padapter); - else if (extra_arg ==2) + else if (extra_arg == 2) rf_reg_dump(RTW_DBGDUMP, padapter); } break; @@ -3170,8 +3170,8 @@ static int rtw_dbg_port(struct net_device *dev, { u32 odm_flag; - if (0xf ==extra_arg) { - rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC,&odm_flag); + if (0xf == extra_arg) { + rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag); DBG_871X(" === DMFlag(0x%08x) ===\n", odm_flag); DBG_871X("extra_arg = 0 - disable all dynamic func\n"); DBG_871X("extra_arg = 1 - disable DIG- BIT(0)\n"); @@ -3187,7 +3187,7 @@ static int rtw_dbg_port(struct net_device *dev, extra_arg = 3 - turn on all dynamic func */ rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &(extra_arg)); - rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC,&odm_flag); + rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag); DBG_871X(" === DMFlag(0x%08x) ===\n", odm_flag); } } @@ -3257,7 +3257,7 @@ static int wpa_set_param(struct net_device *dev, u8 name, u32 value) /* ret = ieee80211_wpa_enable(ieee, value); */ switch ((value)&0xff) { - case 1 : /* WPA */ + case 1: /* WPA */ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK; /* WPA_PSK */ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled; break; @@ -3373,21 +3373,16 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p) /* down(&ieee->wx_sem); */ - if (p->length < sizeof(struct ieee_param) || !p->pointer) { - ret = -EINVAL; - goto out; - } + if (!p->pointer || p->length != sizeof(struct ieee_param)) + return -EINVAL; param = rtw_malloc(p->length); - if (param == NULL) { - ret = -ENOMEM; - goto out; - } + if (param == NULL) + return -ENOMEM; if (copy_from_user(param, p->pointer, p->length)) { kfree(param); - ret = -EFAULT; - goto out; + return -EFAULT; } switch (param->cmd) { @@ -3421,12 +3416,8 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p) kfree(param); -out: - /* up(&ieee->wx_sem); */ - return ret; - } static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len) @@ -3437,7 +3428,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, struct sta_info *psta = NULL, *pbcmc_sta = NULL; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; - struct security_priv* psecuritypriv =&(padapter->securitypriv); + struct security_priv* psecuritypriv = &(padapter->securitypriv); struct sta_priv *pstapriv = &padapter->stapriv; DBG_871X("%s\n", __func__); @@ -3490,7 +3481,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, DBG_871X("r871x_set_encryption, wep_key_idx =%d, len =%d\n", wep_key_idx, wep_key_len); - if ((wep_key_idx >= WEP_KEYS) || (wep_key_len<= 0)) { + if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) { ret = -EINVAL; goto exit; } @@ -3532,7 +3523,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength); - psecuritypriv->dot11DefKeylen[wep_key_idx]=pwep->KeyLength; + 
psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength; rtw_ap_set_wep_key(padapter, pwep->KeyMaterial, pwep->KeyLength, wep_key_idx, 1); } else { @@ -3558,7 +3549,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, if (strcmp(param->u.crypt.alg, "WEP") == 0) { DBG_871X("%s, set group_key, WEP\n", __func__); - memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); psecuritypriv->dot118021XGrpPrivacy = _WEP40_; if (param->u.crypt.key_len == 13) @@ -3569,7 +3560,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, psecuritypriv->dot118021XGrpPrivacy = _TKIP_; - memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */ /* set mic key */ @@ -3584,7 +3575,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, psecuritypriv->dot118021XGrpPrivacy = _AES_; - memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); } else { DBG_871X("%s, set group_key, none\n", __func__); @@ -3599,7 +3590,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx); - pbcmc_sta =rtw_get_bcmc_stainfo(padapter); + pbcmc_sta = rtw_get_bcmc_stainfo(padapter); if (pbcmc_sta) { pbcmc_sta->ieee8021x_blocked = false; pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */ @@ -3613,7 +3604,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) { /* psk/802_1x */ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { if (param->u.crypt.set_tx == 1) { - memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); if (strcmp(param->u.crypt.alg, "WEP") == 0) { DBG_871X("%s, set pairwise key, WEP\n", __func__); @@ -3650,7 +3641,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, } else { /* group key??? */ if (strcmp(param->u.crypt.alg, "WEP") == 0) { - memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 
16 : param->u.crypt.key_len)); psecuritypriv->dot118021XGrpPrivacy = _WEP40_; if (param->u.crypt.key_len == 13) @@ -3658,7 +3649,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) { psecuritypriv->dot118021XGrpPrivacy = _TKIP_; - memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */ /* set mic key */ @@ -3670,7 +3661,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) { psecuritypriv->dot118021XGrpPrivacy = _AES_; - memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); + memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)); } else { psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_; } @@ -3683,7 +3674,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx); - pbcmc_sta =rtw_get_bcmc_stainfo(padapter); + pbcmc_sta = rtw_get_bcmc_stainfo(padapter); if (pbcmc_sta) { pbcmc_sta->ieee8021x_blocked = false; pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */ @@ -3715,7 +3706,7 @@ static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int memcpy(&pstapriv->max_num_sta, param->u.bcn_ie.reserved, 2); - if ((pstapriv->max_num_sta>NUM_STA) || (pstapriv->max_num_sta<= 0)) + if ((pstapriv->max_num_sta > NUM_STA) || (pstapriv->max_num_sta <= 0)) pstapriv->max_num_sta = NUM_STA; @@ -3840,12 +3831,12 @@ static int rtw_del_sta(struct net_device *dev, struct ieee_param *param) psta = rtw_get_stainfo(pstapriv, param->sta_addr); if (psta) { - u8 updated =false; + u8 updated = false; /* DBG_871X("free psta =%p, aid =%d\n", psta, psta->aid); */ spin_lock_bh(&pstapriv->asoc_list_lock); - if (list_empty(&psta->asoc_list) ==false) { + if (list_empty(&psta->asoc_list) == false) { list_del_init(&psta->asoc_list); pstapriv->asoc_list_cnt--; updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING); @@ -3904,12 +3895,12 @@ static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *par ht_20mhz_set : BIT(5) */ - psta_data->sta_set =((psta->nonerp_set) | - (psta->no_short_slot_time_set <<1) | - (psta->no_short_preamble_set <<2) | - (psta->no_ht_gf_set <<3) | - (psta->no_ht_set <<4) | - (psta->ht_20mhz_set <<5)); + psta_data->sta_set = ((psta->nonerp_set) | + (psta->no_short_slot_time_set << 1) | + (psta->no_short_preamble_set << 2) | + (psta->no_ht_gf_set << 3) | + (psta->no_ht_set << 4) | + (psta->ht_20mhz_set << 5)); psta_data->tx_supp_rates_len = psta->bssratelen; memcpy(psta_data->tx_supp_rates, psta->bssrateset, psta->bssratelen); @@ -3978,7 +3969,7 @@ static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param) static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param, int len) { int ret = 0; - unsigned char wps_oui[4]={0x0, 0x50, 0xf2, 0x04}; + unsigned char 
wps_oui[4] = {0x0, 0x50, 0xf2, 0x04}; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv); @@ -3995,7 +3986,7 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param, kfree(pmlmepriv->wps_beacon_ie); pmlmepriv->wps_beacon_ie = NULL; - if (ie_len>0) { + if (ie_len > 0) { pmlmepriv->wps_beacon_ie = rtw_malloc(ie_len); pmlmepriv->wps_beacon_ie_len = ie_len; if (pmlmepriv->wps_beacon_ie == NULL) { @@ -4033,7 +4024,7 @@ static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *par kfree(pmlmepriv->wps_probe_resp_ie); pmlmepriv->wps_probe_resp_ie = NULL; - if (ie_len>0) { + if (ie_len > 0) { pmlmepriv->wps_probe_resp_ie = rtw_malloc(ie_len); pmlmepriv->wps_probe_resp_ie_len = ie_len; if (pmlmepriv->wps_probe_resp_ie == NULL) { @@ -4066,7 +4057,7 @@ static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *par kfree(pmlmepriv->wps_assoc_resp_ie); pmlmepriv->wps_assoc_resp_ie = NULL; - if (ie_len>0) { + if (ie_len > 0) { pmlmepriv->wps_assoc_resp_ie = rtw_malloc(ie_len); pmlmepriv->wps_assoc_resp_ie_len = ie_len; if (pmlmepriv->wps_assoc_resp_ie == NULL) { @@ -4200,28 +4191,19 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p) * so, we just check hw_init_completed */ - if (!padapter->hw_init_completed) { - ret = -EPERM; - goto out; - } - + if (!padapter->hw_init_completed) + return -EPERM; - /* if (p->length < sizeof(struct ieee_param) || !p->pointer) { */ - if (!p->pointer) { - ret = -EINVAL; - goto out; - } + if (!p->pointer || p->length != sizeof(*param)) + return -EINVAL; param = rtw_malloc(p->length); - if (param == NULL) { - ret = -ENOMEM; - goto out; - } + if (param == NULL) + return -ENOMEM; if (copy_from_user(param, p->pointer, p->length)) { kfree(param); - ret = -EFAULT; - goto out; + return -EFAULT; } /* DBG_871X("%s, cmd =%d\n", __func__, param->cmd); */ @@ -4321,13 +4303,8 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p) if (ret == 0 && copy_to_user(p->pointer, param, p->length)) ret = -EFAULT; - kfree(param); - -out: - return ret; - } static int rtw_wx_set_priv(struct net_device *dev, @@ -4380,11 +4357,11 @@ static int rtw_wx_set_priv(struct net_device *dev, struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); u8 *probereq_wpsie = ext; int probereq_wpsie_len = len; - u8 wps_oui[4]={0x0, 0x50, 0xf2, 0x04}; + u8 wps_oui[4] = {0x0, 0x50, 0xf2, 0x04}; if ((_VENDOR_SPECIFIC_IE_ == probereq_wpsie[0]) && (!memcmp(&probereq_wpsie[2], wps_oui, 4))) { - cp_sz = probereq_wpsie_len>MAX_WPS_IE_LEN ? MAX_WPS_IE_LEN:probereq_wpsie_len; + cp_sz = probereq_wpsie_len > MAX_WPS_IE_LEN ? 
MAX_WPS_IE_LEN : probereq_wpsie_len; if (pmlmepriv->wps_probe_req_ie) { pmlmepriv->wps_probe_req_ie_len = 0; @@ -4521,7 +4498,7 @@ static int rtw_test( ret = rtw_hal_fill_h2c_cmd(padapter, param[0], count-1, ¶m[1]); pos = sprintf(extra, "H2C ID = 0x%02x content =", param[0]); - for (i = 1; i<count; i++) + for (i = 1; i < count; i++) pos += sprintf(extra+pos, "%02x,", param[i]); extra[pos] = 0; pos--; @@ -4659,14 +4636,14 @@ static const struct iw_priv_args rtw_private_args[] = { }, { SIOCIWFIRSTPRIV + 0x11, - IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK , "p2p_get" + IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "p2p_get" }, { SIOCIWFIRSTPRIV + 0x12, 0, 0, "NULL" }, { SIOCIWFIRSTPRIV + 0x13, - IW_PRIV_TYPE_CHAR | 64, IW_PRIV_TYPE_CHAR | 64 , "p2p_get2" + IW_PRIV_TYPE_CHAR | 64, IW_PRIV_TYPE_CHAR | 64, "p2p_get2" }, { SIOCIWFIRSTPRIV + 0x14, @@ -4674,14 +4651,14 @@ static const struct iw_priv_args rtw_private_args[] = { }, { SIOCIWFIRSTPRIV + 0x15, - IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | 1024 , "tdls_get" + IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | 1024, "tdls_get" }, { SIOCIWFIRSTPRIV + 0x16, IW_PRIV_TYPE_CHAR | 64, 0, "pm_set" }, - {SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_CHAR | IFNAMSIZ , 0 , "rereg_nd_name"}, + {SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_CHAR | IFNAMSIZ, 0, "rereg_nd_name"}, {SIOCIWFIRSTPRIV + 0x1A, IW_PRIV_TYPE_CHAR | 1024, 0, "efuse_set"}, {SIOCIWFIRSTPRIV + 0x1B, IW_PRIV_TYPE_CHAR | 128, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "efuse_get"}, { @@ -4690,10 +4667,10 @@ static const struct iw_priv_args rtw_private_args[] = { }, #ifdef CONFIG_WOWLAN - { MP_WOW_ENABLE , IW_PRIV_TYPE_CHAR | 1024, 0, "wow_mode" }, /* set */ + { MP_WOW_ENABLE, IW_PRIV_TYPE_CHAR | 1024, 0, "wow_mode" }, /* set */ #endif #ifdef CONFIG_AP_WOWLAN - { MP_AP_WOW_ENABLE , IW_PRIV_TYPE_CHAR | 1024, 0, "ap_wow_mode" }, /* set */ + { MP_AP_WOW_ENABLE, IW_PRIV_TYPE_CHAR | 1024, 0, "ap_wow_mode" }, /* set */ #endif }; @@ -4744,7 +4721,7 @@ static iw_handler rtw_private_handler[] = { static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev) { struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); - struct iw_statistics *piwstats =&padapter->iwstats; + struct iw_statistics *piwstats = &padapter->iwstats; int tmp_level = 0; int tmp_qual = 0; int tmp_noise = 0; @@ -4783,10 +4760,10 @@ static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev) rtw_ps_deny(padapter, PS_DENY_IOCTL); LeaveAllPowerSaveModeDirect(padapter); - rtw_hal_set_odm_var(padapter, HAL_ODM_NOISE_MONITOR,&info, false); + rtw_hal_set_odm_var(padapter, HAL_ODM_NOISE_MONITOR, &info, false); /* ODM_InbandNoise_Monitor(podmpriv, true, 0x20, 100); */ rtw_ps_deny_cancel(padapter, PS_DENY_IOCTL); - rtw_hal_get_odm_var(padapter, HAL_ODM_NOISE_MONITOR,&(info.chan), &(padapter->recvpriv.noise)); + rtw_hal_get_odm_var(padapter, HAL_ODM_NOISE_MONITOR, &(info.chan), &(padapter->recvpriv.noise)); DBG_871X("chan:%d, noise_level:%d\n", info.chan, padapter->recvpriv.noise); } #endif diff --git a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c index 52a5b3156b28..f121dbfaa029 100644 --- a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c @@ -64,7 +64,7 @@ void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted) indicate_wx_scan_complete_event(padapter); } -static RT_PMKID_LIST backupPMKIDList[ NUM_PMKID_CACHE ]; +static RT_PMKID_LIST backupPMKIDList[NUM_PMKID_CACHE]; 
void rtw_reset_securitypriv(struct adapter *adapter) { u8 backupPMKIDIndex = 0; @@ -83,7 +83,7 @@ void rtw_reset_securitypriv(struct adapter *adapter) /* Backup the btkip_countermeasure information. */ /* When the countermeasure is trigger, the driver have to disconnect with AP for 60 seconds. */ - memcpy(&backupPMKIDList[ 0 ], &adapter->securitypriv.PMKIDList[ 0 ], sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE); + memcpy(&backupPMKIDList[0], &adapter->securitypriv.PMKIDList[0], sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE); backupPMKIDIndex = adapter->securitypriv.PMKIDIndex; backupTKIPCountermeasure = adapter->securitypriv.btkip_countermeasure; backupTKIPcountermeasure_time = adapter->securitypriv.btkip_countermeasure_time; @@ -95,7 +95,7 @@ void rtw_reset_securitypriv(struct adapter *adapter) /* Added by Albert 2009/02/18 */ /* Restore the PMK information to securitypriv structure for the following connection. */ - memcpy(&adapter->securitypriv.PMKIDList[ 0 ], &backupPMKIDList[ 0 ], sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE); + memcpy(&adapter->securitypriv.PMKIDList[0], &backupPMKIDList[0], sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE); adapter->securitypriv.PMKIDIndex = backupPMKIDIndex; adapter->securitypriv.btkip_countermeasure = backupTKIPCountermeasure; adapter->securitypriv.btkip_countermeasure_time = backupTKIPcountermeasure_time; diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c index 47e984d5b7cb..d29f59bbb613 100644 --- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c +++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c @@ -202,8 +202,8 @@ module_param(rtw_tx_pwr_by_rate, int, 0644); MODULE_PARM_DESC(rtw_tx_pwr_by_rate, "0:Disable, 1:Enable, 2: Depend on efuse"); int _netdev_open(struct net_device *pnetdev); -int netdev_open (struct net_device *pnetdev); -static int netdev_close (struct net_device *pnetdev); +int netdev_open(struct net_device *pnetdev); +static int netdev_close(struct net_device *pnetdev); static void loadparam(struct adapter *padapter, _nic_hdl pnetdev) { @@ -221,7 +221,7 @@ static void loadparam(struct adapter *padapter, _nic_hdl pnetdev) registry_par->channel = (u8)rtw_channel; registry_par->wireless_mode = (u8)rtw_wireless_mode; - registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense ; + registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense; registry_par->vcs_type = (u8)rtw_vcs_type; registry_par->rts_thresh = (u16)rtw_rts_thresh; registry_par->frag_thresh = (u16)rtw_frag_thresh; @@ -554,7 +554,7 @@ u32 rtw_start_drv_threads(struct adapter *padapter) return _status; } -void rtw_stop_drv_threads (struct adapter *padapter) +void rtw_stop_drv_threads(struct adapter *padapter) { RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_stop_drv_threads\n")); @@ -1154,7 +1154,7 @@ void rtw_dev_unload(struct adapter *padapter) DBG_871X("stop cmdthd timeout\n"); break; } else { - cnt ++; + cnt++; DBG_871X("cmdthd is running(%d)\n", cnt); msleep(10); } @@ -1274,7 +1274,7 @@ void rtw_suspend_wow(struct adapter *padapter) padapter->intf_stop(padapter); } - /* 2.1 clean interupt */ + /* 2.1 clean interrupt */ if (padapter->HalFunc.clear_interrupt) padapter->HalFunc.clear_interrupt(padapter); @@ -1348,7 +1348,7 @@ void rtw_suspend_ap_wow(struct adapter *padapter) /* 2. disable interrupt */ rtw_hal_disable_interrupt(padapter); /* It need wait for leaving 32K. 
*/ - /* 2.1 clean interupt */ + /* 2.1 clean interrupt */ if (padapter->HalFunc.clear_interrupt) padapter->HalFunc.clear_interrupt(padapter); @@ -1799,7 +1799,7 @@ int rtw_resume_common(struct adapter *padapter) pwrpriv->pno_in_resume = false; #endif } - DBG_871X_LEVEL(_drv_always_, "%s:%d in %d ms\n", __func__ , ret, + DBG_871X_LEVEL(_drv_always_, "%s:%d in %d ms\n", __func__, ret, jiffies_to_msecs(jiffies - start_time)); return ret; diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c index f5614e56371e..4238209ec175 100644 --- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c +++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c @@ -289,7 +289,7 @@ void *rtw_cbuf_pop(struct rtw_cbuf *cbuf) } /** - * rtw_cbuf_alloc - allocte a rtw_cbuf with given size and do initialization + * rtw_cbuf_alloc - allocate a rtw_cbuf with given size and do initialization * @size: size of pointer * * Returns: pointer of srtuct rtw_cbuf, NULL for allocation failure diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c index 643caccc3069..60c35d92ba29 100644 --- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c @@ -35,7 +35,8 @@ void rtw_os_recv_resource_free(struct recv_priv *precvpriv) for (i = 0; i < NR_RECVFRAME; i++) { if (precvframe->u.hdr.pkt) { - dev_kfree_skb_any(precvframe->u.hdr.pkt);/* free skb by driver */ + /* free skb by driver */ + dev_kfree_skb_any(precvframe->u.hdr.pkt); precvframe->u.hdr.pkt = NULL; } precvframe++; @@ -80,7 +81,10 @@ _pkt *rtw_os_alloc_msdu_pkt(union recv_frame *prframe, u16 nSubframe_Length, u8 ((!memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE) && eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) || !memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE))) { - /* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */ + /* + * remove RFC1042 or Bridge-Tunnel encapsulation and replace + * EtherType + */ skb_pull(sub_skb, SNAP_SIZE); memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN); memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN); @@ -101,7 +105,7 @@ void rtw_os_recv_indicate_pkt(struct adapter *padapter, _pkt *pkt, struct rx_pkt struct mlme_priv*pmlmepriv = &padapter->mlmepriv; int ret; - /* Indicat the packets to upper layer */ + /* Indicate the packets to upper layer */ if (pkt) { if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) { _pkt *pskb2 = NULL; @@ -109,11 +113,7 @@ void rtw_os_recv_indicate_pkt(struct adapter *padapter, _pkt *pkt, struct rx_pkt struct sta_priv *pstapriv = &padapter->stapriv; int bmcast = IS_MCAST(pattrib->dst); - /* DBG_871X("bmcast =%d\n", bmcast); */ - if (memcmp(pattrib->dst, myid(&padapter->eeprompriv), ETH_ALEN)) { - /* DBG_871X("not ap psta =%p, addr =%pM\n", psta, pattrib->dst); */ - if (bmcast) { psta = rtw_get_bcmc_stainfo(padapter); pskb2 = rtw_skb_clone(pkt); @@ -123,9 +123,6 @@ void rtw_os_recv_indicate_pkt(struct adapter *padapter, _pkt *pkt, struct rx_pkt if (psta) { struct net_device *pnetdev = (struct net_device*)padapter->pnetdev; - - /* DBG_871X("directly forwarding to the rtw_xmit_entry\n"); */ - /* skb->ip_summed = CHECKSUM_NONE; */ pkt->dev = pnetdev; skb_set_queue_mapping(pkt, rtw_recv_select_queue(pkt)); @@ -197,7 +194,7 @@ void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup) key_type |= NL80211_KEYTYPE_PAIRWISE; } - cfg80211_michael_mic_failure(padapter->pnetdev, (u8 *)&pmlmepriv->assoc_bssid[ 0 ], 
key_type, -1, + cfg80211_michael_mic_failure(padapter->pnetdev, (u8 *)&pmlmepriv->assoc_bssid[0], key_type, -1, NULL, GFP_ATOMIC); memset(&ev, 0x00, sizeof(ev)); @@ -208,7 +205,7 @@ void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup) } ev.src_addr.sa_family = ARPHRD_ETHER; - memcpy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN); + memcpy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0], ETH_ALEN); memset(&wrqu, 0x00, sizeof(wrqu)); wrqu.data.length = sizeof(ev); @@ -223,7 +220,7 @@ static void rtw_os_ksocket_send(struct adapter *padapter, union recv_frame *prec DBG_871X("eth rx: got eth_type = 0x%x\n", pattrib->eth_type); - if (psta && psta->isrc && psta->pid>0) { + if (psta && psta->isrc && psta->pid > 0) { u16 rx_pid; rx_pid = *(u16*)(skb->data+ETH_HLEN); @@ -234,14 +231,10 @@ static void rtw_os_ksocket_send(struct adapter *padapter, union recv_frame *prec if (rx_pid == psta->pid) { int i; u16 len = *(u16*)(skb->data+ETH_HLEN+2); - /* u16 ctrl_type = *(u16*)(skb->data+ETH_HLEN+4); */ - - /* DBG_871X("eth, RC: len = 0x%x, ctrl_type = 0x%x\n", len, ctrl_type); */ DBG_871X("eth, RC: len = 0x%x\n", len); - for (i = 0;i<len;i++) + for (i = 0; i < len; i++) DBG_871X("0x%x\n", *(skb->data+ETH_HLEN+4+i)); - /* DBG_871X("0x%x\n", *(skb->data+ETH_HLEN+6+i)); */ DBG_871X("eth, RC-end\n"); } @@ -291,7 +284,8 @@ int rtw_recv_indicatepkt(struct adapter *padapter, union recv_frame *precv_frame rtw_os_recv_indicate_pkt(padapter, skb, pattrib); - precv_frame->u.hdr.pkt = NULL; /* pointers to NULL before rtw_free_recvframe() */ + /* pointers to NULL before rtw_free_recvframe() */ + precv_frame->u.hdr.pkt = NULL; rtw_free_recvframe(precv_frame, pfree_recv_queue); @@ -301,11 +295,11 @@ int rtw_recv_indicatepkt(struct adapter *padapter, union recv_frame *precv_frame _recv_indicatepkt_drop: - /* enqueue back to free_recv_queue */ - rtw_free_recvframe(precv_frame, pfree_recv_queue); + /* enqueue back to free_recv_queue */ + rtw_free_recvframe(precv_frame, pfree_recv_queue); - DBG_COUNTER(padapter->rx_logs.os_indicate_err); - return _FAIL; + DBG_COUNTER(padapter->rx_logs.os_indicate_err); + return _FAIL; } void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl) diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c index 859f4a0afb95..b093b5629171 100644 --- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c +++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c @@ -108,7 +108,7 @@ static void sdio_free_irq(struct dvobj_priv *dvobj) err = sdio_release_irq(func); if (err) { dvobj->drv_dbg.dbg_sdio_free_irq_error_cnt++; - DBG_871X_LEVEL(_drv_err_,"%s: sdio_release_irq FAIL(%d)!\n", __func__, err); + DBG_871X_LEVEL(_drv_err_, "%s: sdio_release_irq FAIL(%d)!\n", __func__, err); } else dvobj->drv_dbg.dbg_sdio_free_irq_cnt++; sdio_release_host(func); @@ -324,7 +324,7 @@ static struct adapter *rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct padapter->dvobj = dvobj; dvobj->if1 = padapter; - padapter->bDriverStopped =true; + padapter->bDriverStopped = true; dvobj->padapters = padapter; padapter->iface_id = 0; @@ -430,7 +430,7 @@ static void rtw_sdio_if1_deinit(struct adapter *if1) rtw_cancel_all_timer(if1); #ifdef CONFIG_WOWLAN - adapter_to_pwrctl(if1)->wowlan_mode =false; + adapter_to_pwrctl(if1)->wowlan_mode = false; DBG_871X_LEVEL(_drv_always_, "%s wowlan_mode:%d\n", __func__, adapter_to_pwrctl(if1)->wowlan_mode); #endif /* CONFIG_WOWLAN */ @@ -500,7 +500,7 @@ free_dvobj: if (status != _SUCCESS) sdio_dvobj_deinit(func); exit: - 
return status == _SUCCESS?0:-ENODEV; + return status == _SUCCESS ? 0 : -ENODEV; } static void rtw_dev_remove(struct sdio_func *func) @@ -548,7 +548,7 @@ extern int pm_netdev_close(struct net_device *pnetdev, u8 bnormal); static int rtw_sdio_suspend(struct device *dev) { - struct sdio_func *func =dev_to_sdio_func(dev); + struct sdio_func *func = dev_to_sdio_func(dev); struct dvobj_priv *psdpriv = sdio_get_drvdata(func); struct pwrctrl_priv *pwrpriv = dvobj_to_pwrctl(psdpriv); struct adapter *padapter = psdpriv->if1; @@ -585,7 +585,7 @@ static int rtw_resume_process(struct adapter *padapter) static int rtw_sdio_resume(struct device *dev) { - struct sdio_func *func =dev_to_sdio_func(dev); + struct sdio_func *func = dev_to_sdio_func(dev); struct dvobj_priv *psdpriv = sdio_get_drvdata(func); struct adapter *padapter = psdpriv->if1; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; diff --git a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c index 4e81bc13e57d..fec8a8caaa46 100644 --- a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c @@ -15,16 +15,16 @@ uint rtw_remainder_len(struct pkt_file *pfile) return (pfile->buf_len - ((SIZE_PTR)(pfile->cur_addr) - (SIZE_PTR)(pfile->buf_start))); } -void _rtw_open_pktfile (_pkt *pktptr, struct pkt_file *pfile) +void _rtw_open_pktfile(_pkt *pktptr, struct pkt_file *pfile) { pfile->pkt = pktptr; pfile->cur_addr = pfile->buf_start = pktptr->data; pfile->pkt_len = pfile->buf_len = pktptr->len; - pfile->cur_buffer = pfile->buf_start ; + pfile->cur_buffer = pfile->buf_start; } -uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen) +uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen) { uint len = 0; diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c index 17c4131f5f62..c6f9375468eb 100644 --- a/drivers/staging/rts5208/rtsx_chip.c +++ b/drivers/staging/rts5208/rtsx_chip.c @@ -940,7 +940,8 @@ static void rtsx_monitor_aspm_config(struct rtsx_chip *chip) if (maybe_support_aspm) chip->aspm_l0s_l1_en = 0x03; - dev_dbg(rtsx_dev(chip), "aspm_level[0] = 0x%02x, aspm_level[1] = 0x%02x\n", + dev_dbg(rtsx_dev(chip), + "aspm_level[0] = 0x%02x, aspm_level[1] = 0x%02x\n", chip->aspm_level[0], chip->aspm_level[1]); if (chip->aspm_l0s_l1_en) { diff --git a/drivers/staging/sm750fb/Makefile b/drivers/staging/sm750fb/Makefile index 1cf3849cef23..b89aa4c12e9d 100644 --- a/drivers/staging/sm750fb/Makefile +++ b/drivers/staging/sm750fb/Makefile @@ -1,5 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_FB_SM750) += sm750fb.o -sm750fb-objs := sm750.o sm750_hw.o sm750_accel.o sm750_cursor.o ddk750_chip.o ddk750_power.o ddk750_mode.o -sm750fb-objs += ddk750_display.o ddk750_swi2c.o ddk750_sii164.o ddk750_dvi.o ddk750_hwi2c.o +sm750fb-objs := sm750.o sm750_hw.o sm750_accel.o sm750_cursor.o \ + ddk750_chip.o ddk750_power.o ddk750_mode.o \ + ddk750_display.o ddk750_swi2c.o ddk750_sii164.o \ + ddk750_dvi.o ddk750_hwi2c.o diff --git a/drivers/staging/speakup/keyhelp.c b/drivers/staging/speakup/keyhelp.c index 5f1bda37f86d..822ceac83068 100644 --- a/drivers/staging/speakup/keyhelp.c +++ b/drivers/staging/speakup/keyhelp.c @@ -49,7 +49,7 @@ static int cur_item, nstates; static void build_key_data(void) { u_char *kp, counters[MAXFUNCS], ch, ch1; - u_short *p_key = key_data, key; + u_short *p_key, key; int i, offset = 1; nstates = (int)(state_tbl[-1]); diff --git a/drivers/staging/speakup/main.c 
b/drivers/staging/speakup/main.c index 488f2539aa9a..02471d932d71 100644 --- a/drivers/staging/speakup/main.c +++ b/drivers/staging/speakup/main.c @@ -561,7 +561,7 @@ static u_long get_word(struct vc_data *vc) return 0; } else if (tmpx < vc->vc_cols - 2 && (ch == SPACE || ch == 0 || (ch < 0x100 && IS_WDLM(ch))) && - get_char(vc, (u_short *)&tmp_pos + 1, &temp) > SPACE) { + get_char(vc, (u_short *)tmp_pos + 1, &temp) > SPACE) { tmp_pos += 2; tmpx++; } else { @@ -2117,7 +2117,8 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym, spk_keydown = 0; goto out; } - value = spk_lastkey = pad_chars[value]; + value = pad_chars[value]; + spk_lastkey = value; spk_keydown++; spk_parked &= 0xfe; goto no_map; diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c index a8b4d0c5ab7e..032f3264fba1 100644 --- a/drivers/staging/speakup/selection.c +++ b/drivers/staging/speakup/selection.c @@ -51,9 +51,7 @@ static void __speakup_set_selection(struct work_struct *work) goto unref; } - console_lock(); set_selection_kernel(&sel, tty); - console_unlock(); unref: tty_kref_put(tty); diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index 9d85a3a1af4c..28cedaec6d8a 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c @@ -388,7 +388,7 @@ static int softsynth_probe(struct spk_synth *synth) synthu_device.name = "softsynthu"; synthu_device.fops = &softsynthu_fops; if (misc_register(&synthu_device)) { - pr_warn("Couldn't initialize miscdevice /dev/softsynth.\n"); + pr_warn("Couldn't initialize miscdevice /dev/softsynthu.\n"); return -ENODEV; } diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h index ac6a74883af4..c75b40838794 100644 --- a/drivers/staging/speakup/spk_priv.h +++ b/drivers/staging/speakup/spk_priv.h @@ -11,13 +11,11 @@ #ifndef _SPEAKUP_PRIVATE_H #define _SPEAKUP_PRIVATE_H +#include <linux/printk.h> + #include "spk_types.h" #include "spk_priv_keyinfo.h" -#ifndef pr_warn -#define pr_warn(fmt, arg...) 
printk(KERN_WARNING fmt, ##arg) -#endif - #define V_LAST_VAR { MAXVARS } #define SPACE 0x20 #define SYNTH_CHECK 20030716 /* today's date ought to do for check value */ diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c index 5a9eff08cb96..9b95f77f9265 100644 --- a/drivers/staging/speakup/spk_ttyio.c +++ b/drivers/staging/speakup/spk_ttyio.c @@ -51,7 +51,7 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty) return -EOPNOTSUPP; speakup_tty = tty; - ldisc_data = kmalloc(sizeof(struct spk_ldisc_data), GFP_KERNEL); + ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL); if (!ldisc_data) return -ENOMEM; diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h index a2fc72c29894..fc6a9416829c 100644 --- a/drivers/staging/speakup/spk_types.h +++ b/drivers/staging/speakup/spk_types.h @@ -189,7 +189,7 @@ struct spk_synth { void (*flush)(struct spk_synth *synth); int (*is_alive)(struct spk_synth *synth); int (*synth_adjust)(struct st_var_header *var); - void (*read_buff_add)(u_char); + void (*read_buff_add)(u_char c); unsigned char (*get_index)(struct spk_synth *synth); struct synth_indexing indexing; int alive; diff --git a/drivers/staging/unisys/Documentation/overview.txt b/drivers/staging/unisys/Documentation/overview.txt index f8a4144b239c..cf29f884cbe0 100644 --- a/drivers/staging/unisys/Documentation/overview.txt +++ b/drivers/staging/unisys/Documentation/overview.txt @@ -15,12 +15,12 @@ normally be unsharable, specifically: * visorinput - keyboard and mouse These drivers conform to the standard Linux bus/device model described -within Documentation/driver-api/driver-model/, and utilize a driver named visorbus to -present the virtual busses involved. Drivers in the 'visor*' driver set are -commonly referred to as "guest drivers" or "client drivers". All drivers -except visorbus expose a device of a specific usable class to the Linux guest -environment (e.g., block, network, or input), and are collectively referred -to as "function drivers". +within Documentation/driver-api/driver-model/, and utilize a driver named +visorbus to present the virtual busses involved. Drivers in the 'visor*' +driver set are commonly referred to as "guest drivers" or "client drivers". +All drivers except visorbus expose a device of a specific usable class to the +Linux guest environment (e.g., block, network, or input), and are collectively +referred to as "function drivers". 
The back-end for each device is owned and managed by a small, single-purpose service partition in the s-Par firmware, which communicates diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c index 9693fb559052..6d202cba8575 100644 --- a/drivers/staging/unisys/visorinput/visorinput.c +++ b/drivers/staging/unisys/visorinput/visorinput.c @@ -111,7 +111,7 @@ struct visorinput_devdata { /* size of following array */ unsigned int keycode_table_bytes; /* for keyboard devices: visorkbd_keycode[] + visorkbd_ext_keycode[] */ - unsigned char keycode_table[0]; + unsigned char keycode_table[]; }; static const guid_t visor_keyboard_channel_guid = VISOR_KEYBOARD_CHANNEL_GUID; diff --git a/drivers/staging/uwb/Kconfig b/drivers/staging/uwb/Kconfig deleted file mode 100644 index 259e053e1e09..000000000000 --- a/drivers/staging/uwb/Kconfig +++ /dev/null @@ -1,72 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# UWB device configuration -# - -menuconfig UWB - tristate "Ultra Wideband devices" - default n - select GENERIC_NET_UTILS - help - UWB is a high-bandwidth, low-power, point-to-point radio - technology using a wide spectrum (3.1-10.6GHz). It is - optimized for in-room use (480Mbps at 2 meters, 110Mbps at - 10m). It serves as the transport layer for other protocols, - such as Wireless USB (WUSB). - - The topology is peer to peer; however, higher level - protocols (such as WUSB) might impose a master/slave - relationship. - - Say Y here if your computer has UWB radio controllers (USB or PCI) - based. You will need to enable the radio controllers - below. It is ok to select all of them, no harm done. - - For more help check the UWB and WUSB related files in - <file:Documentation/usb/>. - - To compile the UWB stack as a module, choose M here. - -if UWB - -config UWB_HWA - tristate "UWB Radio Control driver for WUSB-compliant USB dongles (HWA)" - depends on USB - help - This driver enables the radio controller for HWA USB - devices. HWA stands for Host Wire Adapter, and it is a UWB - Radio Controller connected to your system via USB. Most of - them come with a Wireless USB host controller also. - - To compile this driver select Y (built in) or M (module). It - is safe to select any even if you do not have the hardware. - -config UWB_WHCI - tristate "UWB Radio Control driver for WHCI-compliant cards" - depends on PCI - help - This driver enables the radio controller for WHCI cards. - - WHCI is a specification developed by Intel - (http://www.intel.com/technology/comms/wusb/whci.htm) much - in the spirit of USB's EHCI, but for UWB and Wireless USB - radio/host controllers connected via memory mapping (eg: - PCI). Most of these cards come also with a Wireless USB host - controller. - - To compile this driver select Y (built in) or M (module). It - is safe to select any even if you do not have the hardware. - -config UWB_I1480U - tristate "Support for Intel Wireless UWB Link 1480 HWA" - depends on UWB_HWA - select FW_LOADER - help - This driver enables support for the i1480 when connected via - USB. It consists of a firmware uploader that will enable it - to behave as an HWA device. - - To compile this driver select Y (built in) or M (module). It - is safe to select any even if you do not have the hardware. 
- -endif # UWB diff --git a/drivers/staging/uwb/Makefile b/drivers/staging/uwb/Makefile deleted file mode 100644 index 32f4de7afbd6..000000000000 --- a/drivers/staging/uwb/Makefile +++ /dev/null @@ -1,32 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_UWB) += uwb.o -obj-$(CONFIG_UWB_WHCI) += umc.o whci.o whc-rc.o -obj-$(CONFIG_UWB_HWA) += hwa-rc.o -obj-$(CONFIG_UWB_I1480U) += i1480/ - -uwb-objs := \ - address.o \ - allocator.o \ - beacon.o \ - driver.o \ - drp.o \ - drp-avail.o \ - drp-ie.o \ - est.o \ - ie.o \ - ie-rcv.o \ - lc-dev.o \ - lc-rc.o \ - neh.o \ - pal.o \ - radio.o \ - reset.o \ - rsv.o \ - scan.o \ - uwb-debug.o \ - uwbd.o - -umc-objs := \ - umc-bus.o \ - umc-dev.o \ - umc-drv.o diff --git a/drivers/staging/uwb/TODO b/drivers/staging/uwb/TODO deleted file mode 100644 index abae57000534..000000000000 --- a/drivers/staging/uwb/TODO +++ /dev/null @@ -1,8 +0,0 @@ -TODO: Remove in late 2019 unless there are users - -There seems to not be any real wireless USB devices anywhere in the wild -anymore. It turned out to be a failed technology :( - -This will be removed from the tree if no one objects. - -Greg Kroah-Hartman <gregkh@linuxfoundation.org> diff --git a/drivers/staging/uwb/address.c b/drivers/staging/uwb/address.c deleted file mode 100644 index 857d5cd56a95..000000000000 --- a/drivers/staging/uwb/address.c +++ /dev/null @@ -1,352 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band - * Address management - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: docs - */ - -#include <linux/slab.h> -#include <linux/errno.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/random.h> -#include <linux/etherdevice.h> - -#include "uwb-internal.h" - - -/** Device Address Management command */ -struct uwb_rc_cmd_dev_addr_mgmt { - struct uwb_rccb rccb; - u8 bmOperationType; - u8 baAddr[6]; -} __attribute__((packed)); - - -/** - * Low level command for setting/getting UWB radio's addresses - * - * @hwarc: HWA Radio Control interface instance - * @bmOperationType: - * Set/get, MAC/DEV (see WUSB1.0[8.6.2.2]) - * @baAddr: address buffer--assumed to have enough data to hold - * the address type requested. - * @reply: Pointer to reply buffer (can be stack allocated) - * @returns: 0 if ok, < 0 errno code on error. - * - * @cmd has to be allocated because USB cannot grok USB or vmalloc - * buffers depending on your combination of host architecture. 
- */ -static -int uwb_rc_dev_addr_mgmt(struct uwb_rc *rc, - u8 bmOperationType, const u8 *baAddr, - struct uwb_rc_evt_dev_addr_mgmt *reply) -{ - int result; - struct uwb_rc_cmd_dev_addr_mgmt *cmd; - - result = -ENOMEM; - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - goto error_kzalloc; - cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; - cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_DEV_ADDR_MGMT); - cmd->bmOperationType = bmOperationType; - if (baAddr) { - size_t size = 0; - switch (bmOperationType >> 1) { - case 0: size = 2; break; - case 1: size = 6; break; - default: BUG(); - } - memcpy(cmd->baAddr, baAddr, size); - } - reply->rceb.bEventType = UWB_RC_CET_GENERAL; - reply->rceb.wEvent = UWB_RC_CMD_DEV_ADDR_MGMT; - result = uwb_rc_cmd(rc, "DEV-ADDR-MGMT", - &cmd->rccb, sizeof(*cmd), - &reply->rceb, sizeof(*reply)); - if (result < 0) - goto error_cmd; - if (result < sizeof(*reply)) { - dev_err(&rc->uwb_dev.dev, - "DEV-ADDR-MGMT: not enough data replied: " - "%d vs %zu bytes needed\n", result, sizeof(*reply)); - result = -ENOMSG; - } else if (reply->bResultCode != UWB_RC_RES_SUCCESS) { - dev_err(&rc->uwb_dev.dev, - "DEV-ADDR-MGMT: command execution failed: %s (%d)\n", - uwb_rc_strerror(reply->bResultCode), - reply->bResultCode); - result = -EIO; - } else - result = 0; -error_cmd: - kfree(cmd); -error_kzalloc: - return result; -} - - -/** - * Set the UWB RC MAC or device address. - * - * @rc: UWB Radio Controller - * @_addr: Pointer to address to write [assumed to be either a - * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *']. - * @type: Type of address to set (UWB_ADDR_DEV or UWB_ADDR_MAC). - * @returns: 0 if ok, < 0 errno code on error. - * - * Some anal retentivity here: even if both 'struct - * uwb_{dev,mac}_addr' have the actual byte array in the same offset - * and I could just pass _addr to hwarc_cmd_dev_addr_mgmt(), I prefer - * to use some syntatic sugar in case someday we decide to change the - * format of the structs. The compiler will optimize it out anyway. - */ -static int uwb_rc_addr_set(struct uwb_rc *rc, - const void *_addr, enum uwb_addr_type type) -{ - int result; - u8 bmOperationType = 0x1; /* Set address */ - const struct uwb_dev_addr *dev_addr = _addr; - const struct uwb_mac_addr *mac_addr = _addr; - struct uwb_rc_evt_dev_addr_mgmt reply; - const u8 *baAddr; - - result = -EINVAL; - switch (type) { - case UWB_ADDR_DEV: - baAddr = dev_addr->data; - break; - case UWB_ADDR_MAC: - baAddr = mac_addr->data; - bmOperationType |= 0x2; - break; - default: - return result; - } - return uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &reply); -} - - -/** - * Get the UWB radio's MAC or device address. - * - * @rc: UWB Radio Controller - * @_addr: Where to write the address data [assumed to be either a - * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *']. - * @type: Type of address to get (UWB_ADDR_DEV or UWB_ADDR_MAC). - * @returns: 0 if ok (and *_addr set), < 0 errno code on error. - * - * See comment in uwb_rc_addr_set() about anal retentivity in the - * type handling of the address variables. 
- */ -static int uwb_rc_addr_get(struct uwb_rc *rc, - void *_addr, enum uwb_addr_type type) -{ - int result; - u8 bmOperationType = 0x0; /* Get address */ - struct uwb_rc_evt_dev_addr_mgmt evt; - struct uwb_dev_addr *dev_addr = _addr; - struct uwb_mac_addr *mac_addr = _addr; - u8 *baAddr; - - result = -EINVAL; - switch (type) { - case UWB_ADDR_DEV: - baAddr = dev_addr->data; - break; - case UWB_ADDR_MAC: - bmOperationType |= 0x2; - baAddr = mac_addr->data; - break; - default: - return result; - } - result = uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &evt); - if (result == 0) - switch (type) { - case UWB_ADDR_DEV: - memcpy(&dev_addr->data, evt.baAddr, - sizeof(dev_addr->data)); - break; - case UWB_ADDR_MAC: - memcpy(&mac_addr->data, evt.baAddr, - sizeof(mac_addr->data)); - break; - default: /* shut gcc up */ - BUG(); - } - return result; -} - - -/** Get @rc's MAC address to @addr */ -int uwb_rc_mac_addr_get(struct uwb_rc *rc, - struct uwb_mac_addr *addr) { - return uwb_rc_addr_get(rc, addr, UWB_ADDR_MAC); -} -EXPORT_SYMBOL_GPL(uwb_rc_mac_addr_get); - - -/** Get @rc's device address to @addr */ -int uwb_rc_dev_addr_get(struct uwb_rc *rc, - struct uwb_dev_addr *addr) { - return uwb_rc_addr_get(rc, addr, UWB_ADDR_DEV); -} -EXPORT_SYMBOL_GPL(uwb_rc_dev_addr_get); - - -/** Set @rc's address to @addr */ -int uwb_rc_mac_addr_set(struct uwb_rc *rc, - const struct uwb_mac_addr *addr) -{ - int result = -EINVAL; - mutex_lock(&rc->uwb_dev.mutex); - result = uwb_rc_addr_set(rc, addr, UWB_ADDR_MAC); - mutex_unlock(&rc->uwb_dev.mutex); - return result; -} - - -/** Set @rc's address to @addr */ -int uwb_rc_dev_addr_set(struct uwb_rc *rc, - const struct uwb_dev_addr *addr) -{ - int result = -EINVAL; - mutex_lock(&rc->uwb_dev.mutex); - result = uwb_rc_addr_set(rc, addr, UWB_ADDR_DEV); - rc->uwb_dev.dev_addr = *addr; - mutex_unlock(&rc->uwb_dev.mutex); - return result; -} - -/* Returns !0 if given address is already assigned to device. */ -int __uwb_mac_addr_assigned_check(struct device *dev, void *_addr) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_mac_addr *addr = _addr; - - if (!uwb_mac_addr_cmp(addr, &uwb_dev->mac_addr)) - return !0; - return 0; -} - -/* Returns !0 if given address is already assigned to device. */ -int __uwb_dev_addr_assigned_check(struct device *dev, void *_addr) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_dev_addr *addr = _addr; - if (!uwb_dev_addr_cmp(addr, &uwb_dev->dev_addr)) - return !0; - return 0; -} - -/** - * uwb_dev_addr_assign - assigned a generated DevAddr to a radio controller - * @rc: the (local) radio controller device requiring a new DevAddr - * - * A new DevAddr is required when: - * - first setting up a radio controller - * - if the hardware reports a DevAddr conflict - * - * The DevAddr is randomly generated in the generated DevAddr range - * [0x100, 0xfeff]. The number of devices in a beacon group is limited - * by mMaxBPLength (96) so this address space will never be exhausted. - * - * [ECMA-368] 17.1.1, 17.16. 
- */ -int uwb_rc_dev_addr_assign(struct uwb_rc *rc) -{ - struct uwb_dev_addr new_addr; - - do { - get_random_bytes(new_addr.data, sizeof(new_addr.data)); - } while (new_addr.data[0] == 0x00 || new_addr.data[0] == 0xff - || __uwb_dev_addr_assigned(rc, &new_addr)); - - return uwb_rc_dev_addr_set(rc, &new_addr); -} - -/** - * uwbd_evt_handle_rc_dev_addr_conflict - handle a DEV_ADDR_CONFLICT event - * @evt: the DEV_ADDR_CONFLICT notification from the radio controller - * - * A new (non-conflicting) DevAddr is assigned to the radio controller. - * - * [ECMA-368] 17.1.1.1. - */ -int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt) -{ - struct uwb_rc *rc = evt->rc; - - return uwb_rc_dev_addr_assign(rc); -} - -/* - * Print the 48-bit EUI MAC address of the radio controller when - * reading /sys/class/uwb_rc/XX/mac_address - */ -static ssize_t uwb_rc_mac_addr_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_rc *rc = uwb_dev->rc; - struct uwb_mac_addr addr; - ssize_t result; - - mutex_lock(&rc->uwb_dev.mutex); - result = uwb_rc_addr_get(rc, &addr, UWB_ADDR_MAC); - mutex_unlock(&rc->uwb_dev.mutex); - if (result >= 0) { - result = uwb_mac_addr_print(buf, UWB_ADDR_STRSIZE, &addr); - buf[result++] = '\n'; - } - return result; -} - -/* - * Parse a 48 bit address written to /sys/class/uwb_rc/XX/mac_address - * and if correct, set it. - */ -static ssize_t uwb_rc_mac_addr_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_rc *rc = uwb_dev->rc; - struct uwb_mac_addr addr; - ssize_t result; - - if (!mac_pton(buf, addr.data)) - return -EINVAL; - if (is_multicast_ether_addr(addr.data)) { - dev_err(&rc->uwb_dev.dev, "refusing to set multicast " - "MAC address %s\n", buf); - return -EINVAL; - } - result = uwb_rc_mac_addr_set(rc, &addr); - if (result == 0) - rc->uwb_dev.mac_addr = addr; - - return result < 0 ? result : size; -} -DEVICE_ATTR(mac_address, S_IRUGO | S_IWUSR, uwb_rc_mac_addr_show, uwb_rc_mac_addr_store); - -/** Print @addr to @buf, @return bytes written */ -size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr, - int type) -{ - size_t result; - if (type) - result = scnprintf(buf, buf_size, "%pM", addr); - else - result = scnprintf(buf, buf_size, "%02x:%02x", - addr[1], addr[0]); - return result; -} -EXPORT_SYMBOL_GPL(__uwb_addr_print); diff --git a/drivers/staging/uwb/allocator.c b/drivers/staging/uwb/allocator.c deleted file mode 100644 index 1f429fba20b7..000000000000 --- a/drivers/staging/uwb/allocator.c +++ /dev/null @@ -1,374 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * UWB reservation management. - * - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 
- */ -#include <linux/kernel.h> -#include <linux/slab.h> -#include "uwb.h" - -#include "uwb-internal.h" - -static void uwb_rsv_fill_column_alloc(struct uwb_rsv_alloc_info *ai) -{ - int col, mas, safe_mas, unsafe_mas; - unsigned char *bm = ai->bm; - struct uwb_rsv_col_info *ci = ai->ci; - unsigned char c; - - for (col = ci->csi.start_col; col < UWB_NUM_ZONES; col += ci->csi.interval) { - - safe_mas = ci->csi.safe_mas_per_col; - unsafe_mas = ci->csi.unsafe_mas_per_col; - - for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++ ) { - if (bm[col * UWB_MAS_PER_ZONE + mas] == 0) { - - if (safe_mas > 0) { - safe_mas--; - c = UWB_RSV_MAS_SAFE; - } else if (unsafe_mas > 0) { - unsafe_mas--; - c = UWB_RSV_MAS_UNSAFE; - } else { - break; - } - bm[col * UWB_MAS_PER_ZONE + mas] = c; - } - } - } -} - -static void uwb_rsv_fill_row_alloc(struct uwb_rsv_alloc_info *ai) -{ - int mas, col, rows; - unsigned char *bm = ai->bm; - struct uwb_rsv_row_info *ri = &ai->ri; - unsigned char c; - - rows = 1; - c = UWB_RSV_MAS_SAFE; - for (mas = UWB_MAS_PER_ZONE - 1; mas >= 0; mas--) { - if (ri->avail[mas] == 1) { - - if (rows > ri->used_rows) { - break; - } else if (rows > 7) { - c = UWB_RSV_MAS_UNSAFE; - } - - for (col = 0; col < UWB_NUM_ZONES; col++) { - if (bm[col * UWB_NUM_ZONES + mas] != UWB_RSV_MAS_NOT_AVAIL) { - bm[col * UWB_NUM_ZONES + mas] = c; - if(c == UWB_RSV_MAS_SAFE) - ai->safe_allocated_mases++; - else - ai->unsafe_allocated_mases++; - } - } - rows++; - } - } - ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases; -} - -/* - * Find the best column set for a given availability, interval, num safe mas and - * num unsafe mas. - * - * The different sets are tried in order as shown below, depending on the interval. - * - * interval = 16 - * deep = 0 - * set 1 -> { 8 } - * deep = 1 - * set 1 -> { 4 } - * set 2 -> { 12 } - * deep = 2 - * set 1 -> { 2 } - * set 2 -> { 6 } - * set 3 -> { 10 } - * set 4 -> { 14 } - * deep = 3 - * set 1 -> { 1 } - * set 2 -> { 3 } - * set 3 -> { 5 } - * set 4 -> { 7 } - * set 5 -> { 9 } - * set 6 -> { 11 } - * set 7 -> { 13 } - * set 8 -> { 15 } - * - * interval = 8 - * deep = 0 - * set 1 -> { 4 12 } - * deep = 1 - * set 1 -> { 2 10 } - * set 2 -> { 6 14 } - * deep = 2 - * set 1 -> { 1 9 } - * set 2 -> { 3 11 } - * set 3 -> { 5 13 } - * set 4 -> { 7 15 } - * - * interval = 4 - * deep = 0 - * set 1 -> { 2 6 10 14 } - * deep = 1 - * set 1 -> { 1 5 9 13 } - * set 2 -> { 3 7 11 15 } - * - * interval = 2 - * deep = 0 - * set 1 -> { 1 3 5 7 9 11 13 15 } - */ -static int uwb_rsv_find_best_column_set(struct uwb_rsv_alloc_info *ai, int interval, - int num_safe_mas, int num_unsafe_mas) -{ - struct uwb_rsv_col_info *ci = ai->ci; - struct uwb_rsv_col_set_info *csi = &ci->csi; - struct uwb_rsv_col_set_info tmp_csi; - int deep, set, col, start_col_deep, col_start_set; - int start_col, max_mas_in_set, lowest_max_mas_in_deep; - int n_mas; - int found = UWB_RSV_ALLOC_NOT_FOUND; - - tmp_csi.start_col = 0; - start_col_deep = interval; - n_mas = num_unsafe_mas + num_safe_mas; - - for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) { - start_col_deep /= 2; - col_start_set = 0; - lowest_max_mas_in_deep = UWB_MAS_PER_ZONE; - - for (set = 1; set <= (1 << deep); set++) { - max_mas_in_set = 0; - start_col = start_col_deep + col_start_set; - for (col = start_col; col < UWB_NUM_ZONES; col += interval) { - - if (ci[col].max_avail_safe >= num_safe_mas && - ci[col].max_avail_unsafe >= n_mas) { - if (ci[col].highest_mas[n_mas] > max_mas_in_set) - max_mas_in_set = ci[col].highest_mas[n_mas]; 
- } else { - max_mas_in_set = 0; - break; - } - } - if ((lowest_max_mas_in_deep > max_mas_in_set) && max_mas_in_set) { - lowest_max_mas_in_deep = max_mas_in_set; - - tmp_csi.start_col = start_col; - } - col_start_set += (interval >> deep); - } - - if (lowest_max_mas_in_deep < 8) { - csi->start_col = tmp_csi.start_col; - found = UWB_RSV_ALLOC_FOUND; - break; - } else if ((lowest_max_mas_in_deep > 8) && - (lowest_max_mas_in_deep != UWB_MAS_PER_ZONE) && - (found == UWB_RSV_ALLOC_NOT_FOUND)) { - csi->start_col = tmp_csi.start_col; - found = UWB_RSV_ALLOC_FOUND; - } - } - - if (found == UWB_RSV_ALLOC_FOUND) { - csi->interval = interval; - csi->safe_mas_per_col = num_safe_mas; - csi->unsafe_mas_per_col = num_unsafe_mas; - - ai->safe_allocated_mases = (UWB_NUM_ZONES / interval) * num_safe_mas; - ai->unsafe_allocated_mases = (UWB_NUM_ZONES / interval) * num_unsafe_mas; - ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases; - ai->interval = interval; - } - return found; -} - -static void get_row_descriptors(struct uwb_rsv_alloc_info *ai) -{ - unsigned char *bm = ai->bm; - struct uwb_rsv_row_info *ri = &ai->ri; - int col, mas; - - ri->free_rows = 16; - for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) { - ri->avail[mas] = 1; - for (col = 1; col < UWB_NUM_ZONES; col++) { - if (bm[col * UWB_NUM_ZONES + mas] == UWB_RSV_MAS_NOT_AVAIL) { - ri->free_rows--; - ri->avail[mas]=0; - break; - } - } - } -} - -static void uwb_rsv_fill_column_info(unsigned char *bm, int column, struct uwb_rsv_col_info *rci) -{ - int mas; - int block_count = 0, start_block = 0; - int previous_avail = 0; - int available = 0; - int safe_mas_in_row[UWB_MAS_PER_ZONE] = { - 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, - }; - - rci->max_avail_safe = 0; - - for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) { - if (!bm[column * UWB_NUM_ZONES + mas]) { - available++; - rci->max_avail_unsafe = available; - - rci->highest_mas[available] = mas; - - if (previous_avail) { - block_count++; - if ((block_count > safe_mas_in_row[start_block]) && - (!rci->max_avail_safe)) - rci->max_avail_safe = available - 1; - } else { - previous_avail = 1; - start_block = mas; - block_count = 1; - } - } else { - previous_avail = 0; - } - } - if (!rci->max_avail_safe) - rci->max_avail_safe = rci->max_avail_unsafe; -} - -static void get_column_descriptors(struct uwb_rsv_alloc_info *ai) -{ - unsigned char *bm = ai->bm; - struct uwb_rsv_col_info *ci = ai->ci; - int col; - - for (col = 1; col < UWB_NUM_ZONES; col++) { - uwb_rsv_fill_column_info(bm, col, &ci[col]); - } -} - -static int uwb_rsv_find_best_row_alloc(struct uwb_rsv_alloc_info *ai) -{ - int n_rows; - int max_rows = ai->max_mas / UWB_USABLE_MAS_PER_ROW; - int min_rows = ai->min_mas / UWB_USABLE_MAS_PER_ROW; - if (ai->min_mas % UWB_USABLE_MAS_PER_ROW) - min_rows++; - for (n_rows = max_rows; n_rows >= min_rows; n_rows--) { - if (n_rows <= ai->ri.free_rows) { - ai->ri.used_rows = n_rows; - ai->interval = 1; /* row reservation */ - uwb_rsv_fill_row_alloc(ai); - return UWB_RSV_ALLOC_FOUND; - } - } - return UWB_RSV_ALLOC_NOT_FOUND; -} - -static int uwb_rsv_find_best_col_alloc(struct uwb_rsv_alloc_info *ai, int interval) -{ - int n_safe, n_unsafe, n_mas; - int n_column = UWB_NUM_ZONES / interval; - int max_per_zone = ai->max_mas / n_column; - int min_per_zone = ai->min_mas / n_column; - - if (ai->min_mas % n_column) - min_per_zone++; - - if (min_per_zone > UWB_MAS_PER_ZONE) { - return UWB_RSV_ALLOC_NOT_FOUND; - } - - if (max_per_zone > UWB_MAS_PER_ZONE) { - max_per_zone = UWB_MAS_PER_ZONE; 
- } - - for (n_mas = max_per_zone; n_mas >= min_per_zone; n_mas--) { - if (uwb_rsv_find_best_column_set(ai, interval, 0, n_mas) == UWB_RSV_ALLOC_NOT_FOUND) - continue; - for (n_safe = n_mas; n_safe >= 0; n_safe--) { - n_unsafe = n_mas - n_safe; - if (uwb_rsv_find_best_column_set(ai, interval, n_safe, n_unsafe) == UWB_RSV_ALLOC_FOUND) { - uwb_rsv_fill_column_alloc(ai); - return UWB_RSV_ALLOC_FOUND; - } - } - } - return UWB_RSV_ALLOC_NOT_FOUND; -} - -int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available, - struct uwb_mas_bm *result) -{ - struct uwb_rsv_alloc_info *ai; - int interval; - int bit_index; - - ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL); - if (!ai) - return UWB_RSV_ALLOC_NOT_FOUND; - ai->min_mas = rsv->min_mas; - ai->max_mas = rsv->max_mas; - ai->max_interval = rsv->max_interval; - - - /* fill the not available vector from the available bm */ - for_each_clear_bit(bit_index, available->bm, UWB_NUM_MAS) - ai->bm[bit_index] = UWB_RSV_MAS_NOT_AVAIL; - - if (ai->max_interval == 1) { - get_row_descriptors(ai); - if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND) - goto alloc_found; - else - goto alloc_not_found; - } - - get_column_descriptors(ai); - - for (interval = 16; interval >= 2; interval>>=1) { - if (interval > ai->max_interval) - continue; - if (uwb_rsv_find_best_col_alloc(ai, interval) == UWB_RSV_ALLOC_FOUND) - goto alloc_found; - } - - /* try row reservation if no column is found */ - get_row_descriptors(ai); - if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND) - goto alloc_found; - else - goto alloc_not_found; - - alloc_found: - bitmap_zero(result->bm, UWB_NUM_MAS); - bitmap_zero(result->unsafe_bm, UWB_NUM_MAS); - /* fill the safe and unsafe bitmaps */ - for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) { - if (ai->bm[bit_index] == UWB_RSV_MAS_SAFE) - set_bit(bit_index, result->bm); - else if (ai->bm[bit_index] == UWB_RSV_MAS_UNSAFE) - set_bit(bit_index, result->unsafe_bm); - } - bitmap_or(result->bm, result->bm, result->unsafe_bm, UWB_NUM_MAS); - - result->safe = ai->safe_allocated_mases; - result->unsafe = ai->unsafe_allocated_mases; - - kfree(ai); - return UWB_RSV_ALLOC_FOUND; - - alloc_not_found: - kfree(ai); - return UWB_RSV_ALLOC_NOT_FOUND; -} diff --git a/drivers/staging/uwb/beacon.c b/drivers/staging/uwb/beacon.c deleted file mode 100644 index c483c19c5ef8..000000000000 --- a/drivers/staging/uwb/beacon.c +++ /dev/null @@ -1,595 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band - * Beacon management - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: docs - */ -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/err.h> -#include <linux/kdev_t.h> -#include <linux/slab.h> - -#include "uwb-internal.h" - -/* Start Beaconing command structure */ -struct uwb_rc_cmd_start_beacon { - struct uwb_rccb rccb; - __le16 wBPSTOffset; - u8 bChannelNumber; -} __attribute__((packed)); - - -static int uwb_rc_start_beacon(struct uwb_rc *rc, u16 bpst_offset, u8 channel) -{ - int result; - struct uwb_rc_cmd_start_beacon *cmd; - struct uwb_rc_evt_confirm reply; - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - return -ENOMEM; - cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; - cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_START_BEACON); - cmd->wBPSTOffset = cpu_to_le16(bpst_offset); - cmd->bChannelNumber = channel; - reply.rceb.bEventType = 
UWB_RC_CET_GENERAL; - reply.rceb.wEvent = UWB_RC_CMD_START_BEACON; - result = uwb_rc_cmd(rc, "START-BEACON", &cmd->rccb, sizeof(*cmd), - &reply.rceb, sizeof(reply)); - if (result < 0) - goto error_cmd; - if (reply.bResultCode != UWB_RC_RES_SUCCESS) { - dev_err(&rc->uwb_dev.dev, - "START-BEACON: command execution failed: %s (%d)\n", - uwb_rc_strerror(reply.bResultCode), reply.bResultCode); - result = -EIO; - } -error_cmd: - kfree(cmd); - return result; -} - -static int uwb_rc_stop_beacon(struct uwb_rc *rc) -{ - int result; - struct uwb_rccb *cmd; - struct uwb_rc_evt_confirm reply; - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - return -ENOMEM; - cmd->bCommandType = UWB_RC_CET_GENERAL; - cmd->wCommand = cpu_to_le16(UWB_RC_CMD_STOP_BEACON); - reply.rceb.bEventType = UWB_RC_CET_GENERAL; - reply.rceb.wEvent = UWB_RC_CMD_STOP_BEACON; - result = uwb_rc_cmd(rc, "STOP-BEACON", cmd, sizeof(*cmd), - &reply.rceb, sizeof(reply)); - if (result < 0) - goto error_cmd; - if (reply.bResultCode != UWB_RC_RES_SUCCESS) { - dev_err(&rc->uwb_dev.dev, - "STOP-BEACON: command execution failed: %s (%d)\n", - uwb_rc_strerror(reply.bResultCode), reply.bResultCode); - result = -EIO; - } -error_cmd: - kfree(cmd); - return result; -} - -/* - * Start/stop beacons - * - * @rc: UWB Radio Controller to operate on - * @channel: UWB channel on which to beacon (WUSB[table - * 5-12]). If -1, stop beaconing. - * @bpst_offset: Beacon Period Start Time offset; FIXME-do zero - * - * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB - * of a SET IE command after the device sent the first beacon that includes - * the IEs specified in the SET IE command. So, after we start beaconing we - * check if there is anything in the IE cache and call the SET IE command - * if needed. - */ -int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset) -{ - int result; - struct device *dev = &rc->uwb_dev.dev; - - dev_dbg(dev, "%s: channel = %d\n", __func__, channel); - if (channel < 0) - channel = -1; - if (channel == -1) - result = uwb_rc_stop_beacon(rc); - else { - /* channel >= 0...dah */ - result = uwb_rc_start_beacon(rc, bpst_offset, channel); - if (result < 0) { - dev_err(dev, "Cannot start beaconing: %d\n", result); - return result; - } - if (le16_to_cpu(rc->ies->wIELength) > 0) { - result = uwb_rc_set_ie(rc, rc->ies); - if (result < 0) { - dev_err(dev, "Cannot set new IE on device: " - "%d\n", result); - result = uwb_rc_stop_beacon(rc); - channel = -1; - bpst_offset = 0; - } - } - } - - if (result >= 0) - rc->beaconing = channel; - return result; -} - -/* - * Beacon cache - * - * The purpose of this is to speed up the lookup of becon information - * when a new beacon arrives. The UWB Daemon uses it also to keep a - * tab of which devices are in radio distance and which not. When a - * device's beacon stays present for more than a certain amount of - * time, it is considered a new, usable device. When a beacon ceases - * to be received for a certain amount of time, it is considered that - * the device is gone. 
- * - * FIXME: use an allocator for the entries - * FIXME: use something faster for search than a list - */ - -void uwb_bce_kfree(struct kref *_bce) -{ - struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt); - - kfree(bce->be); - kfree(bce); -} - - -/* Find a beacon by dev addr in the cache */ -static -struct uwb_beca_e *__uwb_beca_find_bydev(struct uwb_rc *rc, - const struct uwb_dev_addr *dev_addr) -{ - struct uwb_beca_e *bce, *next; - list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { - if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr))) - goto out; - } - bce = NULL; -out: - return bce; -} - -/* Find a beacon by dev addr in the cache */ -static -struct uwb_beca_e *__uwb_beca_find_bymac(struct uwb_rc *rc, - const struct uwb_mac_addr *mac_addr) -{ - struct uwb_beca_e *bce, *next; - list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { - if (!memcmp(bce->mac_addr, mac_addr->data, - sizeof(struct uwb_mac_addr))) - goto out; - } - bce = NULL; -out: - return bce; -} - -/** - * uwb_dev_get_by_devaddr - get a UWB device with a specific DevAddr - * @rc: the radio controller that saw the device - * @devaddr: DevAddr of the UWB device to find - * - * There may be more than one matching device (in the case of a - * DevAddr conflict), but only the first one is returned. - */ -struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, - const struct uwb_dev_addr *devaddr) -{ - struct uwb_dev *found = NULL; - struct uwb_beca_e *bce; - - mutex_lock(&rc->uwb_beca.mutex); - bce = __uwb_beca_find_bydev(rc, devaddr); - if (bce) - found = uwb_dev_try_get(rc, bce->uwb_dev); - mutex_unlock(&rc->uwb_beca.mutex); - - return found; -} - -/** - * uwb_dev_get_by_macaddr - get a UWB device with a specific EUI-48 - * @rc: the radio controller that saw the device - * @devaddr: EUI-48 of the UWB device to find - */ -struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, - const struct uwb_mac_addr *macaddr) -{ - struct uwb_dev *found = NULL; - struct uwb_beca_e *bce; - - mutex_lock(&rc->uwb_beca.mutex); - bce = __uwb_beca_find_bymac(rc, macaddr); - if (bce) - found = uwb_dev_try_get(rc, bce->uwb_dev); - mutex_unlock(&rc->uwb_beca.mutex); - - return found; -} - -/* Initialize a beacon cache entry */ -static void uwb_beca_e_init(struct uwb_beca_e *bce) -{ - mutex_init(&bce->mutex); - kref_init(&bce->refcnt); - stats_init(&bce->lqe_stats); - stats_init(&bce->rssi_stats); -} - -/* - * Add a beacon to the cache - * - * @be: Beacon event information - * @bf: Beacon frame (part of b, really) - * @ts_jiffies: Timestamp (in jiffies) when the beacon was received - */ -static -struct uwb_beca_e *__uwb_beca_add(struct uwb_rc *rc, - struct uwb_rc_evt_beacon *be, - struct uwb_beacon_frame *bf, - unsigned long ts_jiffies) -{ - struct uwb_beca_e *bce; - - bce = kzalloc(sizeof(*bce), GFP_KERNEL); - if (bce == NULL) - return NULL; - uwb_beca_e_init(bce); - bce->ts_jiffies = ts_jiffies; - bce->uwb_dev = NULL; - list_add(&bce->node, &rc->uwb_beca.list); - return bce; -} - -/* - * Wipe out beacon entries that became stale - * - * Remove associated devicest too. - */ -void uwb_beca_purge(struct uwb_rc *rc) -{ - struct uwb_beca_e *bce, *next; - unsigned long expires; - - mutex_lock(&rc->uwb_beca.mutex); - list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { - expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms); - if (time_after(jiffies, expires)) { - uwbd_dev_offair(bce); - } - } - mutex_unlock(&rc->uwb_beca.mutex); -} - -/* Clean up the whole beacon cache. 
Called on shutdown */ -void uwb_beca_release(struct uwb_rc *rc) -{ - struct uwb_beca_e *bce, *next; - - mutex_lock(&rc->uwb_beca.mutex); - list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { - list_del(&bce->node); - uwb_bce_put(bce); - } - mutex_unlock(&rc->uwb_beca.mutex); -} - -static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be, - struct uwb_beacon_frame *bf) -{ - char macbuf[UWB_ADDR_STRSIZE]; - char devbuf[UWB_ADDR_STRSIZE]; - char dstbuf[UWB_ADDR_STRSIZE]; - - uwb_mac_addr_print(macbuf, sizeof(macbuf), &bf->Device_Identifier); - uwb_dev_addr_print(devbuf, sizeof(devbuf), &bf->hdr.SrcAddr); - uwb_dev_addr_print(dstbuf, sizeof(dstbuf), &bf->hdr.DestAddr); - dev_info(&rc->uwb_dev.dev, - "BEACON from %s to %s (ch%u offset %u slot %u MAC %s)\n", - devbuf, dstbuf, be->bChannelNumber, be->wBPSTOffset, - bf->Beacon_Slot_Number, macbuf); -} - -/* - * @bce: beacon cache entry, referenced - */ -ssize_t uwb_bce_print_IEs(struct uwb_dev *uwb_dev, struct uwb_beca_e *bce, - char *buf, size_t size) -{ - ssize_t result = 0; - struct uwb_rc_evt_beacon *be; - struct uwb_beacon_frame *bf; - int ies_len; - struct uwb_ie_hdr *ies; - - mutex_lock(&bce->mutex); - - be = bce->be; - if (be) { - bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo; - ies_len = be->wBeaconInfoLength - sizeof(struct uwb_beacon_frame); - ies = (struct uwb_ie_hdr *)bf->IEData; - - result = uwb_ie_dump_hex(ies, ies_len, buf, size); - } - - mutex_unlock(&bce->mutex); - - return result; -} - -/* - * Verify that the beacon event, frame and IEs are ok - */ -static int uwb_verify_beacon(struct uwb_rc *rc, struct uwb_event *evt, - struct uwb_rc_evt_beacon *be) -{ - int result = -EINVAL; - struct uwb_beacon_frame *bf; - struct device *dev = &rc->uwb_dev.dev; - - /* Is there enough data to decode a beacon frame? */ - if (evt->notif.size < sizeof(*be) + sizeof(*bf)) { - dev_err(dev, "BEACON event: Not enough data to decode " - "(%zu vs %zu bytes needed)\n", evt->notif.size, - sizeof(*be) + sizeof(*bf)); - goto error; - } - /* FIXME: make sure beacon frame IEs are fine and that the whole thing - * is consistent */ - result = 0; -error: - return result; -} - -/* - * Handle UWB_RC_EVT_BEACON events - * - * We check the beacon cache to see how the received beacon fares. If - * is there already we refresh the timestamp. If not we create a new - * entry. - * - * According to the WHCI and WUSB specs, only one beacon frame is - * allowed per notification block, so we don't bother about scanning - * for more. - */ -int uwbd_evt_handle_rc_beacon(struct uwb_event *evt) -{ - int result = -EINVAL; - struct uwb_rc *rc; - struct uwb_rc_evt_beacon *be; - struct uwb_beacon_frame *bf; - struct uwb_beca_e *bce; - - rc = evt->rc; - be = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon, rceb); - result = uwb_verify_beacon(rc, evt, be); - if (result < 0) - return result; - - /* FIXME: handle alien beacons. */ - if (be->bBeaconType == UWB_RC_BEACON_TYPE_OL_ALIEN || - be->bBeaconType == UWB_RC_BEACON_TYPE_NOL_ALIEN) { - return -ENOSYS; - } - - bf = (struct uwb_beacon_frame *) be->BeaconInfo; - - /* - * Drop beacons from devices with a NULL EUI-48 -- they cannot - * be uniquely identified. - * - * It's expected that these will all be WUSB devices and they - * have a WUSB specific connection method so ignoring them - * here shouldn't be a problem. 
- */ - if (uwb_mac_addr_bcast(&bf->Device_Identifier)) - return 0; - - mutex_lock(&rc->uwb_beca.mutex); - bce = __uwb_beca_find_bymac(rc, &bf->Device_Identifier); - if (bce == NULL) { - /* Not in there, a new device is pinging */ - uwb_beacon_print(evt->rc, be, bf); - bce = __uwb_beca_add(rc, be, bf, evt->ts_jiffies); - if (bce == NULL) { - mutex_unlock(&rc->uwb_beca.mutex); - return -ENOMEM; - } - } - mutex_unlock(&rc->uwb_beca.mutex); - - mutex_lock(&bce->mutex); - /* purge old beacon data */ - kfree(bce->be); - - /* Update commonly used fields */ - bce->ts_jiffies = evt->ts_jiffies; - bce->be = be; - bce->dev_addr = bf->hdr.SrcAddr; - bce->mac_addr = &bf->Device_Identifier; - be->wBPSTOffset = le16_to_cpu(be->wBPSTOffset); - be->wBeaconInfoLength = le16_to_cpu(be->wBeaconInfoLength); - stats_add_sample(&bce->lqe_stats, be->bLQI - 7); - stats_add_sample(&bce->rssi_stats, be->bRSSI + 18); - - /* - * This might be a beacon from a new device. - */ - if (bce->uwb_dev == NULL) - uwbd_dev_onair(evt->rc, bce); - - mutex_unlock(&bce->mutex); - - return 1; /* we keep the event data */ -} - -/* - * Handle UWB_RC_EVT_BEACON_SIZE events - * - * XXXXX - */ -int uwbd_evt_handle_rc_beacon_size(struct uwb_event *evt) -{ - int result = -EINVAL; - struct device *dev = &evt->rc->uwb_dev.dev; - struct uwb_rc_evt_beacon_size *bs; - - /* Is there enough data to decode the event? */ - if (evt->notif.size < sizeof(*bs)) { - dev_err(dev, "BEACON SIZE notification: Not enough data to " - "decode (%zu vs %zu bytes needed)\n", - evt->notif.size, sizeof(*bs)); - goto error; - } - bs = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon_size, rceb); - if (0) - dev_info(dev, "Beacon size changed to %u bytes " - "(FIXME: action?)\n", le16_to_cpu(bs->wNewBeaconSize)); - else { - /* temporary hack until we do something with this message... */ - static unsigned count; - if (++count % 1000 == 0) - dev_info(dev, "Beacon size changed %u times " - "(FIXME: action?)\n", count); - } - result = 0; -error: - return result; -} - -/** - * uwbd_evt_handle_rc_bp_slot_change - handle a BP_SLOT_CHANGE event - * @evt: the BP_SLOT_CHANGE notification from the radio controller - * - * If the event indicates that no beacon period slots were available - * then radio controller has transitioned to a non-beaconing state. - * Otherwise, simply save the current beacon slot. - */ -int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *evt) -{ - struct uwb_rc *rc = evt->rc; - struct device *dev = &rc->uwb_dev.dev; - struct uwb_rc_evt_bp_slot_change *bpsc; - - if (evt->notif.size < sizeof(*bpsc)) { - dev_err(dev, "BP SLOT CHANGE event: Not enough data\n"); - return -EINVAL; - } - bpsc = container_of(evt->notif.rceb, struct uwb_rc_evt_bp_slot_change, rceb); - - if (uwb_rc_evt_bp_slot_change_no_slot(bpsc)) { - dev_err(dev, "stopped beaconing: No free slots in BP\n"); - mutex_lock(&rc->uwb_dev.mutex); - rc->beaconing = -1; - mutex_unlock(&rc->uwb_dev.mutex); - } else - rc->uwb_dev.beacon_slot = uwb_rc_evt_bp_slot_change_slot_num(bpsc); - - return 0; -} - -/** - * Handle UWB_RC_EVT_BPOIE_CHANGE events - * - * XXXXX - */ -struct uwb_ie_bpo { - struct uwb_ie_hdr hdr; - u8 bp_length; - u8 data[]; -} __attribute__((packed)); - -int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *evt) -{ - int result = -EINVAL; - struct device *dev = &evt->rc->uwb_dev.dev; - struct uwb_rc_evt_bpoie_change *bpoiec; - struct uwb_ie_bpo *bpoie; - static unsigned count; /* FIXME: this is a temp hack */ - size_t iesize; - - /* Is there enough data to decode it? 
*/ - if (evt->notif.size < sizeof(*bpoiec)) { - dev_err(dev, "BPOIEC notification: Not enough data to " - "decode (%zu vs %zu bytes needed)\n", - evt->notif.size, sizeof(*bpoiec)); - goto error; - } - bpoiec = container_of(evt->notif.rceb, struct uwb_rc_evt_bpoie_change, rceb); - iesize = le16_to_cpu(bpoiec->wBPOIELength); - if (iesize < sizeof(*bpoie)) { - dev_err(dev, "BPOIEC notification: Not enough IE data to " - "decode (%zu vs %zu bytes needed)\n", - iesize, sizeof(*bpoie)); - goto error; - } - if (++count % 1000 == 0) /* Lame placeholder */ - dev_info(dev, "BPOIE: %u changes received\n", count); - /* - * FIXME: At this point we should go over all the IEs in the - * bpoiec->BPOIE array and act on each. - */ - result = 0; -error: - return result; -} - -/* - * Print beaconing state. - */ -static ssize_t uwb_rc_beacon_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_rc *rc = uwb_dev->rc; - ssize_t result; - - mutex_lock(&rc->uwb_dev.mutex); - result = sprintf(buf, "%d\n", rc->beaconing); - mutex_unlock(&rc->uwb_dev.mutex); - return result; -} - -/* - * Start beaconing on the specified channel, or stop beaconing. - */ -static ssize_t uwb_rc_beacon_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_rc *rc = uwb_dev->rc; - int channel; - ssize_t result = -EINVAL; - - result = sscanf(buf, "%d", &channel); - if (result >= 1) - result = uwb_radio_force_channel(rc, channel); - - return result < 0 ? result : size; -} -DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, uwb_rc_beacon_show, uwb_rc_beacon_store); diff --git a/drivers/staging/uwb/driver.c b/drivers/staging/uwb/driver.c deleted file mode 100644 index 5755c2e49ffc..000000000000 --- a/drivers/staging/uwb/driver.c +++ /dev/null @@ -1,143 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band - * Driver initialization, etc - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: docs - * - * Life cycle: FIXME: explain - * - * UWB radio controller: - * - * 1. alloc a uwb_rc, zero it - * 2. call uwb_rc_init() on it to set it up + ops (won't do any - * kind of allocation) - * 3. register (now it is owned by the UWB stack--deregister before - * freeing/destroying). - * 4. It lives on it's own now (UWB stack handles)--when it - * disconnects, call unregister() - * 5. free it. - * - * Make sure you have a reference to the uwb_rc before calling - * any of the UWB API functions. - * - * TODO: - * - * 1. Locking and life cycle management is crappy still. All entry - * points to the UWB HCD API assume you have a reference on the - * uwb_rc structure and that it won't go away. They mutex lock it - * before doing anything. - */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/err.h> -#include <linux/kdev_t.h> -#include <linux/random.h> - -#include "uwb-internal.h" - - -/* UWB stack attributes (or 'global' constants) */ - - -/** - * If a beacon disappears for longer than this, then we consider the - * device who was represented by that beacon to be gone. - * - * ECMA-368[17.2.3, last para] establishes that a device must not - * consider a device to be its neighbour if he doesn't receive a beacon - * for more than mMaxLostBeacons. 
mMaxLostBeacons is defined in - * ECMA-368[17.16] as 3; because we can get only one beacon per - * superframe, that'd be 3 * 65ms = 195 ~ 200 ms. Let's give it time - * for jitter and stuff and make it 500 ms. - */ -unsigned long beacon_timeout_ms = 500; - -static -ssize_t beacon_timeout_ms_show(struct class *class, - struct class_attribute *attr, - char *buf) -{ - return scnprintf(buf, PAGE_SIZE, "%lu\n", beacon_timeout_ms); -} - -static -ssize_t beacon_timeout_ms_store(struct class *class, - struct class_attribute *attr, - const char *buf, size_t size) -{ - unsigned long bt; - ssize_t result; - result = sscanf(buf, "%lu", &bt); - if (result != 1) - return -EINVAL; - beacon_timeout_ms = bt; - return size; -} -static CLASS_ATTR_RW(beacon_timeout_ms); - -static struct attribute *uwb_class_attrs[] = { - &class_attr_beacon_timeout_ms.attr, - NULL, -}; -ATTRIBUTE_GROUPS(uwb_class); - -/** Device model classes */ -struct class uwb_rc_class = { - .name = "uwb_rc", - .class_groups = uwb_class_groups, -}; - - -static int __init uwb_subsys_init(void) -{ - int result = 0; - - result = uwb_est_create(); - if (result < 0) { - printk(KERN_ERR "uwb: Can't initialize EST subsystem\n"); - goto error_est_init; - } - - result = class_register(&uwb_rc_class); - if (result < 0) - goto error_uwb_rc_class_register; - - /* Register the UWB bus */ - result = bus_register(&uwb_bus_type); - if (result) { - pr_err("%s - registering bus driver failed\n", __func__); - goto exit_bus; - } - - uwb_dbg_init(); - return 0; - -exit_bus: - class_unregister(&uwb_rc_class); -error_uwb_rc_class_register: - uwb_est_destroy(); -error_est_init: - return result; -} -module_init(uwb_subsys_init); - -static void __exit uwb_subsys_exit(void) -{ - uwb_dbg_exit(); - bus_unregister(&uwb_bus_type); - class_unregister(&uwb_rc_class); - uwb_est_destroy(); - return; -} -module_exit(uwb_subsys_exit); - -MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); -MODULE_DESCRIPTION("Ultra Wide Band core"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/uwb/drp-avail.c b/drivers/staging/uwb/drp-avail.c deleted file mode 100644 index 02392ab82a7d..000000000000 --- a/drivers/staging/uwb/drp-avail.c +++ /dev/null @@ -1,278 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band - * DRP availability management - * - * Copyright (C) 2005-2006 Intel Corporation - * Reinette Chatre <reinette.chatre@intel.com> - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. - * - * Manage DRP Availability (the MAS available for DRP - * reservations). Thus: - * - * - Handle DRP Availability Change notifications - * - * - Allow the reservation manager to indicate MAS reserved/released - * by local (owned by/targeted at the radio controller) - * reservations. - * - * - Based on the two sources above, generate a DRP Availability IE to - * be included in the beacon. - * - * See also the documentation for struct uwb_drp_avail. - */ - -#include <linux/errno.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/bitmap.h> -#include "uwb-internal.h" - -/** - * uwb_drp_avail_init - initialize an RC's MAS availability - * - * All MAS are available initially. The RC will inform use which - * slots are used for the BP (it may change in size). - */ -void uwb_drp_avail_init(struct uwb_rc *rc) -{ - bitmap_fill(rc->drp_avail.global, UWB_NUM_MAS); - bitmap_fill(rc->drp_avail.local, UWB_NUM_MAS); - bitmap_fill(rc->drp_avail.pending, UWB_NUM_MAS); -} - -/* - * Determine MAS available for new local reservations. 
- * - * avail = global & local & pending - */ -void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail) -{ - bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS); - bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS); -} - -/** - * uwb_drp_avail_reserve_pending - reserve MAS for a new reservation - * @rc: the radio controller - * @mas: the MAS to reserve - * - * Returns 0 on success, or -EBUSY if the MAS requested aren't available. - */ -int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas) -{ - struct uwb_mas_bm avail; - - uwb_drp_available(rc, &avail); - if (!bitmap_subset(mas->bm, avail.bm, UWB_NUM_MAS)) - return -EBUSY; - - bitmap_andnot(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); - return 0; -} - -/** - * uwb_drp_avail_reserve - reserve MAS for an established reservation - * @rc: the radio controller - * @mas: the MAS to reserve - */ -void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas) -{ - bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); - bitmap_andnot(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS); - rc->drp_avail.ie_valid = false; -} - -/** - * uwb_drp_avail_release - release MAS from a pending or established reservation - * @rc: the radio controller - * @mas: the MAS to release - */ -void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas) -{ - bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS); - bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); - rc->drp_avail.ie_valid = false; - uwb_rsv_handle_drp_avail_change(rc); -} - -/** - * uwb_drp_avail_ie_update - update the DRP Availability IE - * @rc: the radio controller - * - * avail = global & local - */ -void uwb_drp_avail_ie_update(struct uwb_rc *rc) -{ - struct uwb_mas_bm avail; - - bitmap_and(avail.bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS); - - rc->drp_avail.ie.hdr.element_id = UWB_IE_DRP_AVAILABILITY; - rc->drp_avail.ie.hdr.length = UWB_NUM_MAS / 8; - uwb_mas_bm_copy_le(rc->drp_avail.ie.bmp, &avail); - rc->drp_avail.ie_valid = true; -} - -/** - * Create an unsigned long from a buffer containing a byte stream. - * - * @array: pointer to buffer - * @itr: index of buffer from where we start - * @len: the buffer's remaining size may not be exact multiple of - * sizeof(unsigned long), @len is the length of buffer that needs - * to be converted. This will be sizeof(unsigned long) or smaller - * (BUG if not). If it is smaller then we will pad the remaining - * space of the result with zeroes. - */ -static -unsigned long get_val(u8 *array, size_t itr, size_t len) -{ - unsigned long val = 0; - size_t top = itr + len; - - BUG_ON(len > sizeof(val)); - - while (itr < top) { - val <<= 8; - val |= array[top - 1]; - top--; - } - val <<= 8 * (sizeof(val) - len); /* padding */ - return val; -} - -/** - * Initialize bitmap from data buffer. - * - * The bitmap to be converted could come from a IE, for example a - * DRP Availability IE. - * From ECMA-368 1.0 [16.8.7]: " - * octets: 1 1 N * (0 to 32) - * Element ID Length (=N) DRP Availability Bitmap - * - * The DRP Availability Bitmap field is up to 256 bits long, one - * bit for each MAS in the superframe, where the least-significant - * bit of the field corresponds to the first MAS in the superframe - * and successive bits correspond to successive MASs." 
- * - * The DRP Availability bitmap is in octets from 0 to 32, so octet - * 32 contains bits for MAS 1-8, etc. If the bitmap is smaller than 32 - * octets, the bits in octets not included at the end of the bitmap are - * treated as zero. In this case (when the bitmap is smaller than 32 - * octets) the MAS represented range from MAS 1 to MAS (size of bitmap) - * with the last octet still containing bits for MAS 1-8, etc. - * - * For example: - * F00F0102 03040506 0708090A 0B0C0D0E 0F010203 - * ^^^^ - * |||| - * |||| - * |||\LSB of byte is MAS 9 - * ||\MSB of byte is MAS 16 - * |\LSB of first byte is MAS 1 - * \ MSB of byte is MAS 8 - * - * An example of this encoding can be found in ECMA-368 Annex-D [Table D.11] - * - * The resulting bitmap will have the following mapping: - * bit position 0 == MAS 1 - * bit position 1 == MAS 2 - * ... - * bit position (UWB_NUM_MAS - 1) == MAS UWB_NUM_MAS - * - * @bmp_itr: pointer to bitmap (can be declared with DECLARE_BITMAP) - * @buffer: pointer to buffer containing bitmap data in big endian - * format (MSB first) - * @buffer_size:number of bytes with which bitmap should be initialized - */ -static -void buffer_to_bmp(unsigned long *bmp_itr, void *_buffer, - size_t buffer_size) -{ - u8 *buffer = _buffer; - size_t itr, len; - unsigned long val; - - itr = 0; - while (itr < buffer_size) { - len = buffer_size - itr >= sizeof(val) ? - sizeof(val) : buffer_size - itr; - val = get_val(buffer, itr, len); - bmp_itr[itr / sizeof(val)] = val; - itr += sizeof(val); - } -} - - -/** - * Extract DRP Availability bitmap from the notification. - * - * The notification that comes in contains a bitmap of (UWB_NUM_MAS / 8) bytes - * We convert that to our internal representation. - */ -static -int uwbd_evt_get_drp_avail(struct uwb_event *evt, unsigned long *bmp) -{ - struct device *dev = &evt->rc->uwb_dev.dev; - struct uwb_rc_evt_drp_avail *drp_evt; - int result = -EINVAL; - - /* Is there enough data to decode the event? */ - if (evt->notif.size < sizeof(*drp_evt)) { - dev_err(dev, "DRP Availability Change: Not enough " - "data to decode event [%zu bytes, %zu " - "needed]\n", evt->notif.size, sizeof(*drp_evt)); - goto error; - } - drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp_avail, rceb); - buffer_to_bmp(bmp, drp_evt->bmp, UWB_NUM_MAS/8); - result = 0; -error: - return result; -} - - -/** - * Process an incoming DRP Availability notification. - * - * @evt: Event information (packs the actual event data, which - * radio controller it came to, etc). - * - * @returns: 0 on success (so uwbd() frees the event buffer), < 0 - * on error. - * - * According to ECMA-368 1.0 [16.8.7], bits set to ONE indicate that - * the MAS slot is available, bits set to ZERO indicate that the slot - * is busy. - * - * So we clear available slots, we set used slots :) - * - * The notification only marks non-availability based on the BP and - * received DRP IEs that are not for this radio controller. A copy of - * this bitmap is needed to generate the real availability (which - * includes local and pending reservations). - * - * The DRP Availability IE that this radio controller emits will need - * to be updated. 
- */ -int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt) -{ - int result; - struct uwb_rc *rc = evt->rc; - DECLARE_BITMAP(bmp, UWB_NUM_MAS); - - result = uwbd_evt_get_drp_avail(evt, bmp); - if (result < 0) - return result; - - mutex_lock(&rc->rsvs_mutex); - bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS); - rc->drp_avail.ie_valid = false; - uwb_rsv_handle_drp_avail_change(rc); - mutex_unlock(&rc->rsvs_mutex); - - uwb_rsv_sched_update(rc); - - return 0; -} diff --git a/drivers/staging/uwb/drp-ie.c b/drivers/staging/uwb/drp-ie.c deleted file mode 100644 index b2a862cf76de..000000000000 --- a/drivers/staging/uwb/drp-ie.c +++ /dev/null @@ -1,305 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * UWB DRP IE management. - * - * Copyright (C) 2005-2006 Intel Corporation - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. - */ -#include <linux/kernel.h> -#include <linux/random.h> -#include <linux/slab.h> - -#include "uwb.h" -#include "uwb-internal.h" - - -/* - * Return the reason code for a reservations's DRP IE. - */ -static int uwb_rsv_reason_code(struct uwb_rsv *rsv) -{ - static const int reason_codes[] = { - [UWB_RSV_STATE_O_INITIATED] = UWB_DRP_REASON_ACCEPTED, - [UWB_RSV_STATE_O_PENDING] = UWB_DRP_REASON_ACCEPTED, - [UWB_RSV_STATE_O_MODIFIED] = UWB_DRP_REASON_MODIFIED, - [UWB_RSV_STATE_O_ESTABLISHED] = UWB_DRP_REASON_ACCEPTED, - [UWB_RSV_STATE_O_TO_BE_MOVED] = UWB_DRP_REASON_ACCEPTED, - [UWB_RSV_STATE_O_MOVE_COMBINING] = UWB_DRP_REASON_MODIFIED, - [UWB_RSV_STATE_O_MOVE_REDUCING] = UWB_DRP_REASON_MODIFIED, - [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, - [UWB_RSV_STATE_T_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, - [UWB_RSV_STATE_T_CONFLICT] = UWB_DRP_REASON_CONFLICT, - [UWB_RSV_STATE_T_PENDING] = UWB_DRP_REASON_PENDING, - [UWB_RSV_STATE_T_DENIED] = UWB_DRP_REASON_DENIED, - [UWB_RSV_STATE_T_RESIZED] = UWB_DRP_REASON_ACCEPTED, - [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, - [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, - [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, - [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, - }; - - return reason_codes[rsv->state]; -} - -/* - * Return the reason code for a reservations's companion DRP IE . - */ -static int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv) -{ - static const int companion_reason_codes[] = { - [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, - [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, - [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, - [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, - [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, - }; - - return companion_reason_codes[rsv->state]; -} - -/* - * Return the status bit for a reservations's DRP IE. 
- */ -int uwb_rsv_status(struct uwb_rsv *rsv) -{ - static const int statuses[] = { - [UWB_RSV_STATE_O_INITIATED] = 0, - [UWB_RSV_STATE_O_PENDING] = 0, - [UWB_RSV_STATE_O_MODIFIED] = 1, - [UWB_RSV_STATE_O_ESTABLISHED] = 1, - [UWB_RSV_STATE_O_TO_BE_MOVED] = 0, - [UWB_RSV_STATE_O_MOVE_COMBINING] = 1, - [UWB_RSV_STATE_O_MOVE_REDUCING] = 1, - [UWB_RSV_STATE_O_MOVE_EXPANDING] = 1, - [UWB_RSV_STATE_T_ACCEPTED] = 1, - [UWB_RSV_STATE_T_CONFLICT] = 0, - [UWB_RSV_STATE_T_PENDING] = 0, - [UWB_RSV_STATE_T_DENIED] = 0, - [UWB_RSV_STATE_T_RESIZED] = 1, - [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, - [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1, - [UWB_RSV_STATE_T_EXPANDING_PENDING] = 1, - [UWB_RSV_STATE_T_EXPANDING_DENIED] = 1, - - }; - - return statuses[rsv->state]; -} - -/* - * Return the status bit for a reservations's companion DRP IE . - */ -int uwb_rsv_companion_status(struct uwb_rsv *rsv) -{ - static const int companion_statuses[] = { - [UWB_RSV_STATE_O_MOVE_EXPANDING] = 0, - [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, - [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0, - [UWB_RSV_STATE_T_EXPANDING_PENDING] = 0, - [UWB_RSV_STATE_T_EXPANDING_DENIED] = 0, - }; - - return companion_statuses[rsv->state]; -} - -/* - * Allocate a DRP IE. - * - * To save having to free/allocate a DRP IE when its MAS changes, - * enough memory is allocated for the maxiumum number of DRP - * allocation fields. This gives an overhead per reservation of up to - * (UWB_NUM_ZONES - 1) * 4 = 60 octets. - */ -static struct uwb_ie_drp *uwb_drp_ie_alloc(void) -{ - struct uwb_ie_drp *drp_ie; - - drp_ie = kzalloc(struct_size(drp_ie, allocs, UWB_NUM_ZONES), - GFP_KERNEL); - if (drp_ie) - drp_ie->hdr.element_id = UWB_IE_DRP; - return drp_ie; -} - - -/* - * Fill a DRP IE's allocation fields from a MAS bitmap. - */ -static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie, - struct uwb_mas_bm *mas) -{ - int z, i, num_fields = 0, next = 0; - struct uwb_drp_alloc *zones; - __le16 current_bmp; - DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS); - DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE); - - zones = drp_ie->allocs; - - bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS); - - /* Determine unique MAS bitmaps in zones from bitmap. */ - for (z = 0; z < UWB_NUM_ZONES; z++) { - bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE); - if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) { - bool found = false; - current_bmp = (__le16) *tmp_mas_bm; - for (i = 0; i < next; i++) { - if (current_bmp == zones[i].mas_bm) { - zones[i].zone_bm |= 1 << z; - found = true; - break; - } - } - if (!found) { - num_fields++; - zones[next].zone_bm = 1 << z; - zones[next].mas_bm = current_bmp; - next++; - } - } - bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS); - } - - /* Store in format ready for transmission (le16). */ - for (i = 0; i < num_fields; i++) { - drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm); - drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm); - } - - drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr) - + num_fields * sizeof(struct uwb_drp_alloc); -} - -/** - * uwb_drp_ie_update - update a reservation's DRP IE - * @rsv: the reservation - */ -int uwb_drp_ie_update(struct uwb_rsv *rsv) -{ - struct uwb_ie_drp *drp_ie; - struct uwb_rsv_move *mv; - int unsafe; - - if (rsv->state == UWB_RSV_STATE_NONE) { - kfree(rsv->drp_ie); - rsv->drp_ie = NULL; - return 0; - } - - unsafe = rsv->mas.unsafe ? 
1 : 0; - - if (rsv->drp_ie == NULL) { - rsv->drp_ie = uwb_drp_ie_alloc(); - if (rsv->drp_ie == NULL) - return -ENOMEM; - } - drp_ie = rsv->drp_ie; - - uwb_ie_drp_set_unsafe(drp_ie, unsafe); - uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker); - uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv)); - uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv)); - uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv)); - uwb_ie_drp_set_stream_index(drp_ie, rsv->stream); - uwb_ie_drp_set_type(drp_ie, rsv->type); - - if (uwb_rsv_is_owner(rsv)) { - switch (rsv->target.type) { - case UWB_RSV_TARGET_DEV: - drp_ie->dev_addr = rsv->target.dev->dev_addr; - break; - case UWB_RSV_TARGET_DEVADDR: - drp_ie->dev_addr = rsv->target.devaddr; - break; - } - } else - drp_ie->dev_addr = rsv->owner->dev_addr; - - uwb_drp_ie_from_bm(drp_ie, &rsv->mas); - - if (uwb_rsv_has_two_drp_ies(rsv)) { - mv = &rsv->mv; - if (mv->companion_drp_ie == NULL) { - mv->companion_drp_ie = uwb_drp_ie_alloc(); - if (mv->companion_drp_ie == NULL) - return -ENOMEM; - } - drp_ie = mv->companion_drp_ie; - - /* keep all the same configuration of the main drp_ie */ - memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp)); - - - /* FIXME: handle properly the unsafe bit */ - uwb_ie_drp_set_unsafe(drp_ie, 1); - uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv)); - uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv)); - - uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas); - } - - rsv->ie_valid = true; - return 0; -} - -/* - * Set MAS bits from given MAS bitmap in a single zone of large bitmap. - * - * We are given a zone id and the MAS bitmap of bits that need to be set in - * this zone. Note that this zone may already have bits set and this only - * adds settings - we cannot simply assign the MAS bitmap contents to the - * zone contents. We iterate over the the bits (MAS) in the zone and set the - * bits that are set in the given MAS bitmap. - */ -static -void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm) -{ - int mas; - u16 mas_mask; - - for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) { - mas_mask = 1 << mas; - if (mas_bm & mas_mask) - set_bit(zone * UWB_NUM_ZONES + mas, bm->bm); - } -} - -/** - * uwb_drp_ie_zones_to_bm - convert DRP allocation fields to a bitmap - * @mas: MAS bitmap that will be populated to correspond to the - * allocation fields in the DRP IE - * @drp_ie: the DRP IE that contains the allocation fields. - * - * The input format is an array of MAS allocation fields (16 bit Zone - * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section - * 16.8.6. The output is a full 256 bit MAS bitmap. - * - * We go over all the allocation fields, for each allocation field we - * know which zones are impacted. We iterate over all the zones - * impacted and call a function that will set the correct MAS bits in - * each zone. 
- */ -void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie) -{ - int numallocs = (drp_ie->hdr.length - 4) / 4; - const struct uwb_drp_alloc *alloc; - int cnt; - u16 zone_bm, mas_bm; - u8 zone; - u16 zone_mask; - - bitmap_zero(bm->bm, UWB_NUM_MAS); - - for (cnt = 0; cnt < numallocs; cnt++) { - alloc = &drp_ie->allocs[cnt]; - zone_bm = le16_to_cpu(alloc->zone_bm); - mas_bm = le16_to_cpu(alloc->mas_bm); - for (zone = 0; zone < UWB_NUM_ZONES; zone++) { - zone_mask = 1 << zone; - if (zone_bm & zone_mask) - uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm); - } - } -} - diff --git a/drivers/staging/uwb/drp.c b/drivers/staging/uwb/drp.c deleted file mode 100644 index 869987bede7b..000000000000 --- a/drivers/staging/uwb/drp.c +++ /dev/null @@ -1,842 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band - * Dynamic Reservation Protocol handling - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. - */ -#include <linux/kthread.h> -#include <linux/freezer.h> -#include <linux/slab.h> -#include <linux/delay.h> -#include "uwb-internal.h" - - -/* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */ -enum uwb_drp_conflict_action { - /* Reservation is maintained, no action needed */ - UWB_DRP_CONFLICT_MANTAIN = 0, - - /* the device shall not transmit frames in conflicting MASs in - * the following superframe. If the device is the reservation - * target, it shall also set the Reason Code in its DRP IE to - * Conflict in its beacon in the following superframe. - */ - UWB_DRP_CONFLICT_ACT1, - - /* the device shall not set the Reservation Status bit to ONE - * and shall not transmit frames in conflicting MASs. If the - * device is the reservation target, it shall also set the - * Reason Code in its DRP IE to Conflict. - */ - UWB_DRP_CONFLICT_ACT2, - - /* the device shall not transmit frames in conflicting MASs in - * the following superframe. It shall remove the conflicting - * MASs from the reservation or set the Reservation Status to - * ZERO in its beacon in the following superframe. If the - * device is the reservation target, it shall also set the - * Reason Code in its DRP IE to Conflict. - */ - UWB_DRP_CONFLICT_ACT3, -}; - - -static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg, - struct uwb_rceb *reply, ssize_t reply_size) -{ - struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply; - unsigned long flags; - - if (r != NULL) { - if (r->bResultCode != UWB_RC_RES_SUCCESS) - dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n", - uwb_rc_strerror(r->bResultCode), r->bResultCode); - } else - dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n"); - - spin_lock_irqsave(&rc->rsvs_lock, flags); - if (rc->set_drp_ie_pending > 1) { - rc->set_drp_ie_pending = 0; - uwb_rsv_queue_update(rc); - } else { - rc->set_drp_ie_pending = 0; - } - spin_unlock_irqrestore(&rc->rsvs_lock, flags); -} - -/** - * Construct and send the SET DRP IE - * - * @rc: UWB Host controller - * @returns: >= 0 number of bytes still available in the beacon - * < 0 errno code on error. - * - * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the - * device to include in its beacon at the same time. We thus have to - * traverse all reservations and include the DRP IEs of all PENDING - * and NEGOTIATED reservations in a SET DRP command for transmission. - * - * A DRP Availability IE is appended. 
- * - * rc->rsvs_mutex is held - * - * FIXME We currently ignore the returned value indicating the remaining space - * in beacon. This could be used to deny reservation requests earlier if - * determined that they would cause the beacon space to be exceeded. - */ -int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) -{ - int result; - struct uwb_rc_cmd_set_drp_ie *cmd; - struct uwb_rsv *rsv; - struct uwb_rsv_move *mv; - int num_bytes = 0; - u8 *IEDataptr; - - result = -ENOMEM; - /* First traverse all reservations to determine memory needed. */ - list_for_each_entry(rsv, &rc->reservations, rc_node) { - if (rsv->drp_ie != NULL) { - num_bytes += rsv->drp_ie->hdr.length + 2; - if (uwb_rsv_has_two_drp_ies(rsv) && - (rsv->mv.companion_drp_ie != NULL)) { - mv = &rsv->mv; - num_bytes += - mv->companion_drp_ie->hdr.length + 2; - } - } - } - num_bytes += sizeof(rc->drp_avail.ie); - cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); - if (cmd == NULL) - goto error; - cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; - cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE); - cmd->wIELength = num_bytes; - IEDataptr = (u8 *)&cmd->IEData[0]; - - /* FIXME: DRV avail IE is not always needed */ - /* put DRP avail IE first */ - memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); - IEDataptr += sizeof(struct uwb_ie_drp_avail); - - /* Next traverse all reservations to place IEs in allocated memory. */ - list_for_each_entry(rsv, &rc->reservations, rc_node) { - if (rsv->drp_ie != NULL) { - memcpy(IEDataptr, rsv->drp_ie, - rsv->drp_ie->hdr.length + 2); - IEDataptr += rsv->drp_ie->hdr.length + 2; - - if (uwb_rsv_has_two_drp_ies(rsv) && - (rsv->mv.companion_drp_ie != NULL)) { - mv = &rsv->mv; - memcpy(IEDataptr, mv->companion_drp_ie, - mv->companion_drp_ie->hdr.length + 2); - IEDataptr += - mv->companion_drp_ie->hdr.length + 2; - } - } - } - - result = uwb_rc_cmd_async(rc, "SET-DRP-IE", - &cmd->rccb, sizeof(*cmd) + num_bytes, - UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE, - uwb_rc_set_drp_cmd_done, NULL); - - rc->set_drp_ie_pending = 1; - - kfree(cmd); -error: - return result; -} - -/* - * Evaluate the action to perform using conflict resolution rules - * - * Return a uwb_drp_conflict_action. 
- */ -static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot, - struct uwb_rsv *rsv, int our_status) -{ - int our_tie_breaker = rsv->tiebreaker; - int our_type = rsv->type; - int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot; - - int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie); - int ext_status = uwb_ie_drp_status(ext_drp_ie); - int ext_type = uwb_ie_drp_type(ext_drp_ie); - - - /* [ECMA-368 2nd Edition] 17.4.6 */ - if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) { - return UWB_DRP_CONFLICT_MANTAIN; - } - - /* [ECMA-368 2nd Edition] 17.4.6-1 */ - if (our_type == UWB_DRP_TYPE_ALIEN_BP) { - return UWB_DRP_CONFLICT_MANTAIN; - } - - /* [ECMA-368 2nd Edition] 17.4.6-2 */ - if (ext_type == UWB_DRP_TYPE_ALIEN_BP) { - /* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */ - return UWB_DRP_CONFLICT_ACT1; - } - - /* [ECMA-368 2nd Edition] 17.4.6-3 */ - if (our_status == 0 && ext_status == 1) { - return UWB_DRP_CONFLICT_ACT2; - } - - /* [ECMA-368 2nd Edition] 17.4.6-4 */ - if (our_status == 1 && ext_status == 0) { - return UWB_DRP_CONFLICT_MANTAIN; - } - - /* [ECMA-368 2nd Edition] 17.4.6-5a */ - if (our_tie_breaker == ext_tie_breaker && - our_beacon_slot < ext_beacon_slot) { - return UWB_DRP_CONFLICT_MANTAIN; - } - - /* [ECMA-368 2nd Edition] 17.4.6-5b */ - if (our_tie_breaker != ext_tie_breaker && - our_beacon_slot > ext_beacon_slot) { - return UWB_DRP_CONFLICT_MANTAIN; - } - - if (our_status == 0) { - if (our_tie_breaker == ext_tie_breaker) { - /* [ECMA-368 2nd Edition] 17.4.6-6a */ - if (our_beacon_slot > ext_beacon_slot) { - return UWB_DRP_CONFLICT_ACT2; - } - } else { - /* [ECMA-368 2nd Edition] 17.4.6-6b */ - if (our_beacon_slot < ext_beacon_slot) { - return UWB_DRP_CONFLICT_ACT2; - } - } - } else { - if (our_tie_breaker == ext_tie_breaker) { - /* [ECMA-368 2nd Edition] 17.4.6-7a */ - if (our_beacon_slot > ext_beacon_slot) { - return UWB_DRP_CONFLICT_ACT3; - } - } else { - /* [ECMA-368 2nd Edition] 17.4.6-7b */ - if (our_beacon_slot < ext_beacon_slot) { - return UWB_DRP_CONFLICT_ACT3; - } - } - } - return UWB_DRP_CONFLICT_MANTAIN; -} - -static void handle_conflict_normal(struct uwb_ie_drp *drp_ie, - int ext_beacon_slot, - struct uwb_rsv *rsv, - struct uwb_mas_bm *conflicting_mas) -{ - struct uwb_rc *rc = rsv->rc; - struct uwb_rsv_move *mv = &rsv->mv; - struct uwb_drp_backoff_win *bow = &rc->bow; - int action; - - action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv)); - - if (uwb_rsv_is_owner(rsv)) { - switch(action) { - case UWB_DRP_CONFLICT_ACT2: - /* try move */ - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED); - if (bow->can_reserve_extra_mases == false) - uwb_rsv_backoff_win_increment(rc); - - break; - case UWB_DRP_CONFLICT_ACT3: - uwb_rsv_backoff_win_increment(rc); - /* drop some mases with reason modified */ - /* put in the companion the mases to be dropped */ - bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); - default: - break; - } - } else { - switch(action) { - case UWB_DRP_CONFLICT_ACT2: - case UWB_DRP_CONFLICT_ACT3: - uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); - default: - break; - } - - } - -} - -static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot, - struct uwb_rsv *rsv, bool companion_only, - struct uwb_mas_bm *conflicting_mas) -{ - struct uwb_rc *rc = rsv->rc; - struct uwb_drp_backoff_win *bow = &rc->bow; - struct uwb_rsv_move *mv = &rsv->mv; - int action; - - if 
(companion_only) { - /* status of companion is 0 at this point */ - action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0); - if (uwb_rsv_is_owner(rsv)) { - switch(action) { - case UWB_DRP_CONFLICT_ACT2: - case UWB_DRP_CONFLICT_ACT3: - uwb_rsv_set_state(rsv, - UWB_RSV_STATE_O_ESTABLISHED); - rsv->needs_release_companion_mas = false; - if (bow->can_reserve_extra_mases == false) - uwb_rsv_backoff_win_increment(rc); - uwb_drp_avail_release(rsv->rc, - &rsv->mv.companion_mas); - } - } else { /* rsv is target */ - switch(action) { - case UWB_DRP_CONFLICT_ACT2: - case UWB_DRP_CONFLICT_ACT3: - uwb_rsv_set_state(rsv, - UWB_RSV_STATE_T_EXPANDING_CONFLICT); - /* send_drp_avail_ie = true; */ - } - } - } else { /* also base part of the reservation is conflicting */ - if (uwb_rsv_is_owner(rsv)) { - uwb_rsv_backoff_win_increment(rc); - /* remove companion part */ - uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); - - /* drop some mases with reason modified */ - - /* put in the companion the mases to be dropped */ - bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, - conflicting_mas->bm, UWB_NUM_MAS); - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); - } else { /* it is a target rsv */ - uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); - /* send_drp_avail_ie = true; */ - } - } -} - -static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv, - struct uwb_rc_evt_drp *drp_evt, - struct uwb_ie_drp *drp_ie, - struct uwb_mas_bm *conflicting_mas) -{ - struct uwb_rsv_move *mv; - - /* check if the conflicting reservation has two drp_ies */ - if (uwb_rsv_has_two_drp_ies(rsv)) { - mv = &rsv->mv; - if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, - UWB_NUM_MAS)) { - handle_conflict_expanding(drp_ie, - drp_evt->beacon_slot_number, - rsv, false, conflicting_mas); - } else { - if (bitmap_intersects(mv->companion_mas.bm, - conflicting_mas->bm, UWB_NUM_MAS)) { - handle_conflict_expanding( - drp_ie, drp_evt->beacon_slot_number, - rsv, true, conflicting_mas); - } - } - } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, - UWB_NUM_MAS)) { - handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, - rsv, conflicting_mas); - } -} - -static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc, - struct uwb_rc_evt_drp *drp_evt, - struct uwb_ie_drp *drp_ie, - struct uwb_mas_bm *conflicting_mas) -{ - struct uwb_rsv *rsv; - - list_for_each_entry(rsv, &rc->reservations, rc_node) { - uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, - conflicting_mas); - } -} - -static void uwb_drp_process_target_accepted(struct uwb_rc *rc, - struct uwb_rsv *rsv, struct uwb_rc_evt_drp *drp_evt, - struct uwb_ie_drp *drp_ie, struct uwb_mas_bm *mas) -{ - struct uwb_rsv_move *mv = &rsv->mv; - int status; - - status = uwb_ie_drp_status(drp_ie); - - if (rsv->state == UWB_RSV_STATE_T_CONFLICT) { - uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); - return; - } - - if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) { - /* drp_ie is companion */ - if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) { - /* stroke companion */ - uwb_rsv_set_state(rsv, - UWB_RSV_STATE_T_EXPANDING_ACCEPTED); - } - } else { - if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) { - if (uwb_drp_avail_reserve_pending(rc, mas) == -EBUSY) { - /* FIXME: there is a conflict, find - * the conflicting reservations and - * take a sensible action. 
Consider - * that in drp_ie there is the - * "neighbour" */ - uwb_drp_handle_all_conflict_rsv(rc, drp_evt, - drp_ie, mas); - } else { - /* accept the extra reservation */ - bitmap_copy(mv->companion_mas.bm, mas->bm, - UWB_NUM_MAS); - uwb_rsv_set_state(rsv, - UWB_RSV_STATE_T_EXPANDING_ACCEPTED); - } - } else { - if (status) { - uwb_rsv_set_state(rsv, - UWB_RSV_STATE_T_ACCEPTED); - } - } - - } -} - -/* - * Based on the DRP IE, transition a target reservation to a new - * state. - */ -static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, - struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt) -{ - struct device *dev = &rc->uwb_dev.dev; - struct uwb_rsv_move *mv = &rsv->mv; - int status; - enum uwb_drp_reason reason_code; - struct uwb_mas_bm mas; - - status = uwb_ie_drp_status(drp_ie); - reason_code = uwb_ie_drp_reason_code(drp_ie); - uwb_drp_ie_to_bm(&mas, drp_ie); - - switch (reason_code) { - case UWB_DRP_REASON_ACCEPTED: - uwb_drp_process_target_accepted(rc, rsv, drp_evt, drp_ie, &mas); - break; - - case UWB_DRP_REASON_MODIFIED: - /* check to see if we have already modified the reservation */ - if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { - uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); - break; - } - - /* find if the owner wants to expand or reduce */ - if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { - /* owner is reducing */ - bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, - UWB_NUM_MAS); - uwb_drp_avail_release(rsv->rc, &mv->companion_mas); - } - - bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); - uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED); - break; - default: - dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", - reason_code, status); - } -} - -static void uwb_drp_process_owner_accepted(struct uwb_rsv *rsv, - struct uwb_mas_bm *mas) -{ - struct uwb_rsv_move *mv = &rsv->mv; - - switch (rsv->state) { - case UWB_RSV_STATE_O_PENDING: - case UWB_RSV_STATE_O_INITIATED: - case UWB_RSV_STATE_O_ESTABLISHED: - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); - break; - case UWB_RSV_STATE_O_MODIFIED: - if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS)) - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); - else - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); - break; - - case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn' t be a problem */ - if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS)) - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); - else - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); - break; - case UWB_RSV_STATE_O_MOVE_EXPANDING: - if (bitmap_equal(mas->bm, mv->companion_mas.bm, UWB_NUM_MAS)) { - /* Companion reservation accepted */ - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); - } else { - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); - } - break; - case UWB_RSV_STATE_O_MOVE_COMBINING: - if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS)) - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); - else - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); - break; - default: - break; - } -} -/* - * Based on the DRP IE, transition an owner reservation to a new - * state. 
- */ -static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, - struct uwb_dev *src, struct uwb_ie_drp *drp_ie, - struct uwb_rc_evt_drp *drp_evt) -{ - struct device *dev = &rc->uwb_dev.dev; - int status; - enum uwb_drp_reason reason_code; - struct uwb_mas_bm mas; - - status = uwb_ie_drp_status(drp_ie); - reason_code = uwb_ie_drp_reason_code(drp_ie); - uwb_drp_ie_to_bm(&mas, drp_ie); - - if (status) { - switch (reason_code) { - case UWB_DRP_REASON_ACCEPTED: - uwb_drp_process_owner_accepted(rsv, &mas); - break; - default: - dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", - reason_code, status); - } - } else { - switch (reason_code) { - case UWB_DRP_REASON_PENDING: - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING); - break; - case UWB_DRP_REASON_DENIED: - uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); - break; - case UWB_DRP_REASON_CONFLICT: - /* resolve the conflict */ - bitmap_complement(mas.bm, src->last_availability_bm, - UWB_NUM_MAS); - uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas); - break; - default: - dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", - reason_code, status); - } - } -} - -static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt) -{ - unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US; - mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us)); -} - -static void uwb_cnflt_update_work(struct work_struct *work) -{ - struct uwb_cnflt_alien *cnflt = container_of(work, - struct uwb_cnflt_alien, - cnflt_update_work); - struct uwb_cnflt_alien *c; - struct uwb_rc *rc = cnflt->rc; - - unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; - - mutex_lock(&rc->rsvs_mutex); - - list_del(&cnflt->rc_node); - - /* update rc global conflicting alien bitmap */ - bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); - - list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) { - bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, - c->mas.bm, UWB_NUM_MAS); - } - - queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, - usecs_to_jiffies(delay_us)); - - kfree(cnflt); - mutex_unlock(&rc->rsvs_mutex); -} - -static void uwb_cnflt_timer(struct timer_list *t) -{ - struct uwb_cnflt_alien *cnflt = from_timer(cnflt, t, timer); - - queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work); -} - -/* - * We have received an DRP_IE of type Alien BP and we need to make - * sure we do not transmit in conflicting MASs. 
- */ -static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) -{ - struct device *dev = &rc->uwb_dev.dev; - struct uwb_mas_bm mas; - struct uwb_cnflt_alien *cnflt; - unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; - - uwb_drp_ie_to_bm(&mas, drp_ie); - - list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) { - if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) { - /* Existing alien BP reservation conflicting - * bitmap, just reset the timer */ - uwb_cnflt_alien_stroke_timer(cnflt); - return; - } - } - - /* New alien BP reservation conflicting bitmap */ - - /* alloc and initialize new uwb_cnflt_alien */ - cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL); - if (!cnflt) { - dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n"); - return; - } - - INIT_LIST_HEAD(&cnflt->rc_node); - timer_setup(&cnflt->timer, uwb_cnflt_timer, 0); - - cnflt->rc = rc; - INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work); - - bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS); - - list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list); - - /* update rc global conflicting alien bitmap */ - bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS); - - queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); - - /* start the timer */ - uwb_cnflt_alien_stroke_timer(cnflt); -} - -static void uwb_drp_process_not_involved(struct uwb_rc *rc, - struct uwb_rc_evt_drp *drp_evt, - struct uwb_ie_drp *drp_ie) -{ - struct uwb_mas_bm mas; - - uwb_drp_ie_to_bm(&mas, drp_ie); - uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); -} - -static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src, - struct uwb_rc_evt_drp *drp_evt, - struct uwb_ie_drp *drp_ie) -{ - struct uwb_rsv *rsv; - - rsv = uwb_rsv_find(rc, src, drp_ie); - if (!rsv) { - /* - * No reservation? It's either for a recently - * terminated reservation; or the DRP IE couldn't be - * processed (e.g., an invalid IE or out of memory). - */ - return; - } - - /* - * Do nothing with DRP IEs for reservations that have been - * terminated. - */ - if (rsv->state == UWB_RSV_STATE_NONE) { - uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); - return; - } - - if (uwb_ie_drp_owner(drp_ie)) - uwb_drp_process_target(rc, rsv, drp_ie, drp_evt); - else - uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt); - -} - - -static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) -{ - return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0; -} - -/* - * Process a received DRP IE. - */ -static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, - struct uwb_dev *src, struct uwb_ie_drp *drp_ie) -{ - if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP) - uwb_drp_handle_alien_drp(rc, drp_ie); - else if (uwb_drp_involves_us(rc, drp_ie)) - uwb_drp_process_involved(rc, src, drp_evt, drp_ie); - else - uwb_drp_process_not_involved(rc, drp_evt, drp_ie); -} - -/* - * Process a received DRP Availability IE - */ -static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src, - struct uwb_ie_drp_avail *drp_availability_ie) -{ - bitmap_copy(src->last_availability_bm, - drp_availability_ie->bmp, UWB_NUM_MAS); -} - -/* - * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) - * from a device. 
- */ -static -void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, - size_t ielen, struct uwb_dev *src_dev) -{ - struct device *dev = &rc->uwb_dev.dev; - struct uwb_ie_hdr *ie_hdr; - void *ptr; - - ptr = drp_evt->ie_data; - for (;;) { - ie_hdr = uwb_ie_next(&ptr, &ielen); - if (!ie_hdr) - break; - - switch (ie_hdr->element_id) { - case UWB_IE_DRP_AVAILABILITY: - uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr); - break; - case UWB_IE_DRP: - uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr); - break; - default: - dev_warn(dev, "unexpected IE in DRP notification\n"); - break; - } - } - - if (ielen > 0) - dev_warn(dev, "%d octets remaining in DRP notification\n", - (int)ielen); -} - -/** - * uwbd_evt_handle_rc_drp - handle a DRP_IE event - * @evt: the DRP_IE event from the radio controller - * - * This processes DRP notifications from the radio controller, either - * initiating a new reservation or transitioning an existing - * reservation into a different state. - * - * DRP notifications can occur for three different reasons: - * - * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as - * the target or source have been received. - * - * These DRP IEs could be new or for an existing reservation. - * - * If the DRP IE for an existing reservation ceases to be to - * received for at least mMaxLostBeacons, the reservation should be - * considered to be terminated. Note that the TERMINATE reason (see - * below) may not always be signalled (e.g., the remote device has - * two or more reservations established with the RC). - * - * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon - * group conflict with the RC's reservations. - * - * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received - * from a device (i.e., it's terminated all reservations). - * - * Only the software state of the reservations is changed; the setting - * of the radio controller's DRP IEs is done after all the events in - * an event buffer are processed. This saves waiting multiple times - * for the SET_DRP_IE command to complete. - */ -int uwbd_evt_handle_rc_drp(struct uwb_event *evt) -{ - struct device *dev = &evt->rc->uwb_dev.dev; - struct uwb_rc *rc = evt->rc; - struct uwb_rc_evt_drp *drp_evt; - size_t ielength, bytes_left; - struct uwb_dev_addr src_addr; - struct uwb_dev *src_dev; - - /* Is there enough data to decode the event (and any IEs in - its payload)? */ - if (evt->notif.size < sizeof(*drp_evt)) { - dev_err(dev, "DRP event: Not enough data to decode event " - "[%zu bytes left, %zu needed]\n", - evt->notif.size, sizeof(*drp_evt)); - return 0; - } - bytes_left = evt->notif.size - sizeof(*drp_evt); - drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb); - ielength = le16_to_cpu(drp_evt->ie_length); - if (bytes_left != ielength) { - dev_err(dev, "DRP event: Not enough data in payload [%zu" - "bytes left, %zu declared in the event]\n", - bytes_left, ielength); - return 0; - } - - memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr)); - src_dev = uwb_dev_get_by_devaddr(rc, &src_addr); - if (!src_dev) { - /* - * A DRP notification from an unrecognized device. - * - * This is probably from a WUSB device that doesn't - * have an EUI-48 and therefore doesn't show up in the - * UWB device database. It's safe to simply ignore - * these. 
- */ - return 0; - } - - mutex_lock(&rc->rsvs_mutex); - - /* We do not distinguish from the reason */ - uwb_drp_process_all(rc, drp_evt, ielength, src_dev); - - mutex_unlock(&rc->rsvs_mutex); - - uwb_dev_put(src_dev); - return 0; -} diff --git a/drivers/staging/uwb/est.c b/drivers/staging/uwb/est.c deleted file mode 100644 index d4141ffdd775..000000000000 --- a/drivers/staging/uwb/est.c +++ /dev/null @@ -1,450 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band Radio Control - * Event Size Tables management - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: docs - * - * Infrastructure, code and data tables for guessing the size of - * events received on the notification endpoints of UWB radio - * controllers. - * - * You define a table of events and for each, its size and how to get - * the extra size. - * - * ENTRY POINTS: - * - * uwb_est_{init/destroy}(): To initialize/release the EST subsystem. - * - * uwb_est_[u]register(): To un/register event size tables - * uwb_est_grow() - * - * uwb_est_find_size(): Get the size of an event - * uwb_est_get_size() - */ -#include <linux/spinlock.h> -#include <linux/slab.h> -#include <linux/export.h> - -#include "uwb-internal.h" - -struct uwb_est { - u16 type_event_high; - u16 vendor, product; - u8 entries; - const struct uwb_est_entry *entry; -}; - -static struct uwb_est *uwb_est; -static u8 uwb_est_size; -static u8 uwb_est_used; -static DEFINE_RWLOCK(uwb_est_lock); - -/** - * WUSB Standard Event Size Table, HWA-RC interface - * - * Sizes for events and notifications type 0 (general), high nibble 0. - */ -static -struct uwb_est_entry uwb_est_00_00xx[] = { - [UWB_RC_EVT_IE_RCV] = { - .size = sizeof(struct uwb_rc_evt_ie_rcv), - .offset = 1 + offsetof(struct uwb_rc_evt_ie_rcv, wIELength), - }, - [UWB_RC_EVT_BEACON] = { - .size = sizeof(struct uwb_rc_evt_beacon), - .offset = 1 + offsetof(struct uwb_rc_evt_beacon, wBeaconInfoLength), - }, - [UWB_RC_EVT_BEACON_SIZE] = { - .size = sizeof(struct uwb_rc_evt_beacon_size), - }, - [UWB_RC_EVT_BPOIE_CHANGE] = { - .size = sizeof(struct uwb_rc_evt_bpoie_change), - .offset = 1 + offsetof(struct uwb_rc_evt_bpoie_change, - wBPOIELength), - }, - [UWB_RC_EVT_BP_SLOT_CHANGE] = { - .size = sizeof(struct uwb_rc_evt_bp_slot_change), - }, - [UWB_RC_EVT_BP_SWITCH_IE_RCV] = { - .size = sizeof(struct uwb_rc_evt_bp_switch_ie_rcv), - .offset = 1 + offsetof(struct uwb_rc_evt_bp_switch_ie_rcv, wIELength), - }, - [UWB_RC_EVT_DEV_ADDR_CONFLICT] = { - .size = sizeof(struct uwb_rc_evt_dev_addr_conflict), - }, - [UWB_RC_EVT_DRP_AVAIL] = { - .size = sizeof(struct uwb_rc_evt_drp_avail) - }, - [UWB_RC_EVT_DRP] = { - .size = sizeof(struct uwb_rc_evt_drp), - .offset = 1 + offsetof(struct uwb_rc_evt_drp, ie_length), - }, - [UWB_RC_EVT_BP_SWITCH_STATUS] = { - .size = sizeof(struct uwb_rc_evt_bp_switch_status), - }, - [UWB_RC_EVT_CMD_FRAME_RCV] = { - .size = sizeof(struct uwb_rc_evt_cmd_frame_rcv), - .offset = 1 + offsetof(struct uwb_rc_evt_cmd_frame_rcv, dataLength), - }, - [UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV] = { - .size = sizeof(struct uwb_rc_evt_channel_change_ie_rcv), - .offset = 1 + offsetof(struct uwb_rc_evt_channel_change_ie_rcv, wIELength), - }, - [UWB_RC_CMD_CHANNEL_CHANGE] = { - .size = sizeof(struct uwb_rc_evt_confirm), - }, - [UWB_RC_CMD_DEV_ADDR_MGMT] = { - .size = sizeof(struct uwb_rc_evt_dev_addr_mgmt) }, - [UWB_RC_CMD_GET_IE] = { - .size = sizeof(struct uwb_rc_evt_get_ie), - .offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength), - 
}, - [UWB_RC_CMD_RESET] = { - .size = sizeof(struct uwb_rc_evt_confirm), - }, - [UWB_RC_CMD_SCAN] = { - .size = sizeof(struct uwb_rc_evt_confirm), - }, - [UWB_RC_CMD_SET_BEACON_FILTER] = { - .size = sizeof(struct uwb_rc_evt_confirm), - }, - [UWB_RC_CMD_SET_DRP_IE] = { - .size = sizeof(struct uwb_rc_evt_set_drp_ie), - }, - [UWB_RC_CMD_SET_IE] = { - .size = sizeof(struct uwb_rc_evt_set_ie), - }, - [UWB_RC_CMD_SET_NOTIFICATION_FILTER] = { - .size = sizeof(struct uwb_rc_evt_confirm), - }, - [UWB_RC_CMD_SET_TX_POWER] = { - .size = sizeof(struct uwb_rc_evt_confirm), - }, - [UWB_RC_CMD_SLEEP] = { - .size = sizeof(struct uwb_rc_evt_confirm), - }, - [UWB_RC_CMD_START_BEACON] = { - .size = sizeof(struct uwb_rc_evt_confirm), - }, - [UWB_RC_CMD_STOP_BEACON] = { - .size = sizeof(struct uwb_rc_evt_confirm), - }, - [UWB_RC_CMD_BP_MERGE] = { - .size = sizeof(struct uwb_rc_evt_confirm), - }, - [UWB_RC_CMD_SEND_COMMAND_FRAME] = { - .size = sizeof(struct uwb_rc_evt_confirm), - }, - [UWB_RC_CMD_SET_ASIE_NOTIF] = { - .size = sizeof(struct uwb_rc_evt_confirm), - }, -}; - -static -struct uwb_est_entry uwb_est_01_00xx[] = { - [UWB_RC_DAA_ENERGY_DETECTED] = { - .size = sizeof(struct uwb_rc_evt_daa_energy_detected), - }, - [UWB_RC_SET_DAA_ENERGY_MASK] = { - .size = sizeof(struct uwb_rc_evt_set_daa_energy_mask), - }, - [UWB_RC_SET_NOTIFICATION_FILTER_EX] = { - .size = sizeof(struct uwb_rc_evt_set_notification_filter_ex), - }, -}; - -/** - * Initialize the EST subsystem - * - * Register the standard tables also. - * - * FIXME: tag init - */ -int uwb_est_create(void) -{ - int result; - - uwb_est_size = 2; - uwb_est_used = 0; - uwb_est = kcalloc(uwb_est_size, sizeof(uwb_est[0]), GFP_KERNEL); - if (uwb_est == NULL) - return -ENOMEM; - - result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff, - uwb_est_00_00xx, ARRAY_SIZE(uwb_est_00_00xx)); - if (result < 0) - goto out; - result = uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, 0xffff, 0xffff, - uwb_est_01_00xx, ARRAY_SIZE(uwb_est_01_00xx)); -out: - return result; -} - - -/** Clean it up */ -void uwb_est_destroy(void) -{ - kfree(uwb_est); - uwb_est = NULL; - uwb_est_size = uwb_est_used = 0; -} - - -/** - * Double the capacity of the EST table - * - * @returns 0 if ok, < 0 errno no error. - */ -static -int uwb_est_grow(void) -{ - size_t actual_size = uwb_est_size * sizeof(uwb_est[0]); - void *new = kmalloc_array(2, actual_size, GFP_ATOMIC); - if (new == NULL) - return -ENOMEM; - memcpy(new, uwb_est, actual_size); - memset(new + actual_size, 0, actual_size); - kfree(uwb_est); - uwb_est = new; - uwb_est_size *= 2; - return 0; -} - - -/** - * Register an event size table - * - * Makes room for it if the table is full, and then inserts it in the - * right position (entries are sorted by type, event_high, vendor and - * then product). - * - * @vendor: vendor code for matching against the device (0x0000 and - * 0xffff mean any); use 0x0000 to force all to match without - * checking possible vendor specific ones, 0xfffff to match - * after checking vendor specific ones. - * - * @product: product code from that vendor; same matching rules, use - * 0x0000 for not allowing vendor specific matches, 0xffff - * for allowing. - * - * This arragement just makes the tables sort differenty. Because the - * table is sorted by growing type-event_high-vendor-product, a zero - * vendor will match before than a 0x456a vendor, that will match - * before a 0xfffff vendor. - * - * @returns 0 if ok, < 0 errno on error (-ENOENT if not found). 
- */ -/* FIXME: add bus type to vendor/product code */ -int uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product, - const struct uwb_est_entry *entry, size_t entries) -{ - unsigned long flags; - unsigned itr; - int result = 0; - - write_lock_irqsave(&uwb_est_lock, flags); - if (uwb_est_used == uwb_est_size) { - result = uwb_est_grow(); - if (result < 0) - goto out; - } - /* Find the right spot to insert it in */ - for (itr = 0; itr < uwb_est_used; itr++) - if (uwb_est[itr].type_event_high < type - && uwb_est[itr].vendor < vendor - && uwb_est[itr].product < product) - break; - - /* Shift others to make room for the new one? */ - if (itr < uwb_est_used) - memmove(&uwb_est[itr+1], &uwb_est[itr], uwb_est_used - itr); - uwb_est[itr].type_event_high = type << 8 | event_high; - uwb_est[itr].vendor = vendor; - uwb_est[itr].product = product; - uwb_est[itr].entry = entry; - uwb_est[itr].entries = entries; - uwb_est_used++; -out: - write_unlock_irqrestore(&uwb_est_lock, flags); - return result; -} -EXPORT_SYMBOL_GPL(uwb_est_register); - - -/** - * Unregister an event size table - * - * This just removes the specified entry and moves the ones after it - * to fill in the gap. This is needed to keep the list sorted; no - * reallocation is done to reduce the size of the table. - * - * We unregister by all the data we used to register instead of by - * pointer to the @entry array because we might have used the same - * table for a bunch of IDs (for example). - * - * @returns 0 if ok, < 0 errno on error (-ENOENT if not found). - */ -int uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product, - const struct uwb_est_entry *entry, size_t entries) -{ - unsigned long flags; - unsigned itr; - struct uwb_est est_cmp = { - .type_event_high = type << 8 | event_high, - .vendor = vendor, - .product = product, - .entry = entry, - .entries = entries - }; - write_lock_irqsave(&uwb_est_lock, flags); - for (itr = 0; itr < uwb_est_used; itr++) - if (!memcmp(&uwb_est[itr], &est_cmp, sizeof(est_cmp))) - goto found; - write_unlock_irqrestore(&uwb_est_lock, flags); - return -ENOENT; - -found: - if (itr < uwb_est_used - 1) /* Not last one? move ones above */ - memmove(&uwb_est[itr], &uwb_est[itr+1], uwb_est_used - itr - 1); - uwb_est_used--; - write_unlock_irqrestore(&uwb_est_lock, flags); - return 0; -} -EXPORT_SYMBOL_GPL(uwb_est_unregister); - - -/** - * Get the size of an event from a table - * - * @rceb: pointer to the buffer with the event - * @rceb_size: size of the area pointed to by @rceb in bytes. - * @returns: > 0 Size of the event - * -ENOSPC An area big enough was not provided to look - * ahead into the event's guts and guess the size. - * -EINVAL Unknown event code (wEvent). - * - * This will look at the received RCEB and guess what is the total - * size. For variable sized events, it will look further ahead into - * their length field to see how much data should be read. - * - * Note this size is *not* final--the neh (Notification/Event Handle) - * might specificy an extra size to add. - */ -static -ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est, - u8 event_low, const struct uwb_rceb *rceb, - size_t rceb_size) -{ - unsigned offset; - ssize_t size; - struct device *dev = &uwb_rc->uwb_dev.dev; - const struct uwb_est_entry *entry; - - size = -ENOENT; - if (event_low >= est->entries) { /* in range? 
*/ - dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u out of range\n", - est, est->type_event_high, est->vendor, est->product, - est->entries, event_low); - goto out; - } - size = -ENOENT; - entry = &est->entry[event_low]; - if (entry->size == 0 && entry->offset == 0) { /* unknown? */ - dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u unknown\n", - est, est->type_event_high, est->vendor, est->product, - est->entries, event_low); - goto out; - } - offset = entry->offset; /* extra fries with that? */ - if (offset == 0) - size = entry->size; - else { - /* Ops, got an extra size field at 'offset'--read it */ - const void *ptr = rceb; - size_t type_size = 0; - offset--; - size = -ENOSPC; /* enough data for more? */ - switch (entry->type) { - case UWB_EST_16: type_size = sizeof(__le16); break; - case UWB_EST_8: type_size = sizeof(u8); break; - default: BUG(); - } - if (offset + type_size > rceb_size) { - dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: " - "not enough data to read extra size\n", - est, est->type_event_high, est->vendor, - est->product, est->entries); - goto out; - } - size = entry->size; - ptr += offset; - switch (entry->type) { - case UWB_EST_16: size += le16_to_cpu(*(__le16 *)ptr); break; - case UWB_EST_8: size += *(u8 *)ptr; break; - default: BUG(); - } - } -out: - return size; -} - - -/** - * Guesses the size of a WA event - * - * @rceb: pointer to the buffer with the event - * @rceb_size: size of the area pointed to by @rceb in bytes. - * @returns: > 0 Size of the event - * -ENOSPC An area big enough was not provided to look - * ahead into the event's guts and guess the size. - * -EINVAL Unknown event code (wEvent). - * - * This will look at the received RCEB and guess what is the total - * size by checking all the tables registered with - * uwb_est_register(). For variable sized events, it will look further - * ahead into their length field to see how much data should be read. - * - * Note this size is *not* final--the neh (Notification/Event Handle) - * might specificy an extra size to add or replace. 
- */ -ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, - size_t rceb_size) -{ - /* FIXME: add vendor/product data */ - ssize_t size; - struct device *dev = &rc->uwb_dev.dev; - unsigned long flags; - unsigned itr; - u16 type_event_high, event; - - read_lock_irqsave(&uwb_est_lock, flags); - size = -ENOSPC; - if (rceb_size < sizeof(*rceb)) - goto out; - event = le16_to_cpu(rceb->wEvent); - type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8; - for (itr = 0; itr < uwb_est_used; itr++) { - if (uwb_est[itr].type_event_high != type_event_high) - continue; - size = uwb_est_get_size(rc, &uwb_est[itr], - event & 0x00ff, rceb, rceb_size); - /* try more tables that might handle the same type */ - if (size != -ENOENT) - goto out; - } - dev_dbg(dev, - "event 0x%02x/%04x/%02x: no handlers available; RCEB %4ph\n", - (unsigned) rceb->bEventType, - (unsigned) le16_to_cpu(rceb->wEvent), - (unsigned) rceb->bEventContext, - rceb); - size = -ENOENT; -out: - read_unlock_irqrestore(&uwb_est_lock, flags); - return size; -} -EXPORT_SYMBOL_GPL(uwb_est_find_size); diff --git a/drivers/staging/uwb/hwa-rc.c b/drivers/staging/uwb/hwa-rc.c deleted file mode 100644 index b6effad749d7..000000000000 --- a/drivers/staging/uwb/hwa-rc.c +++ /dev/null @@ -1,929 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * WUSB Host Wire Adapter: Radio Control Interface (WUSB[8.6]) - * Radio Control command/event transport - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * Initialize the Radio Control interface Driver. - * - * For each device probed, creates an 'struct hwarc' which contains - * just the representation of the UWB Radio Controller, and the logic - * for reading notifications and passing them to the UWB Core. - * - * So we initialize all of those, register the UWB Radio Controller - * and setup the notification/event handle to pipe the notifications - * to the UWB management Daemon. - * - * Command and event filtering. - * - * This is the driver for the Radio Control Interface described in WUSB - * 1.0. The core UWB module assumes that all drivers are compliant to the - * WHCI 0.95 specification. We thus create a filter that parses all - * incoming messages from the (WUSB 1.0) device and manipulate them to - * conform to the WHCI 0.95 specification. Similarly, outgoing messages - * are parsed and manipulated to conform to the WUSB 1.0 compliant messages - * that the device expects. Only a few messages are affected: - * Affected events: - * UWB_RC_EVT_BEACON - * UWB_RC_EVT_BP_SLOT_CHANGE - * UWB_RC_EVT_DRP_AVAIL - * UWB_RC_EVT_DRP - * Affected commands: - * UWB_RC_CMD_SCAN - * UWB_RC_CMD_SET_DRP_IE - */ -#include <linux/init.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/usb.h> -#include "../wusbcore/include/wusb.h" -#include "../wusbcore/include/wusb-wa.h" -#include "uwb.h" - -#include "uwb-internal.h" - -/* The device uses commands and events from the WHCI specification, although - * reporting itself as WUSB compliant. */ -#define WUSB_QUIRK_WHCI_CMD_EVT 0x01 - -/** - * Descriptor for an instance of the UWB Radio Control Driver that - * attaches to the RCI interface of the Host Wired Adapter. - * - * Unless there is a lock specific to the 'data members', all access - * is protected by uwb_rc->mutex. - * - * The NEEP (Notification/Event EndPoint) URB (@neep_urb) writes to - * @rd_buffer. Note there is no locking because it is perfectly (heh!) 
- * serialized--probe() submits an URB, callback is called, processes - * the data (synchronously), submits another URB, and so on. There is - * no concurrent access to the buffer. - */ -struct hwarc { - struct usb_device *usb_dev; - struct usb_interface *usb_iface; - struct uwb_rc *uwb_rc; /* UWB host controller */ - struct urb *neep_urb; /* Notification endpoint handling */ - struct edc neep_edc; - void *rd_buffer; /* NEEP read buffer */ -}; - - -/* Beacon received notification (WUSB 1.0 [8.6.3.2]) */ -struct uwb_rc_evt_beacon_WUSB_0100 { - struct uwb_rceb rceb; - u8 bChannelNumber; - __le16 wBPSTOffset; - u8 bLQI; - u8 bRSSI; - __le16 wBeaconInfoLength; - u8 BeaconInfo[]; -} __attribute__((packed)); - -/** - * Filter WUSB 1.0 BEACON RCV notification to be WHCI 0.95 - * - * @header: the incoming event - * @buf_size: size of buffer containing incoming event - * @new_size: size of event after filtering completed - * - * The WHCI 0.95 spec has a "Beacon Type" field. This value is unknown at - * the time we receive the beacon from WUSB so we just set it to - * UWB_RC_BEACON_TYPE_NEIGHBOR as a default. - * The solution below allocates memory upon receipt of every beacon from a - * WUSB device. This will deteriorate performance. What is the right way to - * do this? - */ -static -int hwarc_filter_evt_beacon_WUSB_0100(struct uwb_rc *rc, - struct uwb_rceb **header, - const size_t buf_size, - size_t *new_size) -{ - struct uwb_rc_evt_beacon_WUSB_0100 *be; - struct uwb_rc_evt_beacon *newbe; - size_t bytes_left, ielength; - struct device *dev = &rc->uwb_dev.dev; - - be = container_of(*header, struct uwb_rc_evt_beacon_WUSB_0100, rceb); - bytes_left = buf_size; - if (bytes_left < sizeof(*be)) { - dev_err(dev, "Beacon Received Notification: Not enough data " - "to decode for filtering (%zu vs %zu bytes needed)\n", - bytes_left, sizeof(*be)); - return -EINVAL; - } - bytes_left -= sizeof(*be); - ielength = le16_to_cpu(be->wBeaconInfoLength); - if (bytes_left < ielength) { - dev_err(dev, "Beacon Received Notification: Not enough data " - "to decode IEs (%zu vs %zu bytes needed)\n", - bytes_left, ielength); - return -EINVAL; - } - newbe = kzalloc(sizeof(*newbe) + ielength, GFP_ATOMIC); - if (newbe == NULL) - return -ENOMEM; - newbe->rceb = be->rceb; - newbe->bChannelNumber = be->bChannelNumber; - newbe->bBeaconType = UWB_RC_BEACON_TYPE_NEIGHBOR; - newbe->wBPSTOffset = be->wBPSTOffset; - newbe->bLQI = be->bLQI; - newbe->bRSSI = be->bRSSI; - newbe->wBeaconInfoLength = be->wBeaconInfoLength; - memcpy(newbe->BeaconInfo, be->BeaconInfo, ielength); - *header = &newbe->rceb; - *new_size = sizeof(*newbe) + ielength; - return 1; /* calling function will free memory */ -} - - -/* DRP Availability change notification (WUSB 1.0 [8.6.3.8]) */ -struct uwb_rc_evt_drp_avail_WUSB_0100 { - struct uwb_rceb rceb; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/** - * Filter WUSB 1.0 DRP AVAILABILITY CHANGE notification to be WHCI 0.95 - * - * @header: the incoming event - * @buf_size: size of buffer containing incoming event - * @new_size: size of event after filtering completed - */ -static -int hwarc_filter_evt_drp_avail_WUSB_0100(struct uwb_rc *rc, - struct uwb_rceb **header, - const size_t buf_size, - size_t *new_size) -{ - struct uwb_rc_evt_drp_avail_WUSB_0100 *da; - struct uwb_rc_evt_drp_avail *newda; - struct uwb_ie_hdr *ie_hdr; - size_t bytes_left, ielength; - struct device *dev = &rc->uwb_dev.dev; - - - da = container_of(*header, struct uwb_rc_evt_drp_avail_WUSB_0100, rceb); - bytes_left = 
buf_size; - if (bytes_left < sizeof(*da)) { - dev_err(dev, "Not enough data to decode DRP Avail " - "Notification for filtering. Expected %zu, " - "received %zu.\n", (size_t)sizeof(*da), bytes_left); - return -EINVAL; - } - bytes_left -= sizeof(*da); - ielength = le16_to_cpu(da->wIELength); - if (bytes_left < ielength) { - dev_err(dev, "DRP Avail Notification filter: IE length " - "[%zu bytes] does not match actual length " - "[%zu bytes].\n", ielength, bytes_left); - return -EINVAL; - } - if (ielength < sizeof(*ie_hdr)) { - dev_err(dev, "DRP Avail Notification filter: Not enough " - "data to decode IE [%zu bytes, %zu needed]\n", - ielength, sizeof(*ie_hdr)); - return -EINVAL; - } - ie_hdr = (void *) da->IEData; - if (ie_hdr->length > 32) { - dev_err(dev, "DRP Availability Change event has unexpected " - "length for filtering. Expected < 32 bytes, " - "got %zu bytes.\n", (size_t)ie_hdr->length); - return -EINVAL; - } - newda = kzalloc(sizeof(*newda), GFP_ATOMIC); - if (newda == NULL) - return -ENOMEM; - newda->rceb = da->rceb; - memcpy(newda->bmp, (u8 *) ie_hdr + sizeof(*ie_hdr), ie_hdr->length); - *header = &newda->rceb; - *new_size = sizeof(*newda); - return 1; /* calling function will free memory */ -} - - -/* DRP notification (WUSB 1.0 [8.6.3.9]) */ -struct uwb_rc_evt_drp_WUSB_0100 { - struct uwb_rceb rceb; - struct uwb_dev_addr wSrcAddr; - u8 bExplicit; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/** - * Filter WUSB 1.0 DRP Notification to be WHCI 0.95 - * - * @header: the incoming event - * @buf_size: size of buffer containing incoming event - * @new_size: size of event after filtering completed - * - * It is hard to manage DRP reservations without having a Reason code. - * Unfortunately there is none in the WUSB spec. We just set the default to - * DRP IE RECEIVED. - * We do not currently use the bBeaconSlotNumber value, so we set this to - * zero for now. - */ -static -int hwarc_filter_evt_drp_WUSB_0100(struct uwb_rc *rc, - struct uwb_rceb **header, - const size_t buf_size, - size_t *new_size) -{ - struct uwb_rc_evt_drp_WUSB_0100 *drpev; - struct uwb_rc_evt_drp *newdrpev; - size_t bytes_left, ielength; - struct device *dev = &rc->uwb_dev.dev; - - drpev = container_of(*header, struct uwb_rc_evt_drp_WUSB_0100, rceb); - bytes_left = buf_size; - if (bytes_left < sizeof(*drpev)) { - dev_err(dev, "Not enough data to decode DRP Notification " - "for filtering. 
Expected %zu, received %zu.\n", - (size_t)sizeof(*drpev), bytes_left); - return -EINVAL; - } - ielength = le16_to_cpu(drpev->wIELength); - bytes_left -= sizeof(*drpev); - if (bytes_left < ielength) { - dev_err(dev, "DRP Notification filter: header length [%zu " - "bytes] does not match actual length [%zu " - "bytes].\n", ielength, bytes_left); - return -EINVAL; - } - newdrpev = kzalloc(sizeof(*newdrpev) + ielength, GFP_ATOMIC); - if (newdrpev == NULL) - return -ENOMEM; - newdrpev->rceb = drpev->rceb; - newdrpev->src_addr = drpev->wSrcAddr; - newdrpev->reason = UWB_DRP_NOTIF_DRP_IE_RCVD; - newdrpev->beacon_slot_number = 0; - newdrpev->ie_length = drpev->wIELength; - memcpy(newdrpev->ie_data, drpev->IEData, ielength); - *header = &newdrpev->rceb; - *new_size = sizeof(*newdrpev) + ielength; - return 1; /* calling function will free memory */ -} - - -/* Scan Command (WUSB 1.0 [8.6.2.5]) */ -struct uwb_rc_cmd_scan_WUSB_0100 { - struct uwb_rccb rccb; - u8 bChannelNumber; - u8 bScanState; -} __attribute__((packed)); - -/** - * Filter WHCI 0.95 SCAN command to be WUSB 1.0 SCAN command - * - * @header: command sent to device (compliant to WHCI 0.95) - * @size: size of command sent to device - * - * We only reduce the size by two bytes because the WUSB 1.0 scan command - * does not have the last field (wStarttime). Also, make sure we don't send - * the device an unexpected scan type. - */ -static -int hwarc_filter_cmd_scan_WUSB_0100(struct uwb_rc *rc, - struct uwb_rccb **header, - size_t *size) -{ - struct uwb_rc_cmd_scan *sc; - - sc = container_of(*header, struct uwb_rc_cmd_scan, rccb); - - if (sc->bScanState == UWB_SCAN_ONLY_STARTTIME) - sc->bScanState = UWB_SCAN_ONLY; - /* Don't send the last two bytes. */ - *size -= 2; - return 0; -} - - -/* SET DRP IE command (WUSB 1.0 [8.6.2.7]) */ -struct uwb_rc_cmd_set_drp_ie_WUSB_0100 { - struct uwb_rccb rccb; - u8 bExplicit; - __le16 wIELength; - struct uwb_ie_drp IEData[]; -} __attribute__((packed)); - -/** - * Filter WHCI 0.95 SET DRP IE command to be WUSB 1.0 SET DRP IE command - * - * @header: command sent to device (compliant to WHCI 0.95) - * @size: size of command sent to device - * - * WUSB has an extra bExplicit field - we assume always explicit - * negotiation so this field is set. The command expected by the device is - * thus larger than the one prepared by the driver so we need to - * reallocate memory to accommodate this. - * We trust the driver to send us the correct data so no checking is done - * on incoming data - evn though it is variable length. - */ -static -int hwarc_filter_cmd_set_drp_ie_WUSB_0100(struct uwb_rc *rc, - struct uwb_rccb **header, - size_t *size) -{ - struct uwb_rc_cmd_set_drp_ie *orgcmd; - struct uwb_rc_cmd_set_drp_ie_WUSB_0100 *cmd; - size_t ielength; - - orgcmd = container_of(*header, struct uwb_rc_cmd_set_drp_ie, rccb); - ielength = le16_to_cpu(orgcmd->wIELength); - cmd = kzalloc(sizeof(*cmd) + ielength, GFP_KERNEL); - if (cmd == NULL) - return -ENOMEM; - cmd->rccb = orgcmd->rccb; - cmd->bExplicit = 0; - cmd->wIELength = orgcmd->wIELength; - memcpy(cmd->IEData, orgcmd->IEData, ielength); - *header = &cmd->rccb; - *size = sizeof(*cmd) + ielength; - return 1; /* calling function will free memory */ -} - - -/** - * Filter data from WHCI driver to WUSB device - * - * @header: WHCI 0.95 compliant command from driver - * @size: length of command - * - * The routine managing commands to the device (uwb_rc_cmd()) will call the - * filtering function pointer (if it exists) before it passes any data to - * the device. 
At this time the command has been formatted according to - * WHCI 0.95 and is ready to be sent to the device. - * - * The filter function will be provided with the current command and its - * length. The function will manipulate the command if necessary and - * potentially reallocate memory for a command that needed more memory that - * the given command. If new memory was created the function will return 1 - * to indicate to the calling function that the memory need to be freed - * when not needed any more. The size will contain the new length of the - * command. - * If memory has not been allocated we rely on the original mechanisms to - * free the memory of the command - even when we reduce the value of size. - */ -static -int hwarc_filter_cmd_WUSB_0100(struct uwb_rc *rc, struct uwb_rccb **header, - size_t *size) -{ - int result; - struct uwb_rccb *rccb = *header; - int cmd = le16_to_cpu(rccb->wCommand); - switch (cmd) { - case UWB_RC_CMD_SCAN: - result = hwarc_filter_cmd_scan_WUSB_0100(rc, header, size); - break; - case UWB_RC_CMD_SET_DRP_IE: - result = hwarc_filter_cmd_set_drp_ie_WUSB_0100(rc, header, size); - break; - default: - result = -ENOANO; - break; - } - return result; -} - - -/** - * Filter data from WHCI driver to WUSB device - * - * @header: WHCI 0.95 compliant command from driver - * @size: length of command - * - * Filter commands based on which protocol the device supports. The WUSB - * errata should be the same as WHCI 0.95 so we do not filter that here - - * only WUSB 1.0. - */ -static -int hwarc_filter_cmd(struct uwb_rc *rc, struct uwb_rccb **header, - size_t *size) -{ - int result = -ENOANO; - if (rc->version == 0x0100) - result = hwarc_filter_cmd_WUSB_0100(rc, header, size); - return result; -} - - -/** - * Compute return value as sum of incoming value and value at given offset - * - * @rceb: event for which we compute the size, it contains a variable - * length field. - * @core_size: size of the "non variable" part of the event - * @offset: place in event where the length of the variable part is stored - * @buf_size: total length of buffer in which event arrived - we need to make - * sure we read the offset in memory that is still part of the event - */ -static -ssize_t hwarc_get_event_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, - size_t core_size, size_t offset, - const size_t buf_size) -{ - ssize_t size = -ENOSPC; - const void *ptr = rceb; - size_t type_size = sizeof(__le16); - struct device *dev = &rc->uwb_dev.dev; - - if (offset + type_size >= buf_size) { - dev_err(dev, "Not enough data to read extra size of event " - "0x%02x/%04x/%02x, only got %zu bytes.\n", - rceb->bEventType, le16_to_cpu(rceb->wEvent), - rceb->bEventContext, buf_size); - goto out; - } - ptr += offset; - size = core_size + le16_to_cpu(*(__le16 *)ptr); -out: - return size; -} - - -/* Beacon slot change notification (WUSB 1.0 [8.6.3.5]) */ -struct uwb_rc_evt_bp_slot_change_WUSB_0100 { - struct uwb_rceb rceb; - u8 bSlotNumber; -} __attribute__((packed)); - - -/** - * Filter data from WUSB device to WHCI driver - * - * @header: incoming event - * @buf_size: size of buffer in which event arrived - * @_event_size: actual size of event in the buffer - * @new_size: size of event after filtered - * - * We don't know how the buffer is constructed - there may be more than one - * event in it so buffer length does not determine event length. We first - * determine the expected size of the incoming event. 
This value is passed - * back only if the actual filtering succeeded (so we know the computed - * expected size is correct). This value will be zero if - * the event did not need any filtering. - * - * WHCI interprets the BP Slot Change event's data differently than - * WUSB. The event sizes are exactly the same. The data field - * indicates the new beacon slot in which a RC is transmitting its - * beacon. The maximum value of this is 96 (wMacBPLength ECMA-368 - * 17.16 (Table 117)). We thus know that the WUSB value will not set - * the bit bNoSlot, so we don't really do anything (placeholder). - */ -static -int hwarc_filter_event_WUSB_0100(struct uwb_rc *rc, struct uwb_rceb **header, - const size_t buf_size, size_t *_real_size, - size_t *_new_size) -{ - int result = -ENOANO; - struct uwb_rceb *rceb = *header; - int event = le16_to_cpu(rceb->wEvent); - ssize_t event_size; - size_t core_size, offset; - - if (rceb->bEventType != UWB_RC_CET_GENERAL) - goto out; - switch (event) { - case UWB_RC_EVT_BEACON: - core_size = sizeof(struct uwb_rc_evt_beacon_WUSB_0100); - offset = offsetof(struct uwb_rc_evt_beacon_WUSB_0100, - wBeaconInfoLength); - event_size = hwarc_get_event_size(rc, rceb, core_size, - offset, buf_size); - if (event_size < 0) - goto out; - *_real_size = event_size; - result = hwarc_filter_evt_beacon_WUSB_0100(rc, header, - buf_size, _new_size); - break; - case UWB_RC_EVT_BP_SLOT_CHANGE: - *_new_size = *_real_size = - sizeof(struct uwb_rc_evt_bp_slot_change_WUSB_0100); - result = 0; - break; - - case UWB_RC_EVT_DRP_AVAIL: - core_size = sizeof(struct uwb_rc_evt_drp_avail_WUSB_0100); - offset = offsetof(struct uwb_rc_evt_drp_avail_WUSB_0100, - wIELength); - event_size = hwarc_get_event_size(rc, rceb, core_size, - offset, buf_size); - if (event_size < 0) - goto out; - *_real_size = event_size; - result = hwarc_filter_evt_drp_avail_WUSB_0100( - rc, header, buf_size, _new_size); - break; - - case UWB_RC_EVT_DRP: - core_size = sizeof(struct uwb_rc_evt_drp_WUSB_0100); - offset = offsetof(struct uwb_rc_evt_drp_WUSB_0100, wIELength); - event_size = hwarc_get_event_size(rc, rceb, core_size, - offset, buf_size); - if (event_size < 0) - goto out; - *_real_size = event_size; - result = hwarc_filter_evt_drp_WUSB_0100(rc, header, - buf_size, _new_size); - break; - - default: - break; - } -out: - return result; -} - -/** - * Filter data from WUSB device to WHCI driver - * - * @header: incoming event - * @buf_size: size of buffer in which event arrived - * @_event_size: actual size of event in the buffer - * @_new_size: size of event after filtered - * - * Filter events based on which protocol the device supports. The WUSB - * errata should be the same as WHCI 0.95 so we do not filter that here - - * only WUSB 1.0. - * - * If we don't handle it, we return -ENOANO (why the weird error code? - * well, so if I get it, I can pinpoint in the code that raised - * it...after all, not too many places use the higher error codes). - */ -static -int hwarc_filter_event(struct uwb_rc *rc, struct uwb_rceb **header, - const size_t buf_size, size_t *_real_size, - size_t *_new_size) -{ - int result = -ENOANO; - if (rc->version == 0x0100) - result = hwarc_filter_event_WUSB_0100( - rc, header, buf_size, _real_size, _new_size); - return result; -} - - -/** - * Execute an UWB RC command on HWA - * - * @rc: Instance of a Radio Controller that is a HWA - * @cmd: Buffer containing the RCCB and payload to execute - * @cmd_size: Size of the command buffer. 
- * - * NOTE: rc's mutex has to be locked - */ -static -int hwarc_cmd(struct uwb_rc *uwb_rc, const struct uwb_rccb *cmd, size_t cmd_size) -{ - struct hwarc *hwarc = uwb_rc->priv; - return usb_control_msg( - hwarc->usb_dev, usb_sndctrlpipe(hwarc->usb_dev, 0), - WA_EXEC_RC_CMD, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - 0, hwarc->usb_iface->cur_altsetting->desc.bInterfaceNumber, - (void *) cmd, cmd_size, 100 /* FIXME: this is totally arbitrary */); -} - -static -int hwarc_reset(struct uwb_rc *uwb_rc) -{ - struct hwarc *hwarc = uwb_rc->priv; - int result; - - /* device lock must be held when calling usb_reset_device. */ - result = usb_lock_device_for_reset(hwarc->usb_dev, NULL); - if (result >= 0) { - result = usb_reset_device(hwarc->usb_dev); - usb_unlock_device(hwarc->usb_dev); - } - - return result; -} - -/** - * Callback for the notification and event endpoint - * - * Check's that everything is fine and then passes the read data to - * the notification/event handling mechanism (neh). - */ -static -void hwarc_neep_cb(struct urb *urb) -{ - struct hwarc *hwarc = urb->context; - struct usb_interface *usb_iface = hwarc->usb_iface; - struct device *dev = &usb_iface->dev; - int result; - - switch (result = urb->status) { - case 0: - uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer, - urb->actual_length); - break; - case -ECONNRESET: /* Not an error, but a controlled situation; */ - case -ENOENT: /* (we killed the URB)...so, no broadcast */ - goto out; - case -ESHUTDOWN: /* going away! */ - goto out; - default: /* On general errors, retry unless it gets ugly */ - if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS, - EDC_ERROR_TIMEFRAME)) - goto error_exceeded; - dev_err(dev, "NEEP: URB error %d\n", urb->status); - } - result = usb_submit_urb(urb, GFP_ATOMIC); - if (result < 0 && result != -ENODEV && result != -EPERM) { - /* ignoring unrecoverable errors */ - dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", - result); - goto error; - } -out: - return; - -error_exceeded: - dev_err(dev, "NEEP: URB max acceptable errors " - "exceeded, resetting device\n"); -error: - uwb_rc_neh_error(hwarc->uwb_rc, result); - uwb_rc_reset_all(hwarc->uwb_rc); - return; -} - -static void hwarc_init(struct hwarc *hwarc) -{ - edc_init(&hwarc->neep_edc); -} - -/** - * Initialize the notification/event endpoint stuff - * - * Note this is effectively a parallel thread; it knows that - * hwarc->uwb_rc always exists because the existence of a 'hwarc' - * means that there is a reverence on the hwarc->uwb_rc (see - * _probe()), and thus _neep_cb() can execute safely. 
- */ -static int hwarc_neep_init(struct uwb_rc *rc) -{ - struct hwarc *hwarc = rc->priv; - struct usb_interface *iface = hwarc->usb_iface; - struct usb_device *usb_dev = interface_to_usbdev(iface); - struct device *dev = &iface->dev; - int result; - struct usb_endpoint_descriptor *epd; - - epd = &iface->cur_altsetting->endpoint[0].desc; - hwarc->rd_buffer = (void *) __get_free_page(GFP_KERNEL); - if (hwarc->rd_buffer == NULL) { - dev_err(dev, "Unable to allocate notification's read buffer\n"); - goto error_rd_buffer; - } - hwarc->neep_urb = usb_alloc_urb(0, GFP_KERNEL); - if (hwarc->neep_urb == NULL) - goto error_urb_alloc; - usb_fill_int_urb(hwarc->neep_urb, usb_dev, - usb_rcvintpipe(usb_dev, epd->bEndpointAddress), - hwarc->rd_buffer, PAGE_SIZE, - hwarc_neep_cb, hwarc, epd->bInterval); - result = usb_submit_urb(hwarc->neep_urb, GFP_ATOMIC); - if (result < 0) { - dev_err(dev, "Cannot submit notification URB: %d\n", result); - goto error_neep_submit; - } - return 0; - -error_neep_submit: - usb_free_urb(hwarc->neep_urb); - hwarc->neep_urb = NULL; -error_urb_alloc: - free_page((unsigned long)hwarc->rd_buffer); - hwarc->rd_buffer = NULL; -error_rd_buffer: - return -ENOMEM; -} - - -/** Clean up all the notification endpoint resources */ -static void hwarc_neep_release(struct uwb_rc *rc) -{ - struct hwarc *hwarc = rc->priv; - - usb_kill_urb(hwarc->neep_urb); - usb_free_urb(hwarc->neep_urb); - hwarc->neep_urb = NULL; - - free_page((unsigned long)hwarc->rd_buffer); - hwarc->rd_buffer = NULL; -} - -/** - * Get the version from class-specific descriptor - * - * NOTE: this descriptor comes with the big bundled configuration - * descriptor that includes the interfaces' and endpoints', so - * we just look for it in the cached copy kept by the USB stack. - * - * NOTE2: We convert LE fields to CPU order. - */ -static int hwarc_get_version(struct uwb_rc *rc) -{ - int result; - - struct hwarc *hwarc = rc->priv; - struct uwb_rc_control_intf_class_desc *descr; - struct device *dev = &rc->uwb_dev.dev; - struct usb_device *usb_dev = hwarc->usb_dev; - char *itr; - struct usb_descriptor_header *hdr; - size_t itr_size, actconfig_idx; - u16 version; - - actconfig_idx = (usb_dev->actconfig - usb_dev->config) / - sizeof(usb_dev->config[0]); - itr = usb_dev->rawdescriptors[actconfig_idx]; - itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); - while (itr_size >= sizeof(*hdr)) { - hdr = (struct usb_descriptor_header *) itr; - dev_dbg(dev, "Extra device descriptor: " - "type %02x/%u bytes @ %zu (%zu left)\n", - hdr->bDescriptorType, hdr->bLength, - (itr - usb_dev->rawdescriptors[actconfig_idx]), - itr_size); - if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL) - goto found; - itr += hdr->bLength; - itr_size -= hdr->bLength; - } - dev_err(dev, "cannot find Radio Control Interface Class descriptor\n"); - return -ENODEV; - -found: - result = -EINVAL; - if (hdr->bLength > itr_size) { /* is it available? */ - dev_err(dev, "incomplete Radio Control Interface Class " - "descriptor (%zu bytes left, %u needed)\n", - itr_size, hdr->bLength); - goto error; - } - if (hdr->bLength < sizeof(*descr)) { - dev_err(dev, "short Radio Control Interface Class " - "descriptor\n"); - goto error; - } - descr = (struct uwb_rc_control_intf_class_desc *) hdr; - /* Make LE fields CPU order */ - version = __le16_to_cpu(descr->bcdRCIVersion); - if (version != 0x0100) { - dev_err(dev, "Device reports protocol version 0x%04x. We " - "do not support that. 
\n", version); - result = -EINVAL; - goto error; - } - rc->version = version; - dev_dbg(dev, "Device supports WUSB protocol version 0x%04x \n", rc->version); - result = 0; -error: - return result; -} - -/* - * By creating a 'uwb_rc', we have a reference on it -- that reference - * is the one we drop when we disconnect. - * - * No need to switch altsettings; according to WUSB1.0[8.6.1.1], there - * is only one altsetting allowed. - */ -static int hwarc_probe(struct usb_interface *iface, - const struct usb_device_id *id) -{ - int result; - struct uwb_rc *uwb_rc; - struct hwarc *hwarc; - struct device *dev = &iface->dev; - - if (iface->cur_altsetting->desc.bNumEndpoints < 1) - return -ENODEV; - if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc)) - return -ENODEV; - - result = -ENOMEM; - uwb_rc = uwb_rc_alloc(); - if (uwb_rc == NULL) { - dev_err(dev, "unable to allocate RC instance\n"); - goto error_rc_alloc; - } - hwarc = kzalloc(sizeof(*hwarc), GFP_KERNEL); - if (hwarc == NULL) { - dev_err(dev, "unable to allocate HWA RC instance\n"); - goto error_alloc; - } - hwarc_init(hwarc); - hwarc->usb_dev = usb_get_dev(interface_to_usbdev(iface)); - hwarc->usb_iface = usb_get_intf(iface); - hwarc->uwb_rc = uwb_rc; - - uwb_rc->owner = THIS_MODULE; - uwb_rc->start = hwarc_neep_init; - uwb_rc->stop = hwarc_neep_release; - uwb_rc->cmd = hwarc_cmd; - uwb_rc->reset = hwarc_reset; - if (id->driver_info & WUSB_QUIRK_WHCI_CMD_EVT) { - uwb_rc->filter_cmd = NULL; - uwb_rc->filter_event = NULL; - } else { - uwb_rc->filter_cmd = hwarc_filter_cmd; - uwb_rc->filter_event = hwarc_filter_event; - } - - result = uwb_rc_add(uwb_rc, dev, hwarc); - if (result < 0) - goto error_rc_add; - result = hwarc_get_version(uwb_rc); - if (result < 0) { - dev_err(dev, "cannot retrieve version of RC \n"); - goto error_get_version; - } - usb_set_intfdata(iface, hwarc); - return 0; - -error_get_version: - uwb_rc_rm(uwb_rc); -error_rc_add: - usb_put_intf(iface); - usb_put_dev(hwarc->usb_dev); - kfree(hwarc); -error_alloc: - uwb_rc_put(uwb_rc); -error_rc_alloc: - return result; -} - -static void hwarc_disconnect(struct usb_interface *iface) -{ - struct hwarc *hwarc = usb_get_intfdata(iface); - struct uwb_rc *uwb_rc = hwarc->uwb_rc; - - usb_set_intfdata(hwarc->usb_iface, NULL); - uwb_rc_rm(uwb_rc); - usb_put_intf(hwarc->usb_iface); - usb_put_dev(hwarc->usb_dev); - kfree(hwarc); - uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */ -} - -static int hwarc_pre_reset(struct usb_interface *iface) -{ - struct hwarc *hwarc = usb_get_intfdata(iface); - struct uwb_rc *uwb_rc = hwarc->uwb_rc; - - uwb_rc_pre_reset(uwb_rc); - return 0; -} - -static int hwarc_post_reset(struct usb_interface *iface) -{ - struct hwarc *hwarc = usb_get_intfdata(iface); - struct uwb_rc *uwb_rc = hwarc->uwb_rc; - - return uwb_rc_post_reset(uwb_rc); -} - -/** USB device ID's that we handle */ -static const struct usb_device_id hwarc_id_table[] = { - /* D-Link DUB-1210 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02), - .driver_info = WUSB_QUIRK_WHCI_CMD_EVT }, - /* Intel i1480 (using firmware 1.3PA2-20070828) */ - { USB_DEVICE_AND_INTERFACE_INFO(0x8086, 0x0c3b, 0xe0, 0x01, 0x02), - .driver_info = WUSB_QUIRK_WHCI_CMD_EVT }, - /* Alereon 5310 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x01, 0x02), - .driver_info = WUSB_QUIRK_WHCI_CMD_EVT }, - /* Alereon 5611 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x01, 0x02), - .driver_info = WUSB_QUIRK_WHCI_CMD_EVT }, - /* Generic match for the Radio 
Control interface */ - { USB_INTERFACE_INFO(0xe0, 0x01, 0x02), }, - { }, -}; -MODULE_DEVICE_TABLE(usb, hwarc_id_table); - -static struct usb_driver hwarc_driver = { - .name = "hwa-rc", - .id_table = hwarc_id_table, - .probe = hwarc_probe, - .disconnect = hwarc_disconnect, - .pre_reset = hwarc_pre_reset, - .post_reset = hwarc_post_reset, -}; - -module_usb_driver(hwarc_driver); - -MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); -MODULE_DESCRIPTION("Host Wireless Adapter Radio Control Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/uwb/i1480/Makefile b/drivers/staging/uwb/i1480/Makefile deleted file mode 100644 index d26fb9b845ae..000000000000 --- a/drivers/staging/uwb/i1480/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_UWB_I1480U) += dfu/ i1480-est.o diff --git a/drivers/staging/uwb/i1480/dfu/Makefile b/drivers/staging/uwb/i1480/dfu/Makefile deleted file mode 100644 index 4739fdac5922..000000000000 --- a/drivers/staging/uwb/i1480/dfu/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_UWB_I1480U) += i1480-dfu-usb.o - -i1480-dfu-usb-objs := \ - dfu.o \ - mac.o \ - phy.o \ - usb.o - - diff --git a/drivers/staging/uwb/i1480/dfu/dfu.c b/drivers/staging/uwb/i1480/dfu/dfu.c deleted file mode 100644 index 9d51ce8faad1..000000000000 --- a/drivers/staging/uwb/i1480/dfu/dfu.c +++ /dev/null @@ -1,198 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Wireless UWB Link 1480 - * Main driver - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * Common code for firmware upload used by the USB and PCI version; - * i1480_fw_upload() takes a device descriptor and uses the function - * pointers it provides to upload firmware and prepare the PHY. - * - * As well, provides common functions used by the rest of the code. - */ -#include "i1480-dfu.h" -#include <linux/errno.h> -#include <linux/delay.h> -#include <linux/pci.h> -#include <linux/device.h> -#include <linux/random.h> -#include <linux/export.h> -#include "../../uwb.h" - -/* - * i1480_rceb_check - Check RCEB for expected field values - * @i1480: pointer to device for which RCEB is being checked - * @rceb: RCEB being checked - * @cmd: which command the RCEB is related to - * @context: expected context - * @expected_type: expected event type - * @expected_event: expected event - * - * If @cmd is NULL, do not print error messages, but still return an error - * code. - * - * Return 0 if @rceb matches the expected values, -EINVAL otherwise. 
- */ -int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb, - const char *cmd, u8 context, u8 expected_type, - unsigned expected_event) -{ - int result = 0; - struct device *dev = i1480->dev; - if (rceb->bEventContext != context) { - if (cmd) - dev_err(dev, "%s: unexpected context id 0x%02x " - "(expected 0x%02x)\n", cmd, - rceb->bEventContext, context); - result = -EINVAL; - } - if (rceb->bEventType != expected_type) { - if (cmd) - dev_err(dev, "%s: unexpected event type 0x%02x " - "(expected 0x%02x)\n", cmd, - rceb->bEventType, expected_type); - result = -EINVAL; - } - if (le16_to_cpu(rceb->wEvent) != expected_event) { - if (cmd) - dev_err(dev, "%s: unexpected event 0x%04x " - "(expected 0x%04x)\n", cmd, - le16_to_cpu(rceb->wEvent), expected_event); - result = -EINVAL; - } - return result; -} -EXPORT_SYMBOL_GPL(i1480_rceb_check); - - -/* - * Execute a Radio Control Command - * - * Command data has to be in i1480->cmd_buf. - * - * @returns size of the reply data filled in i1480->evt_buf or < 0 errno - * code on error. - */ -ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size, - size_t reply_size) -{ - ssize_t result; - struct uwb_rceb *reply = i1480->evt_buf; - struct uwb_rccb *cmd = i1480->cmd_buf; - u16 expected_event = reply->wEvent; - u8 expected_type = reply->bEventType; - u8 context; - - init_completion(&i1480->evt_complete); - i1480->evt_result = -EINPROGRESS; - do { - get_random_bytes(&context, 1); - } while (context == 0x00 || context == 0xff); - cmd->bCommandContext = context; - result = i1480->cmd(i1480, cmd_name, cmd_size); - if (result < 0) - goto error; - /* wait for the callback to report a event was received */ - result = wait_for_completion_interruptible_timeout( - &i1480->evt_complete, HZ); - if (result == 0) { - result = -ETIMEDOUT; - goto error; - } - if (result < 0) - goto error; - result = i1480->evt_result; - if (result < 0) { - dev_err(i1480->dev, "%s: command reply reception failed: %zd\n", - cmd_name, result); - goto error; - } - /* - * Firmware versions >= 1.4.12224 for IOGear GUWA100U generate a - * spurious notification after firmware is downloaded. So check whether - * the receibed RCEB is such notification before assuming that the - * command has failed. - */ - if (i1480_rceb_check(i1480, i1480->evt_buf, NULL, - 0, 0xfd, 0x0022) == 0) { - /* Now wait for the actual RCEB for this command. */ - result = i1480->wait_init_done(i1480); - if (result < 0) - goto error; - result = i1480->evt_result; - } - if (result != reply_size) { - dev_err(i1480->dev, "%s returned only %zu bytes, %zu expected\n", - cmd_name, result, reply_size); - result = -EINVAL; - goto error; - } - /* Verify we got the right event in response */ - result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, - expected_type, expected_event); -error: - return result; -} -EXPORT_SYMBOL_GPL(i1480_cmd); - - -static -int i1480_print_state(struct i1480 *i1480) -{ - int result; - u32 *buf = (u32 *) i1480->cmd_buf; - - result = i1480->read(i1480, 0x80080000, 2 * sizeof(*buf)); - if (result < 0) { - dev_err(i1480->dev, "cannot read U & L states: %d\n", result); - goto error; - } - dev_info(i1480->dev, "state U 0x%08x, L 0x%08x\n", buf[0], buf[1]); -error: - return result; -} - - -/* - * PCI probe, firmware uploader - * - * _mac_fw_upload() will call rc_setup(), which needs an rc_release(). 
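To make the i1480_cmd() contract above concrete: a caller builds its RCCB in i1480->cmd_buf, pre-loads the reply header in i1480->evt_buf with the event type and code it expects back, and i1480_cmd() fills in the context byte, issues the command and verifies the confirmation. A minimal usage sketch, modelled on the RESET command that appears later in mac.c (nothing new, just the pattern restated):

static int example_issue_reset(struct i1480 *i1480)
{
	/* Command lives in the shared cmd_buf, reply lands in evt_buf. */
	struct uwb_rccb *cmd = i1480->cmd_buf;
	struct example_evt_reset {
		struct uwb_rceb rceb;
		u8 bResultCode;
	} __attribute__((packed)) *reply = i1480->evt_buf;
	ssize_t result;

	cmd->bCommandType = UWB_RC_CET_GENERAL;
	cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
	/* Tell i1480_cmd() which confirmation event to wait for. */
	reply->rceb.bEventType = UWB_RC_CET_GENERAL;
	reply->rceb.wEvent = UWB_RC_CMD_RESET;

	result = i1480_cmd(i1480, "RESET", sizeof(*cmd), sizeof(*reply));
	if (result < 0)
		return result;	/* timeout, bad reply size, wrong event, ... */
	return reply->bResultCode == UWB_RC_RES_SUCCESS ? 0 : -EIO;
}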
- */ -int i1480_fw_upload(struct i1480 *i1480) -{ - int result; - - result = i1480_pre_fw_upload(i1480); /* PHY pre fw */ - if (result < 0 && result != -ENOENT) { - i1480_print_state(i1480); - goto error; - } - result = i1480_mac_fw_upload(i1480); /* MAC fw */ - if (result < 0) { - if (result == -ENOENT) - dev_err(i1480->dev, "Cannot locate MAC FW file '%s'\n", - i1480->mac_fw_name); - else - i1480_print_state(i1480); - goto error; - } - result = i1480_phy_fw_upload(i1480); /* PHY fw */ - if (result < 0 && result != -ENOENT) { - i1480_print_state(i1480); - goto error_rc_release; - } - /* - * FIXME: find some reliable way to check whether firmware is running - * properly. Maybe use some standard request that has no side effects? - */ - dev_info(i1480->dev, "firmware uploaded successfully\n"); -error_rc_release: - if (i1480->rc_release) - i1480->rc_release(i1480); - result = 0; -error: - return result; -} -EXPORT_SYMBOL_GPL(i1480_fw_upload); diff --git a/drivers/staging/uwb/i1480/dfu/i1480-dfu.h b/drivers/staging/uwb/i1480/dfu/i1480-dfu.h deleted file mode 100644 index b21d058ecc23..000000000000 --- a/drivers/staging/uwb/i1480/dfu/i1480-dfu.h +++ /dev/null @@ -1,246 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * i1480 Device Firmware Upload - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This driver is the firmware uploader for the Intel Wireless UWB - * Link 1480 device (both in the USB and PCI incarnations). - * - * The process is quite simple: we stop the device, write the firmware - * to its memory and then restart it. Wait for the device to let us - * know it is done booting firmware. Ready. - * - * We might have to upload before or after a phy firmware (which might - * be done in two methods, using a normal firmware image or through - * the MPI port). - * - * Because USB and PCI use common methods, we just make ops out of the - * common operations (read, write, wait_init_done and cmd) and - * implement them in usb.c and pci.c. - * - * The flow is (some parts omitted): - * - * i1480_{usb,pci}_probe() On enumerate/discovery - * i1480_fw_upload() - * i1480_pre_fw_upload() - * __mac_fw_upload() - * fw_hdrs_load() - * mac_fw_hdrs_push() - * i1480->write() [i1480_{usb,pci}_write()] - * i1480_fw_cmp() - * i1480->read() [i1480_{usb,pci}_read()] - * i1480_mac_fw_upload() - * __mac_fw_upload() - * i1480->setup(() - * i1480->wait_init_done() - * i1480_cmd_reset() - * i1480->cmd() [i1480_{usb,pci}_cmd()] - * ... - * i1480_phy_fw_upload() - * request_firmware() - * i1480_mpi_write() - * i1480->cmd() [i1480_{usb,pci}_cmd()] - * - * Once the probe function enumerates the device and uploads the - * firmware, we just exit with -ENODEV, as we don't really want to - * attach to the device. - */ -#ifndef __i1480_DFU_H__ -#define __i1480_DFU_H__ - -#include <linux/types.h> -#include <linux/completion.h> -#include "../../include/spec.h" - -#define i1480_FW_UPLOAD_MODE_MASK (cpu_to_le32(0x00000018)) - -#if i1480_FW > 0x00000302 -#define i1480_RCEB_EXTENDED -#endif - -struct uwb_rccb; -struct uwb_rceb; - -/* - * Common firmware upload handlers - * - * Normally you embed this struct in another one specific to your hw. - * - * @write Write to device's memory from buffer. - * @read Read from device's memory to i1480->evt_buf. - * @setup Setup device after basic firmware is uploaded - * @wait_init_done - * Wait for the device to send a notification saying init - * is done. - * @cmd FOP for issuing the command to the hardware. 
The - * command data is contained in i1480->cmd_buf and the size - * is supplied as an argument. The command replied is put - * in i1480->evt_buf and the size in i1480->evt_result (or if - * an error, a < 0 errno code). - * - * @cmd_buf Memory buffer used to send commands to the device. - * Allocated by the upper layers i1480_fw_upload(). - * Size has to be @buf_size. - * @evt_buf Memory buffer used to place the async notifications - * received by the hw. Allocated by the upper layers - * i1480_fw_upload(). - * Size has to be @buf_size. - * @cmd_complete - * Low level driver uses this to notify code waiting afor - * an event that the event has arrived and data is in - * i1480->evt_buf (and size/result in i1480->evt_result). - * @hw_rev - * Use this value to activate dfu code to support new revisions - * of hardware. i1480_init() sets this to a default value. - * It should be updated by the USB and PCI code. - */ -struct i1480 { - struct device *dev; - - int (*write)(struct i1480 *, u32 addr, const void *, size_t); - int (*read)(struct i1480 *, u32 addr, size_t); - int (*rc_setup)(struct i1480 *); - void (*rc_release)(struct i1480 *); - int (*wait_init_done)(struct i1480 *); - int (*cmd)(struct i1480 *, const char *cmd_name, size_t cmd_size); - const char *pre_fw_name; - const char *mac_fw_name; - const char *mac_fw_name_deprecate; /* FIXME: Will go away */ - const char *phy_fw_name; - u8 hw_rev; - - size_t buf_size; /* size of both evt_buf and cmd_buf */ - void *evt_buf, *cmd_buf; - ssize_t evt_result; - struct completion evt_complete; -}; - -static inline -void i1480_init(struct i1480 *i1480) -{ - i1480->hw_rev = 1; - init_completion(&i1480->evt_complete); -} - -extern int i1480_fw_upload(struct i1480 *); -extern int i1480_pre_fw_upload(struct i1480 *); -extern int i1480_mac_fw_upload(struct i1480 *); -extern int i1480_phy_fw_upload(struct i1480 *); -extern ssize_t i1480_cmd(struct i1480 *, const char *, size_t, size_t); -extern int i1480_rceb_check(const struct i1480 *, - const struct uwb_rceb *, const char *, u8, - u8, unsigned); - -enum { - /* Vendor specific command type */ - i1480_CET_VS1 = 0xfd, - /* i1480 commands */ - i1480_CMD_SET_IP_MAS = 0x000e, - i1480_CMD_GET_MAC_PHY_INFO = 0x0003, - i1480_CMD_MPI_WRITE = 0x000f, - i1480_CMD_MPI_READ = 0x0010, - /* i1480 events */ -#if i1480_FW > 0x00000302 - i1480_EVT_CONFIRM = 0x0002, - i1480_EVT_RM_INIT_DONE = 0x0101, - i1480_EVT_DEV_ADD = 0x0103, - i1480_EVT_DEV_RM = 0x0104, - i1480_EVT_DEV_ID_CHANGE = 0x0105, - i1480_EVT_GET_MAC_PHY_INFO = i1480_CMD_GET_MAC_PHY_INFO, -#else - i1480_EVT_CONFIRM = 0x0002, - i1480_EVT_RM_INIT_DONE = 0x0101, - i1480_EVT_DEV_ADD = 0x0103, - i1480_EVT_DEV_RM = 0x0104, - i1480_EVT_DEV_ID_CHANGE = 0x0105, - i1480_EVT_GET_MAC_PHY_INFO = i1480_EVT_CONFIRM, -#endif -}; - - -struct i1480_evt_confirm { - struct uwb_rceb rceb; -#ifdef i1480_RCEB_EXTENDED - __le16 wParamLength; -#endif - u8 bResultCode; -} __attribute__((packed)); - - -struct i1480_rceb { - struct uwb_rceb rceb; -#ifdef i1480_RCEB_EXTENDED - __le16 wParamLength; -#endif -} __attribute__((packed)); - - -/** - * Get MAC & PHY Information confirm event structure - * - * Confirm event returned by the command. - */ -struct i1480_evt_confirm_GMPI { -#if i1480_FW > 0x00000302 - struct uwb_rceb rceb; - __le16 wParamLength; - __le16 status; - u8 mac_addr[6]; /* EUI-64 bit IEEE address [still 8 bytes?] 
*/ - u8 dev_addr[2]; - __le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */ - u8 hw_rev; - u8 phy_vendor; - u8 phy_rev; /* major v = >> 8; minor = v & 0xff */ - __le16 mac_caps; - u8 phy_caps[3]; - u8 key_stores; - __le16 mcast_addr_stores; - u8 sec_mode_supported; -#else - struct uwb_rceb rceb; - u8 status; - u8 mac_addr[8]; /* EUI-64 bit IEEE address [still 8 bytes?] */ - u8 dev_addr[2]; - __le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */ - __le16 phy_fw_rev; /* major v = >> 8; minor = v & 0xff */ - __le16 mac_caps; - u8 phy_caps; - u8 key_stores; - __le16 mcast_addr_stores; - u8 sec_mode_supported; -#endif -} __attribute__((packed)); - - -struct i1480_cmd_mpi_write { - struct uwb_rccb rccb; - __le16 size; - u8 data[]; -}; - - -struct i1480_cmd_mpi_read { - struct uwb_rccb rccb; - __le16 size; - struct { - u8 page, offset; - } __attribute__((packed)) data[]; -} __attribute__((packed)); - - -struct i1480_evt_mpi_read { - struct uwb_rceb rceb; -#ifdef i1480_RCEB_EXTENDED - __le16 wParamLength; -#endif - u8 bResultCode; - __le16 size; - struct { - u8 page, offset, value; - } __attribute__((packed)) data[]; -} __attribute__((packed)); - - -#endif /* #ifndef __i1480_DFU_H__ */ diff --git a/drivers/staging/uwb/i1480/dfu/mac.c b/drivers/staging/uwb/i1480/dfu/mac.c deleted file mode 100644 index 6e4d6c9cecf5..000000000000 --- a/drivers/staging/uwb/i1480/dfu/mac.c +++ /dev/null @@ -1,496 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Wireless UWB Link 1480 - * MAC Firmware upload implementation - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * Implementation of the code for parsing the firmware file (extract - * the headers and binary code chunks) in the fw_*() functions. The - * code to upload pre and mac firmwares is the same, so it uses a - * common entry point in __mac_fw_upload(), which uses the i1480 - * function pointers to push the firmware to the device. - */ -#include <linux/delay.h> -#include <linux/firmware.h> -#include <linux/slab.h> -#include "../../uwb.h" -#include "i1480-dfu.h" - -/* - * Descriptor for a continuous segment of MAC fw data - */ -struct fw_hdr { - unsigned long address; - size_t length; - const u32 *bin; - struct fw_hdr *next; -}; - - -/* Free a chain of firmware headers */ -static -void fw_hdrs_free(struct fw_hdr *hdr) -{ - struct fw_hdr *next; - - while (hdr) { - next = hdr->next; - kfree(hdr); - hdr = next; - } -} - - -/* Fill a firmware header descriptor from a memory buffer */ -static -int fw_hdr_load(struct i1480 *i1480, struct fw_hdr *hdr, unsigned hdr_cnt, - const char *_data, const u32 *data_itr, const u32 *data_top) -{ - size_t hdr_offset = (const char *) data_itr - _data; - size_t remaining_size = (void *) data_top - (void *) data_itr; - if (data_itr + 2 > data_top) { - dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in header at " - "offset %zu, limit %zu\n", - hdr_cnt, hdr_offset, - (const char *) data_itr + 2 - _data, - (const char *) data_top - _data); - return -EINVAL; - } - hdr->next = NULL; - hdr->address = le32_to_cpu(*data_itr++); - hdr->length = le32_to_cpu(*data_itr++); - hdr->bin = data_itr; - if (hdr->length > remaining_size) { - dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in data; " - "chunk too long (%zu bytes), only %zu left\n", - hdr_cnt, hdr_offset, hdr->length, remaining_size); - return -EINVAL; - } - return 0; -} - - -/** - * Get a buffer where the firmware is supposed to be and create a - * chain of headers linking them together. 
- * - * @phdr: where to place the pointer to the first header (headers link - * to the next via the @hdr->next ptr); need to free the whole - * chain when done. - * - * @_data: Pointer to the data buffer. - * - * @_data_size: Size of the data buffer (bytes); data size has to be a - * multiple of 4. Function will fail if not. - * - * Goes over the whole binary blob; reads the first chunk and creates - * a fw hdr from it (which points to where the data is in @_data and - * the length of the chunk); then goes on to the next chunk until - * done. Each header is linked to the next. - */ -static -int fw_hdrs_load(struct i1480 *i1480, struct fw_hdr **phdr, - const char *_data, size_t data_size) -{ - int result; - unsigned hdr_cnt = 0; - u32 *data = (u32 *) _data, *data_itr, *data_top; - struct fw_hdr *hdr, **prev_hdr = phdr; - - result = -EINVAL; - /* Check size is ok and pointer is aligned */ - if (data_size % sizeof(u32) != 0) - goto error; - if ((unsigned long) _data % sizeof(u16) != 0) - goto error; - *phdr = NULL; - data_itr = data; - data_top = (u32 *) (_data + data_size); - while (data_itr < data_top) { - result = -ENOMEM; - hdr = kmalloc(sizeof(*hdr), GFP_KERNEL); - if (hdr == NULL) { - dev_err(i1480->dev, "Cannot allocate fw header " - "for chunk #%u\n", hdr_cnt); - goto error_alloc; - } - result = fw_hdr_load(i1480, hdr, hdr_cnt, - _data, data_itr, data_top); - if (result < 0) - goto error_load; - data_itr += 2 + hdr->length; - *prev_hdr = hdr; - prev_hdr = &hdr->next; - hdr_cnt++; - }; - *prev_hdr = NULL; - return 0; - -error_load: - kfree(hdr); -error_alloc: - fw_hdrs_free(*phdr); -error: - return result; -} - - -/** - * Compares a chunk of fw with one in the devices's memory - * - * @i1480: Device instance - * @hdr: Pointer to the firmware chunk - * @returns: 0 if equal, < 0 errno on error. If > 0, it is the offset - * where the difference was found (plus one). - * - * Kind of dirty and simplistic, but does the trick in both the PCI - * and USB version. We do a quick[er] memcmp(), and if it fails, we do - * a byte-by-byte to find the offset. - */ -static -ssize_t i1480_fw_cmp(struct i1480 *i1480, struct fw_hdr *hdr) -{ - ssize_t result = 0; - u32 src_itr = 0, cnt; - size_t size = hdr->length*sizeof(hdr->bin[0]); - size_t chunk_size; - u8 *bin = (u8 *) hdr->bin; - - while (size > 0) { - chunk_size = size < i1480->buf_size ? size : i1480->buf_size; - result = i1480->read(i1480, hdr->address + src_itr, chunk_size); - if (result < 0) { - dev_err(i1480->dev, "error reading for verification: " - "%zd\n", result); - goto error; - } - if (memcmp(i1480->cmd_buf, bin + src_itr, result)) { - u8 *buf = i1480->cmd_buf; - for (cnt = 0; cnt < result; cnt++) - if (bin[src_itr + cnt] != buf[cnt]) { - dev_err(i1480->dev, "byte failed at " - "src_itr %u cnt %u [0x%02x " - "vs 0x%02x]\n", src_itr, cnt, - bin[src_itr + cnt], buf[cnt]); - result = src_itr + cnt + 1; - goto cmp_failed; - } - } - src_itr += result; - size -= result; - } - result = 0; -error: -cmp_failed: - return result; -} - - -/** - * Writes firmware headers to the device. - * - * @prd: PRD instance - * @hdr: Processed firmware - * @returns: 0 if ok, < 0 errno on error. 
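Taken together, fw_hdr_load() and fw_hdrs_load() above imply a very simple on-disk layout for the PRE/MAC firmware images: the file is a sequence of chunks, each carrying a little-endian load address, a little-endian payload length counted in 32-bit words, and then that many words of data; parsing stops when the buffer is exhausted, so the file size must be a multiple of four bytes. A sketch of that layout (the struct name is illustrative, it does not exist in the driver):

/*
 * firmware image = chunk, chunk, chunk, ... to end of file
 *
 * Each chunk is copied verbatim to 'address' in device memory and
 * then read back for verification by i1480_fw_cmp().
 */
struct i1480_fw_chunk {			/* illustrative only */
	__le32 address;			/* device load address */
	__le32 length;			/* payload length, in u32 words */
	__le32 data[];			/* 'length' words of payload */
} __attribute__((packed));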
- */ -static -int mac_fw_hdrs_push(struct i1480 *i1480, struct fw_hdr *hdr, - const char *fw_name, const char *fw_tag) -{ - struct device *dev = i1480->dev; - ssize_t result = 0; - struct fw_hdr *hdr_itr; - int verif_retry_count; - - /* Now, header by header, push them to the hw */ - for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) { - verif_retry_count = 0; -retry: - dev_dbg(dev, "fw chunk (%zu @ 0x%08lx)\n", - hdr_itr->length * sizeof(hdr_itr->bin[0]), - hdr_itr->address); - result = i1480->write(i1480, hdr_itr->address, hdr_itr->bin, - hdr_itr->length*sizeof(hdr_itr->bin[0])); - if (result < 0) { - dev_err(dev, "%s fw '%s': write failed (%zuB @ 0x%lx):" - " %zd\n", fw_tag, fw_name, - hdr_itr->length * sizeof(hdr_itr->bin[0]), - hdr_itr->address, result); - break; - } - result = i1480_fw_cmp(i1480, hdr_itr); - if (result < 0) { - dev_err(dev, "%s fw '%s': verification read " - "failed (%zuB @ 0x%lx): %zd\n", - fw_tag, fw_name, - hdr_itr->length * sizeof(hdr_itr->bin[0]), - hdr_itr->address, result); - break; - } - if (result > 0) { /* Offset where it failed + 1 */ - result--; - dev_err(dev, "%s fw '%s': WARNING: verification " - "failed at 0x%lx: retrying\n", - fw_tag, fw_name, hdr_itr->address + result); - if (++verif_retry_count < 3) - goto retry; /* write this block again! */ - dev_err(dev, "%s fw '%s': verification failed at 0x%lx: " - "tried %d times\n", fw_tag, fw_name, - hdr_itr->address + result, verif_retry_count); - result = -EINVAL; - break; - } - } - return result; -} - - -/** Puts the device in firmware upload mode.*/ -static -int mac_fw_upload_enable(struct i1480 *i1480) -{ - int result; - u32 reg = 0x800000c0; - u32 *buffer = (u32 *)i1480->cmd_buf; - - if (i1480->hw_rev > 1) - reg = 0x8000d0d4; - result = i1480->read(i1480, reg, sizeof(u32)); - if (result < 0) - goto error_cmd; - *buffer &= ~i1480_FW_UPLOAD_MODE_MASK; - result = i1480->write(i1480, reg, buffer, sizeof(u32)); - if (result < 0) - goto error_cmd; - return 0; -error_cmd: - dev_err(i1480->dev, "can't enable fw upload mode: %d\n", result); - return result; -} - - -/** Gets the device out of firmware upload mode. */ -static -int mac_fw_upload_disable(struct i1480 *i1480) -{ - int result; - u32 reg = 0x800000c0; - u32 *buffer = (u32 *)i1480->cmd_buf; - - if (i1480->hw_rev > 1) - reg = 0x8000d0d4; - result = i1480->read(i1480, reg, sizeof(u32)); - if (result < 0) - goto error_cmd; - *buffer |= i1480_FW_UPLOAD_MODE_MASK; - result = i1480->write(i1480, reg, buffer, sizeof(u32)); - if (result < 0) - goto error_cmd; - return 0; -error_cmd: - dev_err(i1480->dev, "can't disable fw upload mode: %d\n", result); - return result; -} - - - -/** - * Generic function for uploading a MAC firmware. - * - * @i1480: Device instance - * @fw_name: Name of firmware file to upload. - * @fw_tag: Name of the firmware type (for messages) - * [eg: MAC, PRE] - * @do_wait: Wait for device to emit initialization done message (0 - * for PRE fws, 1 for MAC fws). - * @returns: 0 if ok, < 0 errno on error. 
- */ -static -int __mac_fw_upload(struct i1480 *i1480, const char *fw_name, - const char *fw_tag) -{ - int result; - const struct firmware *fw; - struct fw_hdr *fw_hdrs; - - result = request_firmware(&fw, fw_name, i1480->dev); - if (result < 0) /* Up to caller to complain on -ENOENT */ - goto out; - result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size); - if (result < 0) { - dev_err(i1480->dev, "%s fw '%s': failed to parse firmware " - "file: %d\n", fw_tag, fw_name, result); - goto out_release; - } - result = mac_fw_upload_enable(i1480); - if (result < 0) - goto out_hdrs_release; - result = mac_fw_hdrs_push(i1480, fw_hdrs, fw_name, fw_tag); - mac_fw_upload_disable(i1480); -out_hdrs_release: - if (result >= 0) - dev_info(i1480->dev, "%s fw '%s': uploaded\n", fw_tag, fw_name); - else - dev_err(i1480->dev, "%s fw '%s': failed to upload (%d), " - "power cycle device\n", fw_tag, fw_name, result); - fw_hdrs_free(fw_hdrs); -out_release: - release_firmware(fw); -out: - return result; -} - - -/** - * Upload a pre-PHY firmware - * - */ -int i1480_pre_fw_upload(struct i1480 *i1480) -{ - int result; - result = __mac_fw_upload(i1480, i1480->pre_fw_name, "PRE"); - if (result == 0) - msleep(400); - return result; -} - - -/** - * Reset a the MAC and PHY - * - * @i1480: Device's instance - * @returns: 0 if ok, < 0 errno code on error - * - * We put the command on kmalloc'ed memory as some arches cannot do - * USB from the stack. The reply event is copied from an stage buffer, - * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details. - * - * We issue the reset to make sure the UWB controller reinits the PHY; - * this way we can now if the PHY init went ok. - */ -static -int i1480_cmd_reset(struct i1480 *i1480) -{ - int result; - struct uwb_rccb *cmd = (void *) i1480->cmd_buf; - struct i1480_evt_reset { - struct uwb_rceb rceb; - u8 bResultCode; - } __attribute__((packed)) *reply = (void *) i1480->evt_buf; - - result = -ENOMEM; - cmd->bCommandType = UWB_RC_CET_GENERAL; - cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET); - reply->rceb.bEventType = UWB_RC_CET_GENERAL; - reply->rceb.wEvent = UWB_RC_CMD_RESET; - result = i1480_cmd(i1480, "RESET", sizeof(*cmd), sizeof(*reply)); - if (result < 0) - goto out; - if (reply->bResultCode != UWB_RC_RES_SUCCESS) { - dev_err(i1480->dev, "RESET: command execution failed: %u\n", - reply->bResultCode); - result = -EIO; - } -out: - return result; - -} - - -/* Wait for the MAC FW to start running */ -static -int i1480_fw_is_running_q(struct i1480 *i1480) -{ - int cnt = 0; - int result; - u32 *val = (u32 *) i1480->cmd_buf; - - for (cnt = 0; cnt < 10; cnt++) { - msleep(100); - result = i1480->read(i1480, 0x80080000, 4); - if (result < 0) { - dev_err(i1480->dev, "Can't read 0x8008000: %d\n", result); - goto out; - } - if (*val == 0x55555555UL) /* fw running? cool */ - goto out; - } - dev_err(i1480->dev, "Timed out waiting for fw to start\n"); - result = -ETIMEDOUT; -out: - return result; - -} - - -/** - * Upload MAC firmware, wait for it to start - * - * @i1480: Device instance - * @fw_name: Name of the file that contains the firmware - * - * This has to be called after the pre fw has been uploaded (if - * there is any). 
- */ -int i1480_mac_fw_upload(struct i1480 *i1480) -{ - int result = 0, deprecated_name = 0; - struct i1480_rceb *rcebe = (void *) i1480->evt_buf; - - result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC"); - if (result == -ENOENT) { - result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate, - "MAC"); - deprecated_name = 1; - } - if (result < 0) - return result; - if (deprecated_name == 1) - dev_warn(i1480->dev, - "WARNING: firmware file name %s is deprecated, " - "please rename to %s\n", - i1480->mac_fw_name_deprecate, i1480->mac_fw_name); - result = i1480_fw_is_running_q(i1480); - if (result < 0) - goto error_fw_not_running; - result = i1480->rc_setup ? i1480->rc_setup(i1480) : 0; - if (result < 0) { - dev_err(i1480->dev, "Cannot setup after MAC fw upload: %d\n", - result); - goto error_setup; - } - result = i1480->wait_init_done(i1480); /* wait init'on */ - if (result < 0) { - dev_err(i1480->dev, "MAC fw '%s': Initialization timed out " - "(%d)\n", i1480->mac_fw_name, result); - goto error_init_timeout; - } - /* verify we got the right initialization done event */ - if (i1480->evt_result != sizeof(*rcebe)) { - dev_err(i1480->dev, "MAC fw '%s': initialization event returns " - "wrong size (%zu bytes vs %zu needed)\n", - i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe)); - goto error_size; - } - result = -EIO; - if (i1480_rceb_check(i1480, &rcebe->rceb, NULL, 0, i1480_CET_VS1, - i1480_EVT_RM_INIT_DONE) < 0) { - dev_err(i1480->dev, "wrong initialization event 0x%02x/%04x/%02x " - "received; expected 0x%02x/%04x/00\n", - rcebe->rceb.bEventType, le16_to_cpu(rcebe->rceb.wEvent), - rcebe->rceb.bEventContext, i1480_CET_VS1, - i1480_EVT_RM_INIT_DONE); - goto error_init_timeout; - } - result = i1480_cmd_reset(i1480); - if (result < 0) - dev_err(i1480->dev, "MAC fw '%s': MBOA reset failed (%d)\n", - i1480->mac_fw_name, result); -error_fw_not_running: -error_init_timeout: -error_size: -error_setup: - return result; -} diff --git a/drivers/staging/uwb/i1480/dfu/phy.c b/drivers/staging/uwb/i1480/dfu/phy.c deleted file mode 100644 index 13512c7dda0b..000000000000 --- a/drivers/staging/uwb/i1480/dfu/phy.c +++ /dev/null @@ -1,190 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Wireless UWB Link 1480 - * PHY parameters upload - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * Code for uploading the PHY parameters to the PHY through the UWB - * Radio Control interface. - * - * We just send the data through the MPI interface using HWA-like - * commands and then reset the PHY to make sure it is ok. - */ -#include <linux/delay.h> -#include <linux/device.h> -#include <linux/firmware.h> -#include "../../../wusbcore/include/wusb.h" -#include "i1480-dfu.h" - - -/** - * Write a value array to an address of the MPI interface - * - * @i1480: Device descriptor - * @data: Data array to write - * @size: Size of the data array - * @returns: 0 if ok, < 0 errno code on error. - * - * The data array is organized into pairs: - * - * ADDRESS VALUE - * - * ADDRESS is BE 16 bit unsigned, VALUE 8 bit unsigned. Size thus has - * to be a multiple of three. 
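In other words, each entry of the MPI write payload is a three-byte triad: the big-endian 16-bit register address followed by the 8-bit value, so writing N registers takes 3*N bytes of data (and at most 480 bytes per command, per the BUG_ON below). A small sketch of assembling such a payload before handing it to i1480_mpi_write(); the helper and register addresses are made up for illustration. (The MPI read command described further down takes two-byte page/offset pairs instead and returns page/offset/value triples.)

/* Illustrative helper: append one ADDRESS/VALUE triad to a buffer. */
static size_t example_mpi_put(u8 *buf, size_t at, u16 addr, u8 val)
{
	buf[at + 0] = addr >> 8;	/* address MSB (big endian) */
	buf[at + 1] = addr & 0xff;	/* address LSB */
	buf[at + 2] = val;		/* 8-bit value to write */
	return at + 3;
}

static int example_phy_poke(struct i1480 *i1480)
{
	u8 payload[6];
	size_t len = 0;

	len = example_mpi_put(payload, len, 0x0104, 0x5a);	/* made-up regs */
	len = example_mpi_put(payload, len, 0x0105, 0x00);
	return i1480_mpi_write(i1480, payload, len);	/* 6 bytes, multiple of 3 */
}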
- */ -static -int i1480_mpi_write(struct i1480 *i1480, const void *data, size_t size) -{ - int result; - struct i1480_cmd_mpi_write *cmd = i1480->cmd_buf; - struct i1480_evt_confirm *reply = i1480->evt_buf; - - BUG_ON(size > 480); - result = -ENOMEM; - cmd->rccb.bCommandType = i1480_CET_VS1; - cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_WRITE); - cmd->size = cpu_to_le16(size); - memcpy(cmd->data, data, size); - reply->rceb.bEventType = i1480_CET_VS1; - reply->rceb.wEvent = i1480_CMD_MPI_WRITE; - result = i1480_cmd(i1480, "MPI-WRITE", sizeof(*cmd) + size, sizeof(*reply)); - if (result < 0) - goto out; - if (reply->bResultCode != UWB_RC_RES_SUCCESS) { - dev_err(i1480->dev, "MPI-WRITE: command execution failed: %d\n", - reply->bResultCode); - result = -EIO; - } -out: - return result; -} - - -/** - * Read a value array to from an address of the MPI interface - * - * @i1480: Device descriptor - * @data: where to place the read array - * @srcaddr: Where to read from - * @size: Size of the data read array - * @returns: 0 if ok, < 0 errno code on error. - * - * The command data array is organized into pairs ADDR0 ADDR1..., and - * the returned data in ADDR0 VALUE0 ADDR1 VALUE1... - * - * We generate the command array to be a sequential read and then - * rearrange the result. - * - * We use the i1480->cmd_buf for the command, i1480->evt_buf for the reply. - * - * As the reply has to fit in 512 bytes (i1480->evt_buffer), the max amount - * of values we can read is (512 - sizeof(*reply)) / 3 - */ -static -int i1480_mpi_read(struct i1480 *i1480, u8 *data, u16 srcaddr, size_t size) -{ - int result; - struct i1480_cmd_mpi_read *cmd = i1480->cmd_buf; - struct i1480_evt_mpi_read *reply = i1480->evt_buf; - unsigned cnt; - - memset(i1480->cmd_buf, 0x69, 512); - memset(i1480->evt_buf, 0x69, 512); - - BUG_ON(size > (i1480->buf_size - sizeof(*reply)) / 3); - result = -ENOMEM; - cmd->rccb.bCommandType = i1480_CET_VS1; - cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_READ); - cmd->size = cpu_to_le16(3*size); - for (cnt = 0; cnt < size; cnt++) { - cmd->data[cnt].page = (srcaddr + cnt) >> 8; - cmd->data[cnt].offset = (srcaddr + cnt) & 0xff; - } - reply->rceb.bEventType = i1480_CET_VS1; - reply->rceb.wEvent = i1480_CMD_MPI_READ; - result = i1480_cmd(i1480, "MPI-READ", sizeof(*cmd) + 2*size, - sizeof(*reply) + 3*size); - if (result < 0) - goto out; - if (reply->bResultCode != UWB_RC_RES_SUCCESS) { - dev_err(i1480->dev, "MPI-READ: command execution failed: %d\n", - reply->bResultCode); - result = -EIO; - goto out; - } - for (cnt = 0; cnt < size; cnt++) { - if (reply->data[cnt].page != (srcaddr + cnt) >> 8) - dev_err(i1480->dev, "MPI-READ: page inconsistency at " - "index %u: expected 0x%02x, got 0x%02x\n", cnt, - (srcaddr + cnt) >> 8, reply->data[cnt].page); - if (reply->data[cnt].offset != ((srcaddr + cnt) & 0x00ff)) - dev_err(i1480->dev, "MPI-READ: offset inconsistency at " - "index %u: expected 0x%02x, got 0x%02x\n", cnt, - (srcaddr + cnt) & 0x00ff, - reply->data[cnt].offset); - data[cnt] = reply->data[cnt].value; - } - result = 0; -out: - return result; -} - - -/** - * Upload a PHY firmware, wait for it to start - * - * @i1480: Device instance - * @fw_name: Name of the file that contains the firmware - * - * We assume the MAC fw is up and running. This means we can use the - * MPI interface to write the PHY firmware. Once done, we issue an - * MBOA Reset, which will force the MAC to reset and reinitialize the - * PHY. If that works, we are ready to go. 
- * - * Max packet size for the MPI write is 512, so the max buffer is 480 - * (which gives us 160 byte triads of MSB, LSB and VAL for the data). - */ -int i1480_phy_fw_upload(struct i1480 *i1480) -{ - int result; - const struct firmware *fw; - const char *data_itr, *data_top; - const size_t MAX_BLK_SIZE = 480; /* 160 triads */ - size_t data_size; - u8 phy_stat; - - result = request_firmware(&fw, i1480->phy_fw_name, i1480->dev); - if (result < 0) - goto out; - /* Loop writing data in chunks as big as possible until done. */ - for (data_itr = fw->data, data_top = data_itr + fw->size; - data_itr < data_top; data_itr += MAX_BLK_SIZE) { - data_size = min(MAX_BLK_SIZE, (size_t) (data_top - data_itr)); - result = i1480_mpi_write(i1480, data_itr, data_size); - if (result < 0) - goto error_mpi_write; - } - /* Read MPI page 0, offset 6; if 0, PHY was initialized correctly. */ - result = i1480_mpi_read(i1480, &phy_stat, 0x0006, 1); - if (result < 0) { - dev_err(i1480->dev, "PHY: can't get status: %d\n", result); - goto error_mpi_status; - } - if (phy_stat != 0) { - result = -ENODEV; - dev_info(i1480->dev, "error, PHY not ready: %u\n", phy_stat); - goto error_phy_status; - } - dev_info(i1480->dev, "PHY fw '%s': uploaded\n", i1480->phy_fw_name); -error_phy_status: -error_mpi_status: -error_mpi_write: - release_firmware(fw); - if (result < 0) - dev_err(i1480->dev, "PHY fw '%s': failed to upload (%d), " - "power cycle device\n", i1480->phy_fw_name, result); -out: - return result; -} diff --git a/drivers/staging/uwb/i1480/dfu/usb.c b/drivers/staging/uwb/i1480/dfu/usb.c deleted file mode 100644 index d41086bdd783..000000000000 --- a/drivers/staging/uwb/i1480/dfu/usb.c +++ /dev/null @@ -1,448 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Wireless UWB Link 1480 - * USB SKU firmware upload implementation - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This driver will prepare the i1480 device to behave as a real - * Wireless USB HWA adaptor by uploading the firmware. - * - * When the device is connected or driver is loaded, i1480_usb_probe() - * is called--this will allocate and initialize the device structure, - * fill in the pointers to the common functions (read, write, - * wait_init_done and cmd for HWA command execution) and once that is - * done, call the common firmware uploading routine. Then clean up and - * return -ENODEV, as we don't attach to the device. - * - * The rest are the basic ops we implement that the fw upload code - * uses to do its job. All the ops in the common code are i1480->NAME, - * the functions are i1480_usb_NAME(). 
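The wiring the comment describes happens in i1480_usb_probe() further down: the USB glue fills the common struct i1480 with its transport helpers, runs the shared uploader and then refuses to bind. A rough sketch of that flow, assuming the i1480_usb_* helpers defined below; the real probe also validates the endpoint, fills in the device pointer, firmware names and buffers, and cleans up afterwards, all omitted here:

static int example_wire_and_upload(struct i1480_usb *i1480_usb)
{
	struct i1480 *i1480 = &i1480_usb->i1480;
	int result;

	/* Point the common fw-upload code at the USB transport ops. */
	i1480->write = i1480_usb_write;
	i1480->read = i1480_usb_read;
	i1480->wait_init_done = i1480_usb_wait_init_done;
	i1480->cmd = i1480_usb_cmd;

	result = i1480_fw_upload(i1480);	/* shared PRE/MAC/PHY sequence */

	/* Firmware uploaded (or not); either way we don't stay bound. */
	return result < 0 ? result : -ENODEV;
}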
- */ -#include <linux/module.h> -#include <linux/usb.h> -#include <linux/interrupt.h> -#include <linux/slab.h> -#include <linux/delay.h> -#include "../../uwb.h" -#include "../../../wusbcore/include/wusb.h" -#include "../../../wusbcore/include/wusb-wa.h" -#include "i1480-dfu.h" - -struct i1480_usb { - struct i1480 i1480; - struct usb_device *usb_dev; - struct usb_interface *usb_iface; - struct urb *neep_urb; /* URB for reading from EP1 */ -}; - - -static -void i1480_usb_init(struct i1480_usb *i1480_usb) -{ - i1480_init(&i1480_usb->i1480); -} - - -static -int i1480_usb_create(struct i1480_usb *i1480_usb, struct usb_interface *iface) -{ - struct usb_device *usb_dev = interface_to_usbdev(iface); - int result = -ENOMEM; - - i1480_usb->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */ - i1480_usb->usb_iface = usb_get_intf(iface); - usb_set_intfdata(iface, i1480_usb); /* Bind the driver to iface0 */ - i1480_usb->neep_urb = usb_alloc_urb(0, GFP_KERNEL); - if (i1480_usb->neep_urb == NULL) - goto error; - return 0; - -error: - usb_set_intfdata(iface, NULL); - usb_put_intf(iface); - usb_put_dev(usb_dev); - return result; -} - - -static -void i1480_usb_destroy(struct i1480_usb *i1480_usb) -{ - usb_kill_urb(i1480_usb->neep_urb); - usb_free_urb(i1480_usb->neep_urb); - usb_set_intfdata(i1480_usb->usb_iface, NULL); - usb_put_intf(i1480_usb->usb_iface); - usb_put_dev(i1480_usb->usb_dev); -} - - -/** - * Write a buffer to a memory address in the i1480 device - * - * @i1480: i1480 instance - * @memory_address: - * Address where to write the data buffer to. - * @buffer: Buffer to the data - * @size: Size of the buffer [has to be < 512]. - * @returns: 0 if ok, < 0 errno code on error. - * - * Data buffers to USB cannot be on the stack or in vmalloc'ed areas, - * so we copy it to the local i1480 buffer before proceeding. In any - * case, we have a max size we can send. - */ -static -int i1480_usb_write(struct i1480 *i1480, u32 memory_address, - const void *buffer, size_t size) -{ - int result = 0; - struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); - size_t buffer_size, itr = 0; - - BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ - while (size > 0) { - buffer_size = size < i1480->buf_size ? size : i1480->buf_size; - memcpy(i1480->cmd_buf, buffer + itr, buffer_size); - result = usb_control_msg( - i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0), - 0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - memory_address, (memory_address >> 16), - i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */); - if (result < 0) - break; - itr += result; - memory_address += result; - size -= result; - } - return result; -} - - -/** - * Read a block [max size 512] of the device's memory to @i1480's buffer. - * - * @i1480: i1480 instance - * @memory_address: - * Address where to read from. - * @size: Size to read. Smaller than or equal to 512. - * @returns: >= 0 number of bytes written if ok, < 0 errno code on error. - * - * NOTE: if the memory address or block is incorrect, you might get a - * stall or a different memory read. Caller has to verify the - * memory address and size passed back in the @neh structure. 
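Both directions use the same vendor request (0xf0) and split the 32-bit device address across the two 16-bit setup words: the low half travels in wValue, the high half in wIndex; usb_control_msg() takes those arguments as 16-bit values, which is why the bare 'memory_address' / 'itr_addr' arguments in these helpers work. A tiny illustrative helper (not part of the driver) making the split explicit:

static inline void example_addr_split(u32 addr, u16 *wvalue, u16 *windex)
{
	*wvalue = addr & 0xffff;	/* e.g. 0x8000d2a4 -> wValue 0xd2a4 */
	*windex = addr >> 16;		/*                 -> wIndex 0x8000 */
}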
- */ -static -int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size) -{ - ssize_t result = 0, bytes = 0; - size_t itr, read_size = i1480->buf_size; - struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); - - BUG_ON(size > i1480->buf_size); - BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ - BUG_ON(read_size > 512); - - if (addr >= 0x8000d200 && addr < 0x8000d400) /* Yeah, HW quirk */ - read_size = 4; - - for (itr = 0; itr < size; itr += read_size) { - size_t itr_addr = addr + itr; - size_t itr_size = min(read_size, size - itr); - result = usb_control_msg( - i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0), - 0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - itr_addr, (itr_addr >> 16), - i1480->cmd_buf + itr, itr_size, - 100 /* FIXME: arbitrary */); - if (result < 0) { - dev_err(i1480->dev, "%s: USB read error: %zd\n", - __func__, result); - goto out; - } - if (result != itr_size) { - result = -EIO; - dev_err(i1480->dev, - "%s: partial read got only %zu bytes vs %zu expected\n", - __func__, result, itr_size); - goto out; - } - bytes += result; - } - result = bytes; -out: - return result; -} - - -/** - * Callback for reads on the notification/event endpoint - * - * Just enables the completion read handler. - */ -static -void i1480_usb_neep_cb(struct urb *urb) -{ - struct i1480 *i1480 = urb->context; - struct device *dev = i1480->dev; - - switch (urb->status) { - case 0: - break; - case -ECONNRESET: /* Not an error, but a controlled situation; */ - case -ENOENT: /* (we killed the URB)...so, no broadcast */ - dev_dbg(dev, "NEEP: reset/noent %d\n", urb->status); - break; - case -ESHUTDOWN: /* going away! */ - dev_dbg(dev, "NEEP: down %d\n", urb->status); - break; - default: - dev_err(dev, "NEEP: unknown status %d\n", urb->status); - break; - } - i1480->evt_result = urb->actual_length; - complete(&i1480->evt_complete); - return; -} - - -/** - * Wait for the MAC FW to initialize - * - * MAC FW sends a 0xfd/0101/00 notification to EP1 when done - * initializing. Get that notification into i1480->evt_buf; upper layer - * will verify it. - * - * Set i1480->evt_result with the result of getting the event or its - * size (if successful). - * - * Delivers the data directly to i1480->evt_buf - */ -static -int i1480_usb_wait_init_done(struct i1480 *i1480) -{ - int result; - struct device *dev = i1480->dev; - struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); - struct usb_endpoint_descriptor *epd; - - init_completion(&i1480->evt_complete); - i1480->evt_result = -EINPROGRESS; - epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; - usb_fill_int_urb(i1480_usb->neep_urb, i1480_usb->usb_dev, - usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress), - i1480->evt_buf, i1480->buf_size, - i1480_usb_neep_cb, i1480, epd->bInterval); - result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL); - if (result < 0) { - dev_err(dev, "init done: cannot submit NEEP read: %d\n", - result); - goto error_submit; - } - /* Wait for the USB callback to get the data */ - result = wait_for_completion_interruptible_timeout( - &i1480->evt_complete, HZ); - if (result <= 0) { - result = result == 0 ? 
-ETIMEDOUT : result; - goto error_wait; - } - usb_kill_urb(i1480_usb->neep_urb); - return 0; - -error_wait: - usb_kill_urb(i1480_usb->neep_urb); -error_submit: - i1480->evt_result = result; - return result; -} - - -/** - * Generic function for issuing commands to the i1480 - * - * @i1480: i1480 instance - * @cmd_name: Name of the command (for error messages) - * @cmd: Pointer to command buffer - * @cmd_size: Size of the command buffer - * @reply: Buffer for the reply event - * @reply_size: Expected size back (including RCEB); the reply buffer - * is assumed to be as big as this. - * @returns: >= 0 size of the returned event data if ok, - * < 0 errno code on error. - * - * Arms the NE handle, issues the command to the device and checks the - * basics of the reply event. - */ -static -int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size) -{ - int result; - struct device *dev = i1480->dev; - struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); - struct usb_endpoint_descriptor *epd; - struct uwb_rccb *cmd = i1480->cmd_buf; - u8 iface_no; - - /* Post a read on the notification & event endpoint */ - iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber; - epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; - usb_fill_int_urb( - i1480_usb->neep_urb, i1480_usb->usb_dev, - usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress), - i1480->evt_buf, i1480->buf_size, - i1480_usb_neep_cb, i1480, epd->bInterval); - result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL); - if (result < 0) { - dev_err(dev, "%s: cannot submit NEEP read: %d\n", - cmd_name, result); - goto error_submit_ep1; - } - /* Now post the command on EP0 */ - result = usb_control_msg( - i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0), - WA_EXEC_RC_CMD, - USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS, - 0, iface_no, - cmd, cmd_size, - 100 /* FIXME: this is totally arbitrary */); - if (result < 0) { - dev_err(dev, "%s: control request failed: %d\n", - cmd_name, result); - goto error_submit_ep0; - } - return result; - -error_submit_ep0: - usb_kill_urb(i1480_usb->neep_urb); -error_submit_ep1: - return result; -} - - -/* - * Probe a i1480 device for uploading firmware. - * - * We attach only to interface #0, which is the radio control interface. 
- */ -static -int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) -{ - struct usb_device *udev = interface_to_usbdev(iface); - struct i1480_usb *i1480_usb; - struct i1480 *i1480; - struct device *dev = &iface->dev; - int result; - - result = -ENODEV; - if (iface->cur_altsetting->desc.bInterfaceNumber != 0) { - dev_dbg(dev, "not attaching to iface %d\n", - iface->cur_altsetting->desc.bInterfaceNumber); - goto error; - } - if (iface->num_altsetting > 1 && - le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) { - /* Need altsetting #1 [HW QUIRK] or EP1 won't work */ - result = usb_set_interface(interface_to_usbdev(iface), 0, 1); - if (result < 0) - dev_warn(dev, - "can't set altsetting 1 on iface 0: %d\n", - result); - } - - if (iface->cur_altsetting->desc.bNumEndpoints < 1) - return -ENODEV; - - result = -ENOMEM; - i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL); - if (i1480_usb == NULL) { - dev_err(dev, "Unable to allocate instance\n"); - goto error; - } - i1480_usb_init(i1480_usb); - - i1480 = &i1480_usb->i1480; - i1480->buf_size = 512; - i1480->cmd_buf = kmalloc_array(2, i1480->buf_size, GFP_KERNEL); - if (i1480->cmd_buf == NULL) { - dev_err(dev, "Cannot allocate transfer buffers\n"); - result = -ENOMEM; - goto error_buf_alloc; - } - i1480->evt_buf = i1480->cmd_buf + i1480->buf_size; - - result = i1480_usb_create(i1480_usb, iface); - if (result < 0) { - dev_err(dev, "Cannot create instance: %d\n", result); - goto error_create; - } - - /* setup the fops and upload the firmware */ - i1480->pre_fw_name = "i1480-pre-phy-0.0.bin"; - i1480->mac_fw_name = "i1480-usb-0.0.bin"; - i1480->mac_fw_name_deprecate = "ptc-0.0.bin"; - i1480->phy_fw_name = "i1480-phy-0.0.bin"; - i1480->dev = &iface->dev; - i1480->write = i1480_usb_write; - i1480->read = i1480_usb_read; - i1480->rc_setup = NULL; - i1480->wait_init_done = i1480_usb_wait_init_done; - i1480->cmd = i1480_usb_cmd; - - result = i1480_fw_upload(&i1480_usb->i1480); /* the real thing */ - if (result >= 0) { - usb_reset_device(i1480_usb->usb_dev); - result = -ENODEV; /* we don't want to bind to the iface */ - } - i1480_usb_destroy(i1480_usb); -error_create: - kfree(i1480->cmd_buf); -error_buf_alloc: - kfree(i1480_usb); -error: - return result; -} - -MODULE_FIRMWARE("i1480-pre-phy-0.0.bin"); -MODULE_FIRMWARE("i1480-usb-0.0.bin"); -MODULE_FIRMWARE("i1480-phy-0.0.bin"); - -#define i1480_USB_DEV(v, p) \ -{ \ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE \ - | USB_DEVICE_ID_MATCH_DEV_INFO \ - | USB_DEVICE_ID_MATCH_INT_INFO, \ - .idVendor = (v), \ - .idProduct = (p), \ - .bDeviceClass = 0xff, \ - .bDeviceSubClass = 0xff, \ - .bDeviceProtocol = 0xff, \ - .bInterfaceClass = 0xff, \ - .bInterfaceSubClass = 0xff, \ - .bInterfaceProtocol = 0xff, \ -} - - -/** USB device ID's that we handle */ -static const struct usb_device_id i1480_usb_id_table[] = { - i1480_USB_DEV(0x8086, 0xdf3b), - i1480_USB_DEV(0x15a9, 0x0005), - i1480_USB_DEV(0x07d1, 0x3802), - i1480_USB_DEV(0x050d, 0x305a), - i1480_USB_DEV(0x3495, 0x3007), - {}, -}; -MODULE_DEVICE_TABLE(usb, i1480_usb_id_table); - - -static struct usb_driver i1480_dfu_driver = { - .name = "i1480-dfu-usb", - .id_table = i1480_usb_id_table, - .probe = i1480_usb_probe, - .disconnect = NULL, -}; - -module_usb_driver(i1480_dfu_driver); - -MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); -MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/uwb/i1480/i1480-est.c 
b/drivers/staging/uwb/i1480/i1480-est.c deleted file mode 100644 index 106e0a44b138..000000000000 --- a/drivers/staging/uwb/i1480/i1480-est.c +++ /dev/null @@ -1,85 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Wireless UWB Link 1480 - * Event Size tables for Wired Adaptors - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: docs - */ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/usb.h> -#include "../uwb.h" -#include "dfu/i1480-dfu.h" - - -/** Event size table for wEvents 0x00XX */ -static struct uwb_est_entry i1480_est_fd00[] = { - /* Anybody expecting this response has to use - * neh->extra_size to specify the real size that will - * come back. */ - [i1480_EVT_CONFIRM] = { .size = sizeof(struct i1480_evt_confirm) }, - [i1480_CMD_SET_IP_MAS] = { .size = sizeof(struct i1480_evt_confirm) }, -#ifdef i1480_RCEB_EXTENDED - [0x09] = { - .size = sizeof(struct i1480_rceb), - .offset = 1 + offsetof(struct i1480_rceb, wParamLength), - }, -#endif -}; - -/** Event size table for wEvents 0x01XX */ -static struct uwb_est_entry i1480_est_fd01[] = { - [0xff & i1480_EVT_RM_INIT_DONE] = { .size = sizeof(struct i1480_rceb) }, - [0xff & i1480_EVT_DEV_ADD] = { .size = sizeof(struct i1480_rceb) + 9 }, - [0xff & i1480_EVT_DEV_RM] = { .size = sizeof(struct i1480_rceb) + 9 }, - [0xff & i1480_EVT_DEV_ID_CHANGE] = { - .size = sizeof(struct i1480_rceb) + 2 }, -}; - -static int __init i1480_est_init(void) -{ - int result = uwb_est_register(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b, - i1480_est_fd00, - ARRAY_SIZE(i1480_est_fd00)); - if (result < 0) { - printk(KERN_ERR "Can't register EST table fd00: %d\n", result); - return result; - } - result = uwb_est_register(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b, - i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01)); - if (result < 0) { - printk(KERN_ERR "Can't register EST table fd01: %d\n", result); - return result; - } - return 0; -} -module_init(i1480_est_init); - -static void __exit i1480_est_exit(void) -{ - uwb_est_unregister(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b, - i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00)); - uwb_est_unregister(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b, - i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01)); -} -module_exit(i1480_est_exit); - -MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); -MODULE_DESCRIPTION("i1480's Vendor Specific Event Size Tables"); -MODULE_LICENSE("GPL"); - -/** - * USB device ID's that we handle - * - * [so we are loaded when this kind device is connected] - */ -static struct usb_device_id __used i1480_est_id_table[] = { - { USB_DEVICE(0x8086, 0xdf3b), }, - { USB_DEVICE(0x8086, 0x0c3b), }, - { }, -}; -MODULE_DEVICE_TABLE(usb, i1480_est_id_table); diff --git a/drivers/staging/uwb/ie-rcv.c b/drivers/staging/uwb/ie-rcv.c deleted file mode 100644 index 51a4706e0dd3..000000000000 --- a/drivers/staging/uwb/ie-rcv.c +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band - * IE Received notification handling. - * - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. - */ - -#include <linux/errno.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/bitmap.h> -#include "uwb-internal.h" - -/* - * Process an incoming IE Received notification. - */ -int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *evt) -{ - int result = -EINVAL; - struct device *dev = &evt->rc->uwb_dev.dev; - struct uwb_rc_evt_ie_rcv *iercv; - - /* Is there enough data to decode it? 
*/ - if (evt->notif.size < sizeof(*iercv)) { - dev_err(dev, "IE Received notification: Not enough data to " - "decode (%zu vs %zu bytes needed)\n", - evt->notif.size, sizeof(*iercv)); - goto error; - } - iercv = container_of(evt->notif.rceb, struct uwb_rc_evt_ie_rcv, rceb); - - dev_dbg(dev, "IE received, element ID=%d\n", iercv->IEData[0]); - - if (iercv->IEData[0] == UWB_RELINQUISH_REQUEST_IE) { - dev_warn(dev, "unhandled Relinquish Request IE\n"); - } - - return 0; -error: - return result; -} diff --git a/drivers/staging/uwb/ie.c b/drivers/staging/uwb/ie.c deleted file mode 100644 index b11678dd0380..000000000000 --- a/drivers/staging/uwb/ie.c +++ /dev/null @@ -1,366 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band - * Information Element Handling - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * Reinette Chatre <reinette.chatre@intel.com> - * - * FIXME: docs - */ - -#include <linux/slab.h> -#include <linux/export.h> -#include "uwb-internal.h" - -/** - * uwb_ie_next - get the next IE in a buffer - * @ptr: start of the buffer containing the IE data - * @len: length of the buffer - * - * Both @ptr and @len are updated so subsequent calls to uwb_ie_next() - * will get the next IE. - * - * NULL is returned (and @ptr and @len will not be updated) if there - * are no more IEs in the buffer or the buffer is too short. - */ -struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len) -{ - struct uwb_ie_hdr *hdr; - size_t ie_len; - - if (*len < sizeof(struct uwb_ie_hdr)) - return NULL; - - hdr = *ptr; - ie_len = sizeof(struct uwb_ie_hdr) + hdr->length; - - if (*len < ie_len) - return NULL; - - *ptr += ie_len; - *len -= ie_len; - - return hdr; -} -EXPORT_SYMBOL_GPL(uwb_ie_next); - -/** - * uwb_ie_dump_hex - print IEs to a character buffer - * @ies: the IEs to print. - * @len: length of all the IEs. - * @buf: the destination buffer. - * @size: size of @buf. - * - * Returns the number of characters written. - */ -int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len, - char *buf, size_t size) -{ - void *ptr; - const struct uwb_ie_hdr *ie; - int r = 0; - u8 *d; - - ptr = (void *)ies; - for (;;) { - ie = uwb_ie_next(&ptr, &len); - if (!ie) - break; - - r += scnprintf(buf + r, size - r, "%02x %02x", - (unsigned)ie->element_id, - (unsigned)ie->length); - d = (uint8_t *)ie + sizeof(struct uwb_ie_hdr); - while (d != ptr && r < size) - r += scnprintf(buf + r, size - r, " %02x", (unsigned)*d++); - if (r < size) - buf[r++] = '\n'; - }; - - return r; -} - -/** - * Get the IEs that a radio controller is sending in its beacon - * - * @uwb_rc: UWB Radio Controller - * @returns: Size read from the system - * - * We don't need to lock the uwb_rc's mutex because we don't modify - * anything. Once done with the iedata buffer, call - * uwb_rc_ie_release(iedata). Don't call kfree on it. 
- */ -static -ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie) -{ - ssize_t result; - struct device *dev = &uwb_rc->uwb_dev.dev; - struct uwb_rccb *cmd = NULL; - struct uwb_rceb *reply = NULL; - struct uwb_rc_evt_get_ie *get_ie; - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - return -ENOMEM; - - cmd->bCommandType = UWB_RC_CET_GENERAL; - cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE); - result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd), - UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE, - &reply); - kfree(cmd); - if (result < 0) - return result; - - get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb); - if (result < sizeof(*get_ie)) { - dev_err(dev, "not enough data returned for decoding GET IE " - "(%zu bytes received vs %zu needed)\n", - result, sizeof(*get_ie)); - return -EINVAL; - } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) { - dev_err(dev, "not enough data returned for decoding GET IE " - "payload (%zu bytes received vs %zu needed)\n", result, - sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)); - return -EINVAL; - } - - *pget_ie = get_ie; - return result; -} - - -/** - * Replace all IEs currently being transmitted by a device - * - * @cmd: pointer to the SET-IE command with the IEs to set - * @size: size of @buf - */ -int uwb_rc_set_ie(struct uwb_rc *rc, struct uwb_rc_cmd_set_ie *cmd) -{ - int result; - struct device *dev = &rc->uwb_dev.dev; - struct uwb_rc_evt_set_ie reply; - - reply.rceb.bEventType = UWB_RC_CET_GENERAL; - reply.rceb.wEvent = UWB_RC_CMD_SET_IE; - result = uwb_rc_cmd(rc, "SET-IE", &cmd->rccb, - sizeof(*cmd) + le16_to_cpu(cmd->wIELength), - &reply.rceb, sizeof(reply)); - if (result < 0) - goto error_cmd; - else if (result != sizeof(reply)) { - dev_err(dev, "SET-IE: not enough data to decode reply " - "(%d bytes received vs %zu needed)\n", - result, sizeof(reply)); - result = -EIO; - } else if (reply.bResultCode != UWB_RC_RES_SUCCESS) { - dev_err(dev, "SET-IE: command execution failed: %s (%d)\n", - uwb_rc_strerror(reply.bResultCode), reply.bResultCode); - result = -EIO; - } else - result = 0; -error_cmd: - return result; -} - -/* Cleanup the whole IE management subsystem */ -void uwb_rc_ie_init(struct uwb_rc *uwb_rc) -{ - mutex_init(&uwb_rc->ies_mutex); -} - - -/** - * uwb_rc_ie_setup - setup a radio controller's IE manager - * @uwb_rc: the radio controller. - * - * The current set of IEs are obtained from the hardware with a GET-IE - * command (since the radio controller is not yet beaconing this will - * be just the hardware's MAC and PHY Capability IEs). - * - * Returns 0 on success; -ve on an error. 
- */ -int uwb_rc_ie_setup(struct uwb_rc *uwb_rc) -{ - struct uwb_rc_evt_get_ie *ie_info = NULL; - int capacity; - - capacity = uwb_rc_get_ie(uwb_rc, &ie_info); - if (capacity < 0) - return capacity; - - mutex_lock(&uwb_rc->ies_mutex); - - uwb_rc->ies = (struct uwb_rc_cmd_set_ie *)ie_info; - uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL; - uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE); - uwb_rc->ies_capacity = capacity; - - mutex_unlock(&uwb_rc->ies_mutex); - - return 0; -} - - -/* Cleanup the whole IE management subsystem */ -void uwb_rc_ie_release(struct uwb_rc *uwb_rc) -{ - kfree(uwb_rc->ies); - uwb_rc->ies = NULL; - uwb_rc->ies_capacity = 0; -} - - -static int uwb_rc_ie_add_one(struct uwb_rc *rc, const struct uwb_ie_hdr *new_ie) -{ - struct uwb_rc_cmd_set_ie *new_ies; - void *ptr, *prev_ie; - struct uwb_ie_hdr *ie; - size_t length, new_ie_len, new_capacity, size, prev_size; - - length = le16_to_cpu(rc->ies->wIELength); - new_ie_len = sizeof(struct uwb_ie_hdr) + new_ie->length; - new_capacity = sizeof(struct uwb_rc_cmd_set_ie) + length + new_ie_len; - - if (new_capacity > rc->ies_capacity) { - new_ies = krealloc(rc->ies, new_capacity, GFP_KERNEL); - if (!new_ies) - return -ENOMEM; - rc->ies = new_ies; - } - - ptr = rc->ies->IEData; - size = length; - for (;;) { - prev_ie = ptr; - prev_size = size; - ie = uwb_ie_next(&ptr, &size); - if (!ie || ie->element_id > new_ie->element_id) - break; - } - - memmove(prev_ie + new_ie_len, prev_ie, prev_size); - memcpy(prev_ie, new_ie, new_ie_len); - rc->ies->wIELength = cpu_to_le16(length + new_ie_len); - - return 0; -} - -/** - * uwb_rc_ie_add - add new IEs to the radio controller's beacon - * @uwb_rc: the radio controller. - * @ies: the buffer containing the new IE or IEs to be added to - * the device's beacon. - * @size: length of all the IEs. - * - * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB - * after the device sent the first beacon that includes the IEs specified - * in the SET IE command. We thus cannot send this command if the device is - * not beaconing. Instead, a SET IE command will be sent later right after - * we start beaconing. - * - * Setting an IE on the device will overwrite all current IEs in device. So - * we take the current IEs being transmitted by the device, insert the - * new one, and call SET IE with all the IEs needed. - * - * Returns 0 on success; or -ENOMEM. - */ -int uwb_rc_ie_add(struct uwb_rc *uwb_rc, - const struct uwb_ie_hdr *ies, size_t size) -{ - int result = 0; - void *ptr; - const struct uwb_ie_hdr *ie; - - mutex_lock(&uwb_rc->ies_mutex); - - ptr = (void *)ies; - for (;;) { - ie = uwb_ie_next(&ptr, &size); - if (!ie) - break; - - result = uwb_rc_ie_add_one(uwb_rc, ie); - if (result < 0) - break; - } - if (result >= 0) { - if (size == 0) { - if (uwb_rc->beaconing != -1) - result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); - } else - result = -EINVAL; - } - - mutex_unlock(&uwb_rc->ies_mutex); - - return result; -} -EXPORT_SYMBOL_GPL(uwb_rc_ie_add); - - -/* - * Remove an IE from internal cache - * - * We are dealing with our internal IE cache so no need to verify that the - * IEs are valid (it has been done already). - * - * Should be called with ies_mutex held - * - * We do not break out once an IE is found in the cache. It is currently - * possible to have more than one IE with the same ID included in the - * beacon. We don't reallocate, we just mark the size smaller. 
- */ -static -void uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove) -{ - struct uwb_ie_hdr *ie; - size_t len = le16_to_cpu(uwb_rc->ies->wIELength); - void *ptr; - size_t size; - - ptr = uwb_rc->ies->IEData; - size = len; - for (;;) { - ie = uwb_ie_next(&ptr, &size); - if (!ie) - break; - if (ie->element_id == to_remove) { - len -= sizeof(struct uwb_ie_hdr) + ie->length; - memmove(ie, ptr, size); - ptr = ie; - } - } - uwb_rc->ies->wIELength = cpu_to_le16(len); -} - - -/** - * uwb_rc_ie_rm - remove an IE from the radio controller's beacon - * @uwb_rc: the radio controller. - * @element_id: the element ID of the IE to remove. - * - * Only IEs previously added with uwb_rc_ie_add() may be removed. - * - * Returns 0 on success; or -ve the SET-IE command to the radio - * controller failed. - */ -int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id) -{ - int result = 0; - - mutex_lock(&uwb_rc->ies_mutex); - - uwb_rc_ie_cache_rm(uwb_rc, element_id); - - if (uwb_rc->beaconing != -1) - result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); - - mutex_unlock(&uwb_rc->ies_mutex); - - return result; -} -EXPORT_SYMBOL_GPL(uwb_rc_ie_rm); diff --git a/drivers/staging/uwb/include/debug-cmd.h b/drivers/staging/uwb/include/debug-cmd.h deleted file mode 100644 index f97db6c3bcc0..000000000000 --- a/drivers/staging/uwb/include/debug-cmd.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Ultra Wide Band - * Debug interface commands - * - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. - */ -#ifndef __LINUX__UWB__DEBUG_CMD_H__ -#define __LINUX__UWB__DEBUG_CMD_H__ - -#include <linux/types.h> - -/* - * Debug interface commands - * - * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation. - * - * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation. - */ - -enum uwb_dbg_cmd_type { - UWB_DBG_CMD_RSV_ESTABLISH = 1, - UWB_DBG_CMD_RSV_TERMINATE = 2, - UWB_DBG_CMD_IE_ADD = 3, - UWB_DBG_CMD_IE_RM = 4, - UWB_DBG_CMD_RADIO_START = 5, - UWB_DBG_CMD_RADIO_STOP = 6, -}; - -struct uwb_dbg_cmd_rsv_establish { - __u8 target[6]; - __u8 type; - __u16 max_mas; - __u16 min_mas; - __u8 max_interval; -}; - -struct uwb_dbg_cmd_rsv_terminate { - int index; -}; - -struct uwb_dbg_cmd_ie { - __u8 data[128]; - int len; -}; - -struct uwb_dbg_cmd { - __u32 type; - union { - struct uwb_dbg_cmd_rsv_establish rsv_establish; - struct uwb_dbg_cmd_rsv_terminate rsv_terminate; - struct uwb_dbg_cmd_ie ie_add; - struct uwb_dbg_cmd_ie ie_rm; - }; -}; - -#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */ diff --git a/drivers/staging/uwb/include/spec.h b/drivers/staging/uwb/include/spec.h deleted file mode 100644 index 5f75caf7b4ba..000000000000 --- a/drivers/staging/uwb/include/spec.h +++ /dev/null @@ -1,767 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Ultra Wide Band - * UWB Standard definitions - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * All these definitions are based on the ECMA-368 standard. - * - * Note all definitions are Little Endian in the wire, and we will - * convert them to host order before operating on the bitfields (that - * yes, we use extensively). - */ - -#ifndef __LINUX__UWB_SPEC_H__ -#define __LINUX__UWB_SPEC_H__ - -#include <linux/types.h> -#include <linux/bitmap.h> -#include <linux/if_ether.h> - -#define i1480_FW 0x00000303 -/* #define i1480_FW 0x00000302 */ - -/** - * Number of Medium Access Slots in a superframe. 
- * - * UWB divides time in SuperFrames, each one divided in 256 pieces, or - * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the - * basic bandwidth allocation unit in UWB. - */ -enum { UWB_NUM_MAS = 256 }; - -/** - * Number of Zones in superframe. - * - * UWB divides the superframe into zones with numbering starting from BPST. - * See MBOA MAC[16.8.6] - */ -enum { UWB_NUM_ZONES = 16 }; - -/* - * Number of MAS in a zone. - */ -#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) - -/* - * Number of MAS required before a row can be considered available. - */ -#define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1) - -/* - * Number of streams per DRP reservation between a pair of devices. - * - * [ECMA-368] section 16.8.6. - */ -enum { UWB_NUM_STREAMS = 8 }; - -/* - * mMasLength - * - * The length of a MAS in microseconds. - * - * [ECMA-368] section 17.16. - */ -enum { UWB_MAS_LENGTH_US = 256 }; - -/* - * mBeaconSlotLength - * - * The length of the beacon slot in microseconds. - * - * [ECMA-368] section 17.16 - */ -enum { UWB_BEACON_SLOT_LENGTH_US = 85 }; - -/* - * mMaxLostBeacons - * - * The number beacons missing in consecutive superframes before a - * device can be considered as unreachable. - * - * [ECMA-368] section 17.16 - */ -enum { UWB_MAX_LOST_BEACONS = 3 }; - -/* - * mDRPBackOffWinMin - * - * The minimum number of superframes to wait before trying to reserve - * extra MAS. - * - * [ECMA-368] section 17.16 - */ -enum { UWB_DRP_BACKOFF_WIN_MIN = 2 }; - -/* - * mDRPBackOffWinMax - * - * The maximum number of superframes to wait before trying to reserve - * extra MAS. - * - * [ECMA-368] section 17.16 - */ -enum { UWB_DRP_BACKOFF_WIN_MAX = 16 }; - -/* - * Length of a superframe in microseconds. - */ -#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS) - -/** - * UWB MAC address - * - * It is *imperative* that this struct is exactly 6 packed bytes (as - * it is also used to define headers sent down and up the wire/radio). - */ -struct uwb_mac_addr { - u8 data[ETH_ALEN]; -} __attribute__((packed)); - - -/** - * UWB device address - * - * It is *imperative* that this struct is exactly 6 packed bytes (as - * it is also used to define headers sent down and up the wire/radio). - */ -struct uwb_dev_addr { - u8 data[2]; -} __attribute__((packed)); - - -/** - * Types of UWB addresses - * - * Order matters (by size). - */ -enum uwb_addr_type { - UWB_ADDR_DEV = 0, - UWB_ADDR_MAC = 1, -}; - - -/** Size of a char buffer for printing a MAC/device address */ -enum { UWB_ADDR_STRSIZE = 32 }; - - -/** UWB WiMedia protocol IDs. 
*/ -enum uwb_prid { - UWB_PRID_WLP_RESERVED = 0x0000, - UWB_PRID_WLP = 0x0001, - UWB_PRID_WUSB_BOT = 0x0010, - UWB_PRID_WUSB = 0x0010, - UWB_PRID_WUSB_TOP = 0x001F, -}; - - -/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */ -enum uwb_phy_rate { - UWB_PHY_RATE_53 = 0, - UWB_PHY_RATE_80, - UWB_PHY_RATE_106, - UWB_PHY_RATE_160, - UWB_PHY_RATE_200, - UWB_PHY_RATE_320, - UWB_PHY_RATE_400, - UWB_PHY_RATE_480, - UWB_PHY_RATE_INVALID -}; - - -/** - * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78]) - */ -enum uwb_scan_type { - UWB_SCAN_ONLY = 0, - UWB_SCAN_OUTSIDE_BP, - UWB_SCAN_WHILE_INACTIVE, - UWB_SCAN_DISABLED, - UWB_SCAN_ONLY_STARTTIME, - UWB_SCAN_TOP -}; - - -/** ACK Policy types (MBOA MAC[7.2.1.3]) */ -enum uwb_ack_pol { - UWB_ACK_NO = 0, - UWB_ACK_INM = 1, - UWB_ACK_B = 2, - UWB_ACK_B_REQ = 3, -}; - - -/** DRP reservation types ([ECMA-368 table 106) */ -enum uwb_drp_type { - UWB_DRP_TYPE_ALIEN_BP = 0, - UWB_DRP_TYPE_HARD, - UWB_DRP_TYPE_SOFT, - UWB_DRP_TYPE_PRIVATE, - UWB_DRP_TYPE_PCA, -}; - - -/** DRP Reason Codes ([ECMA-368] table 107) */ -enum uwb_drp_reason { - UWB_DRP_REASON_ACCEPTED = 0, - UWB_DRP_REASON_CONFLICT, - UWB_DRP_REASON_PENDING, - UWB_DRP_REASON_DENIED, - UWB_DRP_REASON_MODIFIED, -}; - -/** Relinquish Request Reason Codes ([ECMA-368] table 113) */ -enum uwb_relinquish_req_reason { - UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0, - UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION, -}; - -/** - * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) - */ -enum uwb_drp_notif_reason { - UWB_DRP_NOTIF_DRP_IE_RCVD = 0, - UWB_DRP_NOTIF_CONFLICT, - UWB_DRP_NOTIF_TERMINATE, -}; - - -/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */ -struct uwb_drp_alloc { - __le16 zone_bm; - __le16 mas_bm; -} __attribute__((packed)); - - -/** General MAC Header format (ECMA-368[16.2]) */ -struct uwb_mac_frame_hdr { - __le16 Frame_Control; - struct uwb_dev_addr DestAddr; - struct uwb_dev_addr SrcAddr; - __le16 Sequence_Control; - __le16 Access_Information; -} __attribute__((packed)); - - -/** - * uwb_beacon_frame - a beacon frame including MAC headers - * - * [ECMA] section 16.3. 
- */ -struct uwb_beacon_frame { - struct uwb_mac_frame_hdr hdr; - struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */ - u8 Beacon_Slot_Number; - u8 Device_Control; - u8 IEData[]; -} __attribute__((packed)); - - -/** Information Element codes (MBOA MAC[T54]) */ -enum uwb_ie { - UWB_PCA_AVAILABILITY = 2, - UWB_IE_DRP_AVAILABILITY = 8, - UWB_IE_DRP = 9, - UWB_BP_SWITCH_IE = 11, - UWB_MAC_CAPABILITIES_IE = 12, - UWB_PHY_CAPABILITIES_IE = 13, - UWB_APP_SPEC_PROBE_IE = 15, - UWB_IDENTIFICATION_IE = 19, - UWB_MASTER_KEY_ID_IE = 20, - UWB_RELINQUISH_REQUEST_IE = 21, - UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ - UWB_APP_SPEC_IE = 255, -}; - - -/** - * Header common to all Information Elements (IEs) - */ -struct uwb_ie_hdr { - u8 element_id; /* enum uwb_ie */ - u8 length; -} __attribute__((packed)); - - -/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */ -struct uwb_ie_drp { - struct uwb_ie_hdr hdr; - __le16 drp_control; - struct uwb_dev_addr dev_addr; - struct uwb_drp_alloc allocs[]; -} __attribute__((packed)); - -static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 0) & 0x7; -} - -static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 3) & 0x7; -} - -static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 6) & 0x7; -} - -static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 9) & 0x1; -} - -static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 10) & 0x1; -} - -static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 11) & 0x1; -} - -static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 12) & 0x1; -} - -static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x7 << 0)) | (type << 0); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie, - enum uwb_drp_reason reason_code) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (ie->drp_control & ~(0x7 << 6)) | (reason_code << 6); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x1 << 9)) | (status << 9); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe) -{ - u16 drp_control = 
le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12); - ie->drp_control = cpu_to_le16(drp_control); -} - -/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */ -struct uwb_ie_drp_avail { - struct uwb_ie_hdr hdr; - DECLARE_BITMAP(bmp, UWB_NUM_MAS); -} __attribute__((packed)); - -/* Relinqish Request IE ([ECMA-368] section 16.8.19). */ -struct uwb_relinquish_request_ie { - struct uwb_ie_hdr hdr; - __le16 relinquish_req_control; - struct uwb_dev_addr dev_addr; - struct uwb_drp_alloc allocs[]; -} __attribute__((packed)); - -static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie) -{ - return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf; -} - -static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie, - int reason_code) -{ - u16 ctrl = le16_to_cpu(ie->relinquish_req_control); - ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0); - ie->relinquish_req_control = cpu_to_le16(ctrl); -} - -/** - * The Vendor ID is set to an OUI that indicates the vendor of the device. - * ECMA-368 [16.8.10] - */ -struct uwb_vendor_id { - u8 data[3]; -} __attribute__((packed)); - -/** - * The device type ID - * FIXME: clarify what this means - * ECMA-368 [16.8.10] - */ -struct uwb_device_type_id { - u8 data[3]; -} __attribute__((packed)); - - -/** - * UWB device information types - * ECMA-368 [16.8.10] - */ -enum uwb_dev_info_type { - UWB_DEV_INFO_VENDOR_ID = 0, - UWB_DEV_INFO_VENDOR_TYPE, - UWB_DEV_INFO_NAME, -}; - -/** - * UWB device information found in Identification IE - * ECMA-368 [16.8.10] - */ -struct uwb_dev_info { - u8 type; /* enum uwb_dev_info_type */ - u8 length; - u8 data[]; -} __attribute__((packed)); - -/** - * UWB Identification IE - * ECMA-368 [16.8.10] - */ -struct uwb_identification_ie { - struct uwb_ie_hdr hdr; - struct uwb_dev_info info[]; -} __attribute__((packed)); - -/* - * UWB Radio Controller - * - * These definitions are common to the Radio Control layers as - * exported by the WUSB1.0 HWA and WHCI interfaces. 
- */ - -/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */ -struct uwb_rccb { - u8 bCommandType; /* enum hwa_cet */ - __le16 wCommand; /* Command code */ - u8 bCommandContext; /* Context ID */ -} __attribute__((packed)); - - -/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */ -struct uwb_rceb { - u8 bEventType; /* enum hwa_cet */ - __le16 wEvent; /* Event code */ - u8 bEventContext; /* Context ID */ -} __attribute__((packed)); - - -enum { - UWB_RC_CET_GENERAL = 0, /* General Command/Event type */ - UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */ -}; - -/* Commands to the radio controller */ -enum uwb_rc_cmd { - UWB_RC_CMD_CHANNEL_CHANGE = 16, - UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */ - UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */ - UWB_RC_CMD_RESET = 19, - UWB_RC_CMD_SCAN = 20, /* Scan management */ - UWB_RC_CMD_SET_BEACON_FILTER = 21, - UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */ - UWB_RC_CMD_SET_IE = 23, /* Information Element management */ - UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24, - UWB_RC_CMD_SET_TX_POWER = 25, - UWB_RC_CMD_SLEEP = 26, - UWB_RC_CMD_START_BEACON = 27, - UWB_RC_CMD_STOP_BEACON = 28, - UWB_RC_CMD_BP_MERGE = 29, - UWB_RC_CMD_SEND_COMMAND_FRAME = 30, - UWB_RC_CMD_SET_ASIE_NOTIF = 31, -}; - -/* Notifications from the radio controller */ -enum uwb_rc_evt { - UWB_RC_EVT_IE_RCV = 0, - UWB_RC_EVT_BEACON = 1, - UWB_RC_EVT_BEACON_SIZE = 2, - UWB_RC_EVT_BPOIE_CHANGE = 3, - UWB_RC_EVT_BP_SLOT_CHANGE = 4, - UWB_RC_EVT_BP_SWITCH_IE_RCV = 5, - UWB_RC_EVT_DEV_ADDR_CONFLICT = 6, - UWB_RC_EVT_DRP_AVAIL = 7, - UWB_RC_EVT_DRP = 8, - UWB_RC_EVT_BP_SWITCH_STATUS = 9, - UWB_RC_EVT_CMD_FRAME_RCV = 10, - UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11, - /* Events (command responses) use the same code as the command */ - UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535, -}; - -enum uwb_rc_extended_type_1_cmd { - UWB_RC_SET_DAA_ENERGY_MASK = 32, - UWB_RC_SET_NOTIFICATION_FILTER_EX = 33, -}; - -enum uwb_rc_extended_type_1_evt { - UWB_RC_DAA_ENERGY_DETECTED = 0, -}; - -/* Radio Control Result Code. [WHCI] table 3-3. */ -enum { - UWB_RC_RES_SUCCESS = 0, - UWB_RC_RES_FAIL, - UWB_RC_RES_FAIL_HARDWARE, - UWB_RC_RES_FAIL_NO_SLOTS, - UWB_RC_RES_FAIL_BEACON_TOO_LARGE, - UWB_RC_RES_FAIL_INVALID_PARAMETER, - UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL, - UWB_RC_RES_FAIL_INVALID_IE_DATA, - UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED, - UWB_RC_RES_FAIL_CANCELLED, - UWB_RC_RES_FAIL_INVALID_STATE, - UWB_RC_RES_FAIL_INVALID_SIZE, - UWB_RC_RES_FAIL_ACK_NOT_RECEIVED, - UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF, - UWB_RC_RES_FAIL_TIME_OUT = 255, -}; - -/* Confirm event. [WHCI] section 3.1.3.1 etc. */ -struct uwb_rc_evt_confirm { - struct uwb_rceb rceb; - u8 bResultCode; -} __attribute__((packed)); - -/* Device Address Management event. [WHCI] section 3.1.3.2. */ -struct uwb_rc_evt_dev_addr_mgmt { - struct uwb_rceb rceb; - u8 baAddr[ETH_ALEN]; - u8 bResultCode; -} __attribute__((packed)); - - -/* Get IE Event. [WHCI] section 3.1.3.3. */ -struct uwb_rc_evt_get_ie { - struct uwb_rceb rceb; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* Set DRP IE Event. [WHCI] section 3.1.3.7. */ -struct uwb_rc_evt_set_drp_ie { - struct uwb_rceb rceb; - __le16 wRemainingSpace; - u8 bResultCode; -} __attribute__((packed)); - -/* Set IE Event. [WHCI] section 3.1.3.8. */ -struct uwb_rc_evt_set_ie { - struct uwb_rceb rceb; - __le16 RemainingSpace; - u8 bResultCode; -} __attribute__((packed)); - -/* Scan command. [WHCI] 3.1.3.5. 
*/ -struct uwb_rc_cmd_scan { - struct uwb_rccb rccb; - u8 bChannelNumber; - u8 bScanState; - __le16 wStartTime; -} __attribute__((packed)); - -/* Set DRP IE command. [WHCI] section 3.1.3.7. */ -struct uwb_rc_cmd_set_drp_ie { - struct uwb_rccb rccb; - __le16 wIELength; - struct uwb_ie_drp IEData[]; -} __attribute__((packed)); - -/* Set IE command. [WHCI] section 3.1.3.8. */ -struct uwb_rc_cmd_set_ie { - struct uwb_rccb rccb; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */ -struct uwb_rc_evt_set_daa_energy_mask { - struct uwb_rceb rceb; - __le16 wLength; - u8 result; -} __attribute__((packed)); - -/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */ -struct uwb_rc_evt_set_notification_filter_ex { - struct uwb_rceb rceb; - __le16 wLength; - u8 result; -} __attribute__((packed)); - -/* IE Received notification. [WHCI] section 3.1.4.1. */ -struct uwb_rc_evt_ie_rcv { - struct uwb_rceb rceb; - struct uwb_dev_addr SrcAddr; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* Type of the received beacon. [WHCI] section 3.1.4.2. */ -enum uwb_rc_beacon_type { - UWB_RC_BEACON_TYPE_SCAN = 0, - UWB_RC_BEACON_TYPE_NEIGHBOR, - UWB_RC_BEACON_TYPE_OL_ALIEN, - UWB_RC_BEACON_TYPE_NOL_ALIEN, -}; - -/* Beacon received notification. [WHCI] 3.1.4.2. */ -struct uwb_rc_evt_beacon { - struct uwb_rceb rceb; - u8 bChannelNumber; - u8 bBeaconType; - __le16 wBPSTOffset; - u8 bLQI; - u8 bRSSI; - __le16 wBeaconInfoLength; - u8 BeaconInfo[]; -} __attribute__((packed)); - - -/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */ -struct uwb_rc_evt_beacon_size { - struct uwb_rceb rceb; - __le16 wNewBeaconSize; -} __attribute__((packed)); - - -/* BPOIE Change notification. [WHCI] section 3.1.4.4. */ -struct uwb_rc_evt_bpoie_change { - struct uwb_rceb rceb; - __le16 wBPOIELength; - u8 BPOIE[]; -} __attribute__((packed)); - - -/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. */ -struct uwb_rc_evt_bp_slot_change { - struct uwb_rceb rceb; - u8 slot_info; -} __attribute__((packed)); - -static inline int uwb_rc_evt_bp_slot_change_slot_num( - const struct uwb_rc_evt_bp_slot_change *evt) -{ - return evt->slot_info & 0x7f; -} - -static inline int uwb_rc_evt_bp_slot_change_no_slot( - const struct uwb_rc_evt_bp_slot_change *evt) -{ - return (evt->slot_info & 0x80) >> 7; -} - -/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */ -struct uwb_rc_evt_bp_switch_ie_rcv { - struct uwb_rceb rceb; - struct uwb_dev_addr wSrcAddr; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */ -struct uwb_rc_evt_dev_addr_conflict { - struct uwb_rceb rceb; -} __attribute__((packed)); - -/* DRP notification. [WHCI] section 3.1.4.9. */ -struct uwb_rc_evt_drp { - struct uwb_rceb rceb; - struct uwb_dev_addr src_addr; - u8 reason; - u8 beacon_slot_number; - __le16 ie_length; - u8 ie_data[]; -} __attribute__((packed)); - -static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt) -{ - return evt->reason & 0x0f; -} - - -/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */ -struct uwb_rc_evt_drp_avail { - struct uwb_rceb rceb; - DECLARE_BITMAP(bmp, UWB_NUM_MAS); -} __attribute__((packed)); - -/* BP switch status notification. [WHCI] section 3.1.4.10. 
*/ -struct uwb_rc_evt_bp_switch_status { - struct uwb_rceb rceb; - u8 status; - u8 slot_offset; - __le16 bpst_offset; - u8 move_countdown; -} __attribute__((packed)); - -/* Command Frame Received notification. [WHCI] section 3.1.4.11. */ -struct uwb_rc_evt_cmd_frame_rcv { - struct uwb_rceb rceb; - __le16 receive_time; - struct uwb_dev_addr wSrcAddr; - struct uwb_dev_addr wDstAddr; - __le16 control; - __le16 reserved; - __le16 dataLength; - u8 data[]; -} __attribute__((packed)); - -/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */ -struct uwb_rc_evt_channel_change_ie_rcv { - struct uwb_rceb rceb; - struct uwb_dev_addr wSrcAddr; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */ -struct uwb_rc_evt_daa_energy_detected { - struct uwb_rceb rceb; - __le16 wLength; - u8 bandID; - u8 reserved; - u8 toneBmp[16]; -} __attribute__((packed)); - - -/** - * Radio Control Interface Class Descriptor - * - * WUSB 1.0 [8.6.1.2] - */ -struct uwb_rc_control_intf_class_desc { - u8 bLength; - u8 bDescriptorType; - __le16 bcdRCIVersion; -} __attribute__((packed)); - -#endif /* #ifndef __LINUX__UWB_SPEC_H__ */ diff --git a/drivers/staging/uwb/include/umc.h b/drivers/staging/uwb/include/umc.h deleted file mode 100644 index ddbceb39ad15..000000000000 --- a/drivers/staging/uwb/include/umc.h +++ /dev/null @@ -1,192 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * UWB Multi-interface Controller support. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - * - * UMC (UWB Multi-interface Controller) capabilities (e.g., radio - * controller, host controller) are presented as devices on the "umc" - * bus. - * - * The radio controller is not strictly a UMC capability but it's - * useful to present it as such. - * - * References: - * - * [WHCI] Wireless Host Controller Interface Specification for - * Certified Wireless Universal Serial Bus, revision 0.95. - * - * How this works is kind of convoluted but simple. The whci.ko driver - * loads when WHCI devices are detected. These WHCI devices expose - * many devices in the same PCI function (they couldn't have reused - * functions, no), so for each PCI function that exposes these many - * devices, whci ceates a umc_dev [whci_probe() -> whci_add_cap()] - * with umc_device_create() and adds it to the bus with - * umc_device_register(). - * - * umc_device_register() calls device_register() which will push the - * bus management code to load your UMC driver's somehting_probe() - * that you have registered for that capability code. - * - * Now when the WHCI device is removed, whci_remove() will go over - * each umc_dev assigned to each of the PCI function's capabilities - * and through whci_del_cap() call umc_device_unregister() each - * created umc_dev. Of course, if you are bound to the device, your - * driver's something_remove() will be called. - */ - -#ifndef _LINUX_UWB_UMC_H_ -#define _LINUX_UWB_UMC_H_ - -#include <linux/device.h> -#include <linux/pci.h> - -/* - * UMC capability IDs. - * - * 0x00 is reserved so use it for the radio controller device. - * - * [WHCI] table 2-8 - */ -#define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */ -#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */ - -/** - * struct umc_dev - UMC capability device - * - * @version: version of the specification this capability conforms to. - * @cap_id: capability ID. - * @bar: PCI Bar (64 bit) where the resource lies - * @resource: register space resource. 
- * @irq: interrupt line. - */ -struct umc_dev { - u16 version; - u8 cap_id; - u8 bar; - struct resource resource; - unsigned irq; - struct device dev; -}; - -#define to_umc_dev(d) container_of(d, struct umc_dev, dev) - -/** - * struct umc_driver - UMC capability driver - * @cap_id: supported capability ID. - * @match: driver specific capability matching function. - * @match_data: driver specific data for match() (e.g., a - * table of pci_device_id's if umc_match_pci_id() is used). - */ -struct umc_driver { - char *name; - u8 cap_id; - int (*match)(struct umc_driver *, struct umc_dev *); - const void *match_data; - - int (*probe)(struct umc_dev *); - void (*remove)(struct umc_dev *); - int (*pre_reset)(struct umc_dev *); - int (*post_reset)(struct umc_dev *); - - struct device_driver driver; -}; - -#define to_umc_driver(d) container_of(d, struct umc_driver, driver) - -extern struct bus_type umc_bus_type; - -struct umc_dev *umc_device_create(struct device *parent, int n); -int __must_check umc_device_register(struct umc_dev *umc); -void umc_device_unregister(struct umc_dev *umc); - -int __must_check __umc_driver_register(struct umc_driver *umc_drv, - struct module *mod, - const char *mod_name); - -/** - * umc_driver_register - register a UMC capabiltity driver. - * @umc_drv: pointer to the driver. - */ -#define umc_driver_register(umc_drv) \ - __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME) - -void umc_driver_unregister(struct umc_driver *umc_drv); - -/* - * Utility function you can use to match (umc_driver->match) against a - * null-terminated array of 'struct pci_device_id' in - * umc_driver->match_data. - */ -int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc); - -/** - * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none - * @umc_dev: UMC device whose parent PCI device we are looking for - * - * DIRTY!!! DON'T RELY ON THIS - * - * FIXME: This is as dirty as it gets, but we need some way to check - * the correct type of umc_dev->parent (so that for example, we can - * cast to pci_dev). Casting to pci_dev is necessary because at some - * point we need to request resources from the device. Mapping is - * easily over come (ioremap and stuff are bus agnostic), but hooking - * up to some error handlers (such as pci error handlers) might need - * this. - * - * THIS might (probably will) be removed in the future, so don't count - * on it. - */ -static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev) -{ - struct pci_dev *pci_dev = NULL; - if (dev_is_pci(umc_dev->dev.parent)) - pci_dev = to_pci_dev(umc_dev->dev.parent); - return pci_dev; -} - -/** - * umc_dev_get() - reference a UMC device. - * @umc_dev: Pointer to UMC device. - * - * NOTE: we are assuming in this whole scheme that the parent device - * is referenced at _probe() time and unreferenced at _remove() - * time by the parent's subsystem. - */ -static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev) -{ - get_device(&umc_dev->dev); - return umc_dev; -} - -/** - * umc_dev_put() - unreference a UMC device. - * @umc_dev: Pointer to UMC device. - */ -static inline void umc_dev_put(struct umc_dev *umc_dev) -{ - put_device(&umc_dev->dev); -} - -/** - * umc_set_drvdata - set UMC device's driver data. - * @umc_dev: Pointer to UMC device. - * @data: Data to set. - */ -static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data) -{ - dev_set_drvdata(&umc_dev->dev, data); -} - -/** - * umc_get_drvdata - recover UMC device's driver data. 
- * @umc_dev: Pointer to UMC device. - */ -static inline void *umc_get_drvdata(struct umc_dev *umc_dev) -{ - return dev_get_drvdata(&umc_dev->dev); -} - -int umc_controller_reset(struct umc_dev *umc); - -#endif /* #ifndef _LINUX_UWB_UMC_H_ */ diff --git a/drivers/staging/uwb/include/whci.h b/drivers/staging/uwb/include/whci.h deleted file mode 100644 index 1a5c2cc2b008..000000000000 --- a/drivers/staging/uwb/include/whci.h +++ /dev/null @@ -1,102 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * References: - * [WHCI] Wireless Host Controller Interface Specification for - * Certified Wireless Universal Serial Bus, revision 0.95. - */ -#ifndef _LINUX_UWB_WHCI_H_ -#define _LINUX_UWB_WHCI_H_ - -#include <linux/pci.h> - -/* - * UWB interface capability registers (offsets from UWBBASE) - * - * [WHCI] section 2.2 - */ -#define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */ -# define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull) -#define UWBCAPDATA(n) (8*(n)) -# define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull) -# define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull) -# define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull) -# define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32)) -# define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull) - -/* Size of the WHCI capability data (including the RC capability) for - a device with n capabilities. */ -#define UWBCAPDATA_SIZE(n) (8 + 8*(n)) - - -/* - * URC registers (offsets from URCBASE) - * - * [WHCI] section 2.3 - */ -#define URCCMD 0x00 -# define URCCMD_RESET (1 << 31) /* UMC Hardware reset */ -# define URCCMD_RS (1 << 30) /* Run/Stop */ -# define URCCMD_EARV (1 << 29) /* Event Address Register Valid */ -# define URCCMD_ACTIVE (1 << 15) /* Command is active */ -# define URCCMD_IWR (1 << 14) /* Interrupt When Ready */ -# define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */ -#define URCSTS 0x04 -# define URCSTS_EPS (1 << 17) /* Event Processing Status */ -# define URCSTS_HALTED (1 << 16) /* RC halted */ -# define URCSTS_HSE (1 << 10) /* Host System Error...fried */ -# define URCSTS_ER (1 << 9) /* Event Ready */ -# define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */ -# define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */ -# define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */ -#define URCINTR 0x08 -# define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */ -#define URCCMDADDR 0x10 -#define URCEVTADDR 0x18 -# define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */ - - -/** Write 32 bit @value to little endian register at @addr */ -static inline -void le_writel(u32 value, void __iomem *addr) -{ - iowrite32(value, addr); -} - - -/** Read from 32 bit little endian register at @addr */ -static inline -u32 le_readl(void __iomem *addr) -{ - return ioread32(addr); -} - - -/** Write 64 bit @value to little endian register at @addr */ -static inline -void le_writeq(u64 value, void __iomem *addr) -{ - iowrite32(value, addr); - iowrite32(value >> 32, addr + 4); -} - - -/** Read from 64 bit little endian register at @addr */ -static inline -u64 le_readq(void __iomem *addr) -{ - u64 value; - value = ioread32(addr); - value |= (u64)ioread32(addr + 4) << 32; - return value; -} - -extern int whci_wait_for(struct device *dev, u32 __iomem *reg, - u32 mask, u32 result, - unsigned long max_ms, const char 
*tag); - -#endif /* #ifndef _LINUX_UWB_WHCI_H_ */ diff --git a/drivers/staging/uwb/lc-dev.c b/drivers/staging/uwb/lc-dev.c deleted file mode 100644 index 3e5c07fd6b10..000000000000 --- a/drivers/staging/uwb/lc-dev.c +++ /dev/null @@ -1,457 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band - * Life cycle of devices - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: docs - */ -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/device.h> -#include <linux/export.h> -#include <linux/err.h> -#include <linux/kdev_t.h> -#include <linux/random.h> -#include <linux/stat.h> -#include "uwb-internal.h" - -/* We initialize addresses to 0xff (invalid, as it is bcast) */ -static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr) -{ - memset(&addr->data, 0xff, sizeof(addr->data)); -} - -static inline void uwb_mac_addr_init(struct uwb_mac_addr *addr) -{ - memset(&addr->data, 0xff, sizeof(addr->data)); -} - -/* - * Add callback @new to be called when an event occurs in @rc. - */ -int uwb_notifs_register(struct uwb_rc *rc, struct uwb_notifs_handler *new) -{ - if (mutex_lock_interruptible(&rc->notifs_chain.mutex)) - return -ERESTARTSYS; - list_add(&new->list_node, &rc->notifs_chain.list); - mutex_unlock(&rc->notifs_chain.mutex); - return 0; -} -EXPORT_SYMBOL_GPL(uwb_notifs_register); - -/* - * Remove event handler (callback) - */ -int uwb_notifs_deregister(struct uwb_rc *rc, struct uwb_notifs_handler *entry) -{ - if (mutex_lock_interruptible(&rc->notifs_chain.mutex)) - return -ERESTARTSYS; - list_del(&entry->list_node); - mutex_unlock(&rc->notifs_chain.mutex); - return 0; -} -EXPORT_SYMBOL_GPL(uwb_notifs_deregister); - -/* - * Notify all event handlers of a given event on @rc - * - * We are called with a valid reference to the device, or NULL if the - * event is not for a particular event (e.g., a BG join event). - */ -void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event) -{ - struct uwb_notifs_handler *handler; - if (mutex_lock_interruptible(&rc->notifs_chain.mutex)) - return; - if (!list_empty(&rc->notifs_chain.list)) { - list_for_each_entry(handler, &rc->notifs_chain.list, list_node) { - handler->cb(handler->data, uwb_dev, event); - } - } - mutex_unlock(&rc->notifs_chain.mutex); -} - -/* - * Release the backing device of a uwb_dev that has been dynamically allocated. - */ -static void uwb_dev_sys_release(struct device *dev) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - - uwb_bce_put(uwb_dev->bce); - memset(uwb_dev, 0x69, sizeof(*uwb_dev)); - kfree(uwb_dev); -} - -/* - * Initialize a UWB device instance - * - * Alloc, zero and call this function. 
- */ -void uwb_dev_init(struct uwb_dev *uwb_dev) -{ - mutex_init(&uwb_dev->mutex); - device_initialize(&uwb_dev->dev); - uwb_dev->dev.release = uwb_dev_sys_release; - uwb_dev_addr_init(&uwb_dev->dev_addr); - uwb_mac_addr_init(&uwb_dev->mac_addr); - bitmap_fill(uwb_dev->streams, UWB_NUM_GLOBAL_STREAMS); -} - -static ssize_t uwb_dev_EUI_48_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - char addr[UWB_ADDR_STRSIZE]; - - uwb_mac_addr_print(addr, sizeof(addr), &uwb_dev->mac_addr); - return sprintf(buf, "%s\n", addr); -} -static DEVICE_ATTR(EUI_48, S_IRUGO, uwb_dev_EUI_48_show, NULL); - -static ssize_t uwb_dev_DevAddr_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - char addr[UWB_ADDR_STRSIZE]; - - uwb_dev_addr_print(addr, sizeof(addr), &uwb_dev->dev_addr); - return sprintf(buf, "%s\n", addr); -} -static DEVICE_ATTR(DevAddr, S_IRUGO, uwb_dev_DevAddr_show, NULL); - -/* - * Show the BPST of this device. - * - * Calculated from the receive time of the device's beacon and it's - * slot number. - */ -static ssize_t uwb_dev_BPST_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_beca_e *bce; - struct uwb_beacon_frame *bf; - u16 bpst; - - bce = uwb_dev->bce; - mutex_lock(&bce->mutex); - bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo; - bpst = bce->be->wBPSTOffset - - (u16)(bf->Beacon_Slot_Number * UWB_BEACON_SLOT_LENGTH_US); - mutex_unlock(&bce->mutex); - - return sprintf(buf, "%d\n", bpst); -} -static DEVICE_ATTR(BPST, S_IRUGO, uwb_dev_BPST_show, NULL); - -/* - * Show the IEs a device is beaconing - * - * We need to access the beacon cache, so we just lock it really - * quick, print the IEs and unlock. - * - * We have a reference on the cache entry, so that should be - * quite safe. 
- */ -static ssize_t uwb_dev_IEs_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - - return uwb_bce_print_IEs(uwb_dev, uwb_dev->bce, buf, PAGE_SIZE); -} -static DEVICE_ATTR(IEs, S_IRUGO | S_IWUSR, uwb_dev_IEs_show, NULL); - -static ssize_t uwb_dev_LQE_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_beca_e *bce = uwb_dev->bce; - size_t result; - - mutex_lock(&bce->mutex); - result = stats_show(&uwb_dev->bce->lqe_stats, buf); - mutex_unlock(&bce->mutex); - return result; -} - -static ssize_t uwb_dev_LQE_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_beca_e *bce = uwb_dev->bce; - ssize_t result; - - mutex_lock(&bce->mutex); - result = stats_store(&uwb_dev->bce->lqe_stats, buf, size); - mutex_unlock(&bce->mutex); - return result; -} -static DEVICE_ATTR(LQE, S_IRUGO | S_IWUSR, uwb_dev_LQE_show, uwb_dev_LQE_store); - -static ssize_t uwb_dev_RSSI_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_beca_e *bce = uwb_dev->bce; - size_t result; - - mutex_lock(&bce->mutex); - result = stats_show(&uwb_dev->bce->rssi_stats, buf); - mutex_unlock(&bce->mutex); - return result; -} - -static ssize_t uwb_dev_RSSI_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_beca_e *bce = uwb_dev->bce; - ssize_t result; - - mutex_lock(&bce->mutex); - result = stats_store(&uwb_dev->bce->rssi_stats, buf, size); - mutex_unlock(&bce->mutex); - return result; -} -static DEVICE_ATTR(RSSI, S_IRUGO | S_IWUSR, uwb_dev_RSSI_show, uwb_dev_RSSI_store); - - -static struct attribute *uwb_dev_attrs[] = { - &dev_attr_EUI_48.attr, - &dev_attr_DevAddr.attr, - &dev_attr_BPST.attr, - &dev_attr_IEs.attr, - &dev_attr_LQE.attr, - &dev_attr_RSSI.attr, - NULL, -}; -ATTRIBUTE_GROUPS(uwb_dev); - -/* UWB bus type. */ -struct bus_type uwb_bus_type = { - .name = "uwb", - .dev_groups = uwb_dev_groups, -}; - -/** - * Device SYSFS registration - */ -static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev) -{ - struct device *dev; - - dev = &uwb_dev->dev; - dev->parent = parent_dev; - dev_set_drvdata(dev, uwb_dev); - - return device_add(dev); -} - - -static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev) -{ - dev_set_drvdata(&uwb_dev->dev, NULL); - device_del(&uwb_dev->dev); -} - - -/** - * Register and initialize a new UWB device - * - * Did you call uwb_dev_init() on it? - * - * @parent_rc: is the parent radio controller who has the link to the - * device. When registering the UWB device that is a UWB - * Radio Controller, we point back to it. - * - * If registering the device that is part of a radio, caller has set - * rc->uwb_dev->dev. Otherwise it is to be left NULL--a new one will - * be allocated. 
- */ -int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev, - struct uwb_rc *parent_rc) -{ - int result; - struct device *dev; - - BUG_ON(uwb_dev == NULL); - BUG_ON(parent_dev == NULL); - BUG_ON(parent_rc == NULL); - - mutex_lock(&uwb_dev->mutex); - dev = &uwb_dev->dev; - uwb_dev->rc = parent_rc; - result = __uwb_dev_sys_add(uwb_dev, parent_dev); - if (result < 0) - printk(KERN_ERR "UWB: unable to register dev %s with sysfs: %d\n", - dev_name(dev), result); - mutex_unlock(&uwb_dev->mutex); - return result; -} - - -void uwb_dev_rm(struct uwb_dev *uwb_dev) -{ - mutex_lock(&uwb_dev->mutex); - __uwb_dev_sys_rm(uwb_dev); - mutex_unlock(&uwb_dev->mutex); -} - - -static -int __uwb_dev_try_get(struct device *dev, void *__target_uwb_dev) -{ - struct uwb_dev *target_uwb_dev = __target_uwb_dev; - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - if (uwb_dev == target_uwb_dev) { - uwb_dev_get(uwb_dev); - return 1; - } else - return 0; -} - - -/** - * Given a UWB device descriptor, validate and refcount it - * - * @returns NULL if the device does not exist or is quiescing; the ptr to - * it otherwise. - */ -struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev) -{ - if (uwb_dev_for_each(rc, __uwb_dev_try_get, uwb_dev)) - return uwb_dev; - else - return NULL; -} -EXPORT_SYMBOL_GPL(uwb_dev_try_get); - - -/** - * Remove a device from the system [grunt for other functions] - */ -int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc) -{ - struct device *dev = &uwb_dev->dev; - char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; - - uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr); - uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr); - dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n", - macbuf, devbuf, - uwb_dev->dev.bus->name, - rc ? dev_name(&(rc->uwb_dev.dev)) : ""); - uwb_dev_rm(uwb_dev); - list_del(&uwb_dev->bce->node); - uwb_bce_put(uwb_dev->bce); - uwb_dev_put(uwb_dev); /* for the creation in _onair() */ - - return 0; -} - - -/** - * A device went off the air, clean up after it! - * - * This is called by the UWB Daemon (through the beacon purge function - * uwb_bcn_cache_purge) when it is detected that a device has been in - * radio silence for a while. - * - * If this device is actually a local radio controller we don't need - * to go through the offair process, as it is not registered as that. - * - * NOTE: uwb_bcn_cache.mutex is held! - */ -void uwbd_dev_offair(struct uwb_beca_e *bce) -{ - struct uwb_dev *uwb_dev; - - uwb_dev = bce->uwb_dev; - if (uwb_dev) { - uwb_notify(uwb_dev->rc, uwb_dev, UWB_NOTIF_OFFAIR); - __uwb_dev_offair(uwb_dev, uwb_dev->rc); - } -} - - -/** - * A device went on the air, start it up! - * - * This is called by the UWB Daemon when it is detected that a device - * has popped up in the radio range of the radio controller. - * - * It will just create the freaking device, register the beacon and - * stuff and yatla, done. 
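uwb_dev_try_get() above validates a possibly stale device pointer by walking the radio controller's children and only taking a reference if the device is still registered. A hedged user-space sketch of that "validate by lookup, then refcount" idea follows; the registry list and refcount field are invented for the example.

/* Sketch only: validate a pointer by scanning a registry before using it. */
#include <pthread.h>
#include <stdio.h>

struct obj {
    int refcount;
    struct obj *next;
};

static struct obj *registry;
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns target with an extra reference if it is still registered, else NULL. */
static struct obj *obj_try_get(struct obj *target)
{
    struct obj *found = NULL;

    pthread_mutex_lock(&registry_lock);
    for (struct obj *o = registry; o; o = o->next) {
        if (o == target) {
            o->refcount++;          /* caller must drop this reference later */
            found = o;
            break;
        }
    }
    pthread_mutex_unlock(&registry_lock);
    return found;
}

int main(void)
{
    struct obj a = { .refcount = 1 };

    registry = &a;                  /* register the object */
    if (obj_try_get(&a))
        printf("got reference, refcount now %d\n", a.refcount);
    return 0;
}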
- * - * - * NOTE: uwb_beca.mutex is held, bce->mutex is held - */ -void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce) -{ - int result; - struct device *dev = &rc->uwb_dev.dev; - struct uwb_dev *uwb_dev; - char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; - - uwb_mac_addr_print(macbuf, sizeof(macbuf), bce->mac_addr); - uwb_dev_addr_print(devbuf, sizeof(devbuf), &bce->dev_addr); - uwb_dev = kzalloc(sizeof(struct uwb_dev), GFP_KERNEL); - if (uwb_dev == NULL) { - dev_err(dev, "new device %s: Cannot allocate memory\n", - macbuf); - return; - } - uwb_dev_init(uwb_dev); /* This sets refcnt to one, we own it */ - uwb_dev->dev.bus = &uwb_bus_type; - uwb_dev->mac_addr = *bce->mac_addr; - uwb_dev->dev_addr = bce->dev_addr; - dev_set_name(&uwb_dev->dev, "%s", macbuf); - - /* plug the beacon cache */ - bce->uwb_dev = uwb_dev; - uwb_dev->bce = bce; - uwb_bce_get(bce); /* released in uwb_dev_sys_release() */ - - result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc); - if (result < 0) { - dev_err(dev, "new device %s: cannot instantiate device\n", - macbuf); - goto error_dev_add; - } - - dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n", - macbuf, devbuf, uwb_dev->dev.bus->name, - dev_name(&(rc->uwb_dev.dev))); - uwb_notify(rc, uwb_dev, UWB_NOTIF_ONAIR); - return; - -error_dev_add: - bce->uwb_dev = NULL; - uwb_bce_put(bce); - kfree(uwb_dev); - return; -} - -/** - * Iterate over the list of UWB devices, calling a @function on each - * - * See docs for bus_for_each().... - * - * @rc: radio controller for the devices. - * @function: function to call. - * @priv: data to pass to @function. - * @returns: 0 if no invocation of function() returned a value - * different to zero. That value otherwise. - */ -int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f function, void *priv) -{ - return device_for_each_child(&rc->uwb_dev.dev, priv, function); -} -EXPORT_SYMBOL_GPL(uwb_dev_for_each); diff --git a/drivers/staging/uwb/lc-rc.c b/drivers/staging/uwb/lc-rc.c deleted file mode 100644 index ee31b221cdc2..000000000000 --- a/drivers/staging/uwb/lc-rc.c +++ /dev/null @@ -1,569 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band - * Life cycle of radio controllers - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: docs - * - * A UWB radio controller is also a UWB device, so it embeds one... - * - * List of RCs comes from the 'struct class uwb_rc_class'. - */ - -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/device.h> -#include <linux/err.h> -#include <linux/random.h> -#include <linux/kdev_t.h> -#include <linux/etherdevice.h> -#include <linux/usb.h> -#include <linux/slab.h> -#include <linux/export.h> - -#include "uwb-internal.h" - -static int uwb_rc_index_match(struct device *dev, const void *data) -{ - const int *index = data; - struct uwb_rc *rc = dev_get_drvdata(dev); - - if (rc->index == *index) - return 1; - return 0; -} - -static struct uwb_rc *uwb_rc_find_by_index(int index) -{ - struct device *dev; - struct uwb_rc *rc = NULL; - - dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match); - if (dev) { - rc = dev_get_drvdata(dev); - put_device(dev); - } - - return rc; -} - -static int uwb_rc_new_index(void) -{ - int index = 0; - - for (;;) { - if (!uwb_rc_find_by_index(index)) - return index; - if (++index < 0) - index = 0; - } -} - -/** - * Release the backing device of a uwb_rc that has been dynamically allocated. 
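uwbd_dev_onair() above follows the usual kernel error-unwind idiom: each setup step that can fail jumps to a label that undoes only the steps already completed (here, error_dev_add drops the beacon-cache reference and frees the device). A small stand-alone illustration of that idiom follows; the resources are invented and have nothing to do with UWB.

/* Sketch of goto-based error unwinding, the same shape as uwbd_dev_onair(). */
#include <stdio.h>
#include <stdlib.h>

int bring_up(void)
{
    char *buf_a, *buf_b;

    buf_a = malloc(64);
    if (!buf_a)
        goto err_a;

    buf_b = malloc(64);
    if (!buf_b)
        goto err_b;                 /* only undo what was already set up */

    printf("both resources acquired\n");
    free(buf_b);
    free(buf_a);
    return 0;

err_b:
    free(buf_a);
err_a:
    return -1;
}

int main(void)
{
    return bring_up() ? EXIT_FAILURE : EXIT_SUCCESS;
}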
- */ -static void uwb_rc_sys_release(struct device *dev) -{ - struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev); - struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev); - - uwb_rc_ie_release(rc); - kfree(rc); -} - - -void uwb_rc_init(struct uwb_rc *rc) -{ - struct uwb_dev *uwb_dev = &rc->uwb_dev; - - uwb_dev_init(uwb_dev); - rc->uwb_dev.dev.class = &uwb_rc_class; - rc->uwb_dev.dev.release = uwb_rc_sys_release; - uwb_rc_neh_create(rc); - rc->beaconing = -1; - rc->scan_type = UWB_SCAN_DISABLED; - INIT_LIST_HEAD(&rc->notifs_chain.list); - mutex_init(&rc->notifs_chain.mutex); - INIT_LIST_HEAD(&rc->uwb_beca.list); - mutex_init(&rc->uwb_beca.mutex); - uwb_drp_avail_init(rc); - uwb_rc_ie_init(rc); - uwb_rsv_init(rc); - uwb_rc_pal_init(rc); -} -EXPORT_SYMBOL_GPL(uwb_rc_init); - - -struct uwb_rc *uwb_rc_alloc(void) -{ - struct uwb_rc *rc; - rc = kzalloc(sizeof(*rc), GFP_KERNEL); - if (rc == NULL) - return NULL; - uwb_rc_init(rc); - return rc; -} -EXPORT_SYMBOL_GPL(uwb_rc_alloc); - -/* - * Show the ASIE that is broadcast in the UWB beacon by this uwb_rc device. - */ -static ssize_t ASIE_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_rc *rc = uwb_dev->rc; - struct uwb_ie_hdr *ie; - void *ptr; - size_t len; - int result = 0; - - /* init empty buffer. */ - result = scnprintf(buf, PAGE_SIZE, "\n"); - mutex_lock(&rc->ies_mutex); - /* walk IEData looking for an ASIE. */ - ptr = rc->ies->IEData; - len = le16_to_cpu(rc->ies->wIELength); - for (;;) { - ie = uwb_ie_next(&ptr, &len); - if (!ie) - break; - if (ie->element_id == UWB_APP_SPEC_IE) { - result = uwb_ie_dump_hex(ie, - ie->length + sizeof(struct uwb_ie_hdr), - buf, PAGE_SIZE); - break; - } - } - mutex_unlock(&rc->ies_mutex); - - return result; -} - -/* - * Update the ASIE that is broadcast in the UWB beacon by this uwb_rc device. - */ -static ssize_t ASIE_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_rc *rc = uwb_dev->rc; - char ie_buf[255]; - int result, ie_len = 0; - const char *cur_ptr = buf; - struct uwb_ie_hdr *ie; - - /* empty string means clear the ASIE. */ - if (strlen(buf) <= 1) { - uwb_rc_ie_rm(rc, UWB_APP_SPEC_IE); - return size; - } - - /* if non-empty string, convert string of hex chars to binary. */ - while (ie_len < sizeof(ie_buf)) { - int char_count; - - if (sscanf(cur_ptr, " %02hhX %n", - &(ie_buf[ie_len]), &char_count) > 0) { - ++ie_len; - /* skip chars read from cur_ptr. */ - cur_ptr += char_count; - } else { - break; - } - } - - /* validate IE length and type. */ - if (ie_len < sizeof(struct uwb_ie_hdr)) { - dev_err(dev, "%s: Invalid ASIE size %d.\n", __func__, ie_len); - return -EINVAL; - } - - ie = (struct uwb_ie_hdr *)ie_buf; - if (ie->element_id != UWB_APP_SPEC_IE) { - dev_err(dev, "%s: Invalid IE element type size = 0x%02X.\n", - __func__, ie->element_id); - return -EINVAL; - } - - /* bounds check length field from user. */ - if (ie->length > (ie_len - sizeof(struct uwb_ie_hdr))) - ie->length = ie_len - sizeof(struct uwb_ie_hdr); - - /* - * Valid ASIE received. Remove current ASIE then add the new one using - * uwb_rc_ie_add. - */ - uwb_rc_ie_rm(rc, UWB_APP_SPEC_IE); - - result = uwb_rc_ie_add(rc, ie, ie->length + sizeof(struct uwb_ie_hdr)); - - return result >= 0 ? 
size : result; -} -static DEVICE_ATTR_RW(ASIE); - -static struct attribute *rc_attrs[] = { - &dev_attr_mac_address.attr, - &dev_attr_scan.attr, - &dev_attr_beacon.attr, - &dev_attr_ASIE.attr, - NULL, -}; - -static const struct attribute_group rc_attr_group = { - .attrs = rc_attrs, -}; - -/* - * Registration of sysfs specific stuff - */ -static int uwb_rc_sys_add(struct uwb_rc *rc) -{ - return sysfs_create_group(&rc->uwb_dev.dev.kobj, &rc_attr_group); -} - - -static void __uwb_rc_sys_rm(struct uwb_rc *rc) -{ - sysfs_remove_group(&rc->uwb_dev.dev.kobj, &rc_attr_group); -} - -/** - * uwb_rc_mac_addr_setup - get an RC's EUI-48 address or set it - * @rc: the radio controller. - * - * If the EUI-48 address is 00:00:00:00:00:00 or FF:FF:FF:FF:FF:FF - * then a random locally administered EUI-48 is generated and set on - * the device. The probability of address collisions is sufficiently - * unlikely (1/2^40 = 9.1e-13) that they're not checked for. - */ -static -int uwb_rc_mac_addr_setup(struct uwb_rc *rc) -{ - int result; - struct device *dev = &rc->uwb_dev.dev; - struct uwb_dev *uwb_dev = &rc->uwb_dev; - char devname[UWB_ADDR_STRSIZE]; - struct uwb_mac_addr addr; - - result = uwb_rc_mac_addr_get(rc, &addr); - if (result < 0) { - dev_err(dev, "cannot retrieve UWB EUI-48 address: %d\n", result); - return result; - } - - if (uwb_mac_addr_unset(&addr) || uwb_mac_addr_bcast(&addr)) { - addr.data[0] = 0x02; /* locally administered and unicast */ - get_random_bytes(&addr.data[1], sizeof(addr.data)-1); - - result = uwb_rc_mac_addr_set(rc, &addr); - if (result < 0) { - uwb_mac_addr_print(devname, sizeof(devname), &addr); - dev_err(dev, "cannot set EUI-48 address %s: %d\n", - devname, result); - return result; - } - } - uwb_dev->mac_addr = addr; - return 0; -} - - - -static int uwb_rc_setup(struct uwb_rc *rc) -{ - int result; - struct device *dev = &rc->uwb_dev.dev; - - result = uwb_radio_setup(rc); - if (result < 0) { - dev_err(dev, "cannot setup UWB radio: %d\n", result); - goto error; - } - result = uwb_rc_mac_addr_setup(rc); - if (result < 0) { - dev_err(dev, "cannot setup UWB MAC address: %d\n", result); - goto error; - } - result = uwb_rc_dev_addr_assign(rc); - if (result < 0) { - dev_err(dev, "cannot assign UWB DevAddr: %d\n", result); - goto error; - } - result = uwb_rc_ie_setup(rc); - if (result < 0) { - dev_err(dev, "cannot setup IE subsystem: %d\n", result); - goto error_ie_setup; - } - result = uwb_rsv_setup(rc); - if (result < 0) { - dev_err(dev, "cannot setup reservation subsystem: %d\n", result); - goto error_rsv_setup; - } - uwb_dbg_add_rc(rc); - return 0; - -error_rsv_setup: - uwb_rc_ie_release(rc); -error_ie_setup: -error: - return result; -} - - -/** - * Register a new UWB radio controller - * - * Did you call uwb_rc_init() on your rc? - * - * We assume that this is being called with a > 0 refcount on - * it [through ops->{get|put}_device(). We'll take our own, though. 
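ASIE_store() above converts a user-supplied string of hexadecimal octets into a binary IE with sscanf() and the " %02hhX %n" format, where %02hhX reads one octet and %n reports how many characters were consumed so the scan position can advance. A user-space sketch of the same parsing loop; the input string and buffer size are arbitrary.

/* Hex-string-to-bytes parser using the same sscanf() technique as ASIE_store(). */
#include <stdio.h>

int main(void)
{
    const char *input = "1B 04 DE AD BE EF";
    unsigned char out[32];
    const char *cur = input;
    int len = 0;

    while (len < (int)sizeof(out)) {
        int consumed;

        /* %02hhX reads one octet, %n tells us how far to advance */
        if (sscanf(cur, " %02hhX %n", &out[len], &consumed) <= 0)
            break;
        len++;
        cur += consumed;
    }

    printf("parsed %d bytes, first byte 0x%02X\n", len, out[0]);
    return 0;
}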
- * - * @parent_dev is our real device, the one that provides the actual UWB device - */ -int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv) -{ - int result; - struct device *dev; - char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; - - rc->index = uwb_rc_new_index(); - - dev = &rc->uwb_dev.dev; - dev_set_name(dev, "uwb%d", rc->index); - - rc->priv = priv; - - init_waitqueue_head(&rc->uwbd.wq); - INIT_LIST_HEAD(&rc->uwbd.event_list); - spin_lock_init(&rc->uwbd.event_list_lock); - - uwbd_start(rc); - - result = rc->start(rc); - if (result < 0) - goto error_rc_start; - - result = uwb_rc_setup(rc); - if (result < 0) { - dev_err(dev, "cannot setup UWB radio controller: %d\n", result); - goto error_rc_setup; - } - - result = uwb_dev_add(&rc->uwb_dev, parent_dev, rc); - if (result < 0 && result != -EADDRNOTAVAIL) - goto error_dev_add; - - result = uwb_rc_sys_add(rc); - if (result < 0) { - dev_err(parent_dev, "cannot register UWB radio controller " - "dev attributes: %d\n", result); - goto error_sys_add; - } - - uwb_mac_addr_print(macbuf, sizeof(macbuf), &rc->uwb_dev.mac_addr); - uwb_dev_addr_print(devbuf, sizeof(devbuf), &rc->uwb_dev.dev_addr); - dev_info(dev, - "new uwb radio controller (mac %s dev %s) on %s %s\n", - macbuf, devbuf, parent_dev->bus->name, dev_name(parent_dev)); - rc->ready = 1; - return 0; - -error_sys_add: - uwb_dev_rm(&rc->uwb_dev); -error_dev_add: -error_rc_setup: - rc->stop(rc); -error_rc_start: - uwbd_stop(rc); - return result; -} -EXPORT_SYMBOL_GPL(uwb_rc_add); - - -static int uwb_dev_offair_helper(struct device *dev, void *priv) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - - return __uwb_dev_offair(uwb_dev, uwb_dev->rc); -} - -/* - * Remove a Radio Controller; stop beaconing/scanning, disconnect all children - */ -void uwb_rc_rm(struct uwb_rc *rc) -{ - rc->ready = 0; - - uwb_dbg_del_rc(rc); - uwb_rsv_remove_all(rc); - uwb_radio_shutdown(rc); - - rc->stop(rc); - - uwbd_stop(rc); - uwb_rc_neh_destroy(rc); - - uwb_dev_lock(&rc->uwb_dev); - rc->priv = NULL; - rc->cmd = NULL; - uwb_dev_unlock(&rc->uwb_dev); - mutex_lock(&rc->uwb_beca.mutex); - uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL); - __uwb_rc_sys_rm(rc); - mutex_unlock(&rc->uwb_beca.mutex); - uwb_rsv_cleanup(rc); - uwb_beca_release(rc); - uwb_dev_rm(&rc->uwb_dev); -} -EXPORT_SYMBOL_GPL(uwb_rc_rm); - -static int find_rc_try_get(struct device *dev, const void *data) -{ - const struct uwb_rc *target_rc = data; - struct uwb_rc *rc = dev_get_drvdata(dev); - - if (rc == NULL) { - WARN_ON(1); - return 0; - } - if (rc == target_rc) { - if (rc->ready == 0) - return 0; - else - return 1; - } - return 0; -} - -/** - * Given a radio controller descriptor, validate and refcount it - * - * @returns NULL if the rc does not exist or is quiescing; the ptr to - * it otherwise. - */ -struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc) -{ - struct device *dev; - struct uwb_rc *rc = NULL; - - dev = class_find_device(&uwb_rc_class, NULL, target_rc, - find_rc_try_get); - if (dev) { - rc = dev_get_drvdata(dev); - __uwb_rc_get(rc); - put_device(dev); - } - - return rc; -} -EXPORT_SYMBOL_GPL(__uwb_rc_try_get); - -/* - * RC get for external refcount acquirers... 
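When the controller reports an unset or broadcast EUI-48, uwb_rc_mac_addr_setup() above generates a random locally administered, unicast address by forcing the first octet to 0x02 and randomizing the remaining five. A hedged user-space equivalent is below; the random source is plain rand() purely for illustration, not the kernel's get_random_bytes().

/* Sketch: generate a random locally administered, unicast EUI-48. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
    unsigned char addr[6];

    srand((unsigned int)time(NULL));
    addr[0] = 0x02;                 /* locally administered (bit 1 set), unicast (bit 0 clear) */
    for (int i = 1; i < 6; i++)
        addr[i] = (unsigned char)(rand() & 0xff);

    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
    return 0;
}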
- * - * Increments the refcount of the device and it's backend modules - */ -static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc) -{ - if (rc->ready == 0) - return NULL; - uwb_dev_get(&rc->uwb_dev); - return rc; -} - -static int find_rc_grandpa(struct device *dev, const void *data) -{ - const struct device *grandpa_dev = data; - struct uwb_rc *rc = dev_get_drvdata(dev); - - if (rc->uwb_dev.dev.parent->parent == grandpa_dev) { - rc = uwb_rc_get(rc); - return 1; - } - return 0; -} - -/** - * Locate and refcount a radio controller given a common grand-parent - * - * @grandpa_dev Pointer to the 'grandparent' device structure. - * @returns NULL If the rc does not exist or is quiescing; the ptr to - * it otherwise, properly referenced. - * - * The Radio Control interface (or the UWB Radio Controller) is always - * an interface of a device. The parent is the interface, the - * grandparent is the device that encapsulates the interface. - * - * There is no need to lock around as the "grandpa" would be - * refcounted by the target, and to remove the referemes, the - * uwb_rc_class->sem would have to be taken--we hold it, ergo we - * should be safe. - */ -struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev) -{ - struct device *dev; - struct uwb_rc *rc = NULL; - - dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev, - find_rc_grandpa); - if (dev) { - rc = dev_get_drvdata(dev); - put_device(dev); - } - - return rc; -} -EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa); - -/** - * Find a radio controller by device address - * - * @returns the pointer to the radio controller, properly referenced - */ -static int find_rc_dev(struct device *dev, const void *data) -{ - const struct uwb_dev_addr *addr = data; - struct uwb_rc *rc = dev_get_drvdata(dev); - - if (rc == NULL) { - WARN_ON(1); - return 0; - } - if (!uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, addr)) { - rc = uwb_rc_get(rc); - return 1; - } - return 0; -} - -struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr) -{ - struct device *dev; - struct uwb_rc *rc = NULL; - - dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev); - if (dev) { - rc = dev_get_drvdata(dev); - put_device(dev); - } - - return rc; -} -EXPORT_SYMBOL_GPL(uwb_rc_get_by_dev); - -/** - * Drop a reference on a radio controller - * - * This is the version that should be done by entities external to the - * UWB Radio Control stack (ie: clients of the API). - */ -void uwb_rc_put(struct uwb_rc *rc) -{ - __uwb_rc_put(rc); -} -EXPORT_SYMBOL_GPL(uwb_rc_put); diff --git a/drivers/staging/uwb/neh.c b/drivers/staging/uwb/neh.c deleted file mode 100644 index 1695584be412..000000000000 --- a/drivers/staging/uwb/neh.c +++ /dev/null @@ -1,606 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * WUSB Wire Adapter: Radio Control Interface (WUSB[8]) - * Notification and Event Handling - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * The RC interface of the Host Wire Adapter (USB dongle) or WHCI PCI - * card delivers a stream of notifications and events to the - * notification end event endpoint or area. This code takes care of - * getting a buffer with that data, breaking it up in separate - * notifications and events and then deliver those. - * - * Events are answers to commands and they carry a context ID that - * associates them to the command. Notifications are that, - * notifications, they come out of the blue and have a context ID of - * zero. 
Think of the context ID kind of like a handler. The - * uwb_rc_neh_* code deals with managing context IDs. - * - * This is why you require a handle to operate on a UWB host. When you - * open a handle a context ID is assigned to you. - * - * So, as it is done is: - * - * 1. Add an event handler [uwb_rc_neh_add()] (assigns a ctx id) - * 2. Issue command [rc->cmd(rc, ...)] - * 3. Arm the timeout timer [uwb_rc_neh_arm()] - * 4, Release the reference to the neh [uwb_rc_neh_put()] - * 5. Wait for the callback - * 6. Command result (RCEB) is passed to the callback - * - * If (2) fails, you should remove the handle [uwb_rc_neh_rm()] - * instead of arming the timer. - * - * Handles are for using in *serialized* code, single thread. - * - * When the notification/event comes, the IRQ handler/endpoint - * callback passes the data read to uwb_rc_neh_grok() which will break - * it up in a discrete series of events, look up who is listening for - * them and execute the pertinent callbacks. - * - * If the reader detects an error while reading the data stream, call - * uwb_rc_neh_error(). - * - * CONSTRAINTS/ASSUMPTIONS: - * - * - Most notifications/events are small (less thank .5k), copying - * around is ok. - * - * - Notifications/events are ALWAYS smaller than PAGE_SIZE - * - * - Notifications/events always come in a single piece (ie: a buffer - * will always contain entire notifications/events). - * - * - we cannot know in advance how long each event is (because they - * lack a length field in their header--smart move by the standards - * body, btw). So we need a facility to get the event size given the - * header. This is what the EST code does (notif/Event Size - * Tables), check nest.c--as well, you can associate the size to - * the handle [w/ neh->extra_size()]. - * - * - Most notifications/events are fixed size; only a few are variable - * size (NEST takes care of that). - * - * - Listeners of events expect them, so they usually provide a - * buffer, as they know the size. Listeners to notifications don't, - * so we allocate their buffers dynamically. - */ -#include <linux/kernel.h> -#include <linux/timer.h> -#include <linux/slab.h> -#include <linux/err.h> -#include <linux/export.h> - -#include "uwb-internal.h" - -/* - * UWB Radio Controller Notification/Event Handle - * - * Represents an entity waiting for an event coming from the UWB Radio - * Controller with a given context id (context) and type (evt_type and - * evt). On reception of the notification/event, the callback (cb) is - * called with the event. - * - * If the timer expires before the event is received, the callback is - * called with -ETIMEDOUT as the event size. 
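The six-step sequence described above (add a handler, issue the command, arm the timeout, drop the local reference, then wait for the callback) is easiest to see as a caller-side skeleton. The function below is essentially what uwb_rc_cmd_async() in reset.c does, minus command filtering and the rc->priv locking, and is shown only to make the ordering explicit; it is a sketch, not a drop-in replacement.

/* Caller-side skeleton of the flow described above; illustrative only. */
#include <linux/err.h>
#include "uwb-internal.h"

static int example_issue_cmd(struct uwb_rc *rc, struct uwb_rccb *cmd,
                             size_t cmd_size, u8 expected_type,
                             u16 expected_event, uwb_rc_cmd_cb_f cb, void *arg)
{
    struct uwb_rc_neh *neh;
    int result;

    /* 1: register a handler; a context ID is written into cmd->bCommandContext */
    neh = uwb_rc_neh_add(rc, cmd, expected_type, expected_event, cb, arg);
    if (IS_ERR(neh))
        return PTR_ERR(neh);

    result = rc->cmd(rc, cmd, cmd_size);    /* 2: issue the command */
    if (result < 0)
        uwb_rc_neh_rm(rc, neh);             /* command never went out: drop the handle */
    else
        uwb_rc_neh_arm(rc, neh);            /* 3: arm the timeout timer */
    uwb_rc_neh_put(neh);                    /* 4: release the local reference */
    return result;                          /* 5/6: cb() later gets the RCEB or -ETIMEDOUT */
}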
- */ -struct uwb_rc_neh { - struct kref kref; - - struct uwb_rc *rc; - u8 evt_type; - __le16 evt; - u8 context; - u8 completed; - uwb_rc_cmd_cb_f cb; - void *arg; - - struct timer_list timer; - struct list_head list_node; -}; - -static void uwb_rc_neh_timer(struct timer_list *t); - -static void uwb_rc_neh_release(struct kref *kref) -{ - struct uwb_rc_neh *neh = container_of(kref, struct uwb_rc_neh, kref); - - kfree(neh); -} - -static void uwb_rc_neh_get(struct uwb_rc_neh *neh) -{ - kref_get(&neh->kref); -} - -/** - * uwb_rc_neh_put - release reference to a neh - * @neh: the neh - */ -void uwb_rc_neh_put(struct uwb_rc_neh *neh) -{ - kref_put(&neh->kref, uwb_rc_neh_release); -} - - -/** - * Assigns @neh a context id from @rc's pool - * - * @rc: UWB Radio Controller descriptor; @rc->neh_lock taken - * @neh: Notification/Event Handle - * @returns 0 if context id was assigned ok; < 0 errno on error (if - * all the context IDs are taken). - * - * (assumes @wa is locked). - * - * NOTE: WUSB spec reserves context ids 0x00 for notifications and - * 0xff is invalid, so they must not be used. Initialization - * fills up those two in the bitmap so they are not allocated. - * - * We spread the allocation around to reduce the possibility of two - * consecutive opened @neh's getting the same context ID assigned (to - * avoid surprises with late events that timed out long time ago). So - * first we search from where @rc->ctx_roll is, if not found, we - * search from zero. - */ -static -int __uwb_rc_ctx_get(struct uwb_rc *rc, struct uwb_rc_neh *neh) -{ - int result; - result = find_next_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX, - rc->ctx_roll++); - if (result < UWB_RC_CTX_MAX) - goto found; - result = find_first_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX); - if (result < UWB_RC_CTX_MAX) - goto found; - return -ENFILE; -found: - set_bit(result, rc->ctx_bm); - neh->context = result; - return 0; -} - - -/** Releases @neh's context ID back to @rc (@rc->neh_lock is locked). */ -static -void __uwb_rc_ctx_put(struct uwb_rc *rc, struct uwb_rc_neh *neh) -{ - struct device *dev = &rc->uwb_dev.dev; - if (neh->context == 0) - return; - if (test_bit(neh->context, rc->ctx_bm) == 0) { - dev_err(dev, "context %u not set in bitmap\n", - neh->context); - WARN_ON(1); - } - clear_bit(neh->context, rc->ctx_bm); - neh->context = 0; -} - -/** - * uwb_rc_neh_add - add a neh for a radio controller command - * @rc: the radio controller - * @cmd: the radio controller command - * @expected_type: the type of the expected response event - * @expected_event: the expected event ID - * @cb: callback for when the event is received - * @arg: argument for the callback - * - * Creates a neh and adds it to the list of those waiting for an - * event. A context ID will be assigned to the command. 
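The context-ID allocator above spreads allocations around a rolling cursor so that two handles opened back to back rarely reuse the same ID, which avoids confusing a late, timed-out event with a fresh command. A small user-space sketch of the same rolling search, using a plain bool array instead of the kernel bitmap helpers:

/* Rolling ID allocator sketch mirroring __uwb_rc_ctx_get(); illustrative only. */
#include <stdio.h>
#include <stdbool.h>

#define CTX_MAX 256

static bool used[CTX_MAX];
static unsigned int roll = 1;   /* 0x00 and 0xff are reserved, as in uwb_rc_neh_create() */

static int ctx_get(void)
{
    /* first search from the rolling cursor ... */
    for (unsigned int i = roll++; i < CTX_MAX; i++)
        if (!used[i]) {
            used[i] = true;
            return (int)i;
        }
    /* ... then wrap around and search from the beginning */
    for (unsigned int i = 0; i < CTX_MAX; i++)
        if (!used[i]) {
            used[i] = true;
            return (int)i;
        }
    return -1;                  /* all IDs taken */
}

int main(void)
{
    int a, b;

    used[0x00] = used[0xff] = true;     /* reserved values are never handed out */
    a = ctx_get();
    b = ctx_get();
    printf("ctx %d, then ctx %d\n", a, b);
    return 0;
}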
- */ -struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd, - u8 expected_type, u16 expected_event, - uwb_rc_cmd_cb_f cb, void *arg) -{ - int result; - unsigned long flags; - struct device *dev = &rc->uwb_dev.dev; - struct uwb_rc_neh *neh; - - neh = kzalloc(sizeof(*neh), GFP_KERNEL); - if (neh == NULL) { - result = -ENOMEM; - goto error_kzalloc; - } - - kref_init(&neh->kref); - INIT_LIST_HEAD(&neh->list_node); - timer_setup(&neh->timer, uwb_rc_neh_timer, 0); - - neh->rc = rc; - neh->evt_type = expected_type; - neh->evt = cpu_to_le16(expected_event); - neh->cb = cb; - neh->arg = arg; - - spin_lock_irqsave(&rc->neh_lock, flags); - result = __uwb_rc_ctx_get(rc, neh); - if (result >= 0) { - cmd->bCommandContext = neh->context; - list_add_tail(&neh->list_node, &rc->neh_list); - uwb_rc_neh_get(neh); - } - spin_unlock_irqrestore(&rc->neh_lock, flags); - if (result < 0) - goto error_ctx_get; - - return neh; - -error_ctx_get: - kfree(neh); -error_kzalloc: - dev_err(dev, "cannot open handle to radio controller: %d\n", result); - return ERR_PTR(result); -} - -static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) -{ - __uwb_rc_ctx_put(rc, neh); - list_del(&neh->list_node); -} - -/** - * uwb_rc_neh_rm - remove a neh. - * @rc: the radio controller - * @neh: the neh to remove - * - * Remove an active neh immediately instead of waiting for the event - * (or a time out). - */ -void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) -{ - unsigned long flags; - - spin_lock_irqsave(&rc->neh_lock, flags); - __uwb_rc_neh_rm(rc, neh); - spin_unlock_irqrestore(&rc->neh_lock, flags); - - del_timer_sync(&neh->timer); - uwb_rc_neh_put(neh); -} - -/** - * uwb_rc_neh_arm - arm an event handler timeout timer - * - * @rc: UWB Radio Controller - * @neh: Notification/event handler for @rc - * - * The timer is only armed if the neh is active. - */ -void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh) -{ - unsigned long flags; - - spin_lock_irqsave(&rc->neh_lock, flags); - if (neh->context) - mod_timer(&neh->timer, - jiffies + msecs_to_jiffies(UWB_RC_CMD_TIMEOUT_MS)); - spin_unlock_irqrestore(&rc->neh_lock, flags); -} - -static void uwb_rc_neh_cb(struct uwb_rc_neh *neh, struct uwb_rceb *rceb, size_t size) -{ - (*neh->cb)(neh->rc, neh->arg, rceb, size); - uwb_rc_neh_put(neh); -} - -static bool uwb_rc_neh_match(struct uwb_rc_neh *neh, const struct uwb_rceb *rceb) -{ - return neh->evt_type == rceb->bEventType - && neh->evt == rceb->wEvent - && neh->context == rceb->bEventContext; -} - -/** - * Find the handle waiting for a RC Radio Control Event - * - * @rc: UWB Radio Controller - * @rceb: Pointer to the RCEB buffer - * @event_size: Pointer to the size of the RCEB buffer. Might be - * adjusted to take into account the @neh->extra_size - * settings. - * - * If the listener has no buffer (NULL buffer), one is allocated for - * the right size (the amount of data received). @neh->ptr will point - * to the event payload, which always starts with a 'struct - * uwb_rceb'. kfree() it when done. 
- */ -static -struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc, - const struct uwb_rceb *rceb) -{ - struct uwb_rc_neh *neh = NULL, *h; - unsigned long flags; - - spin_lock_irqsave(&rc->neh_lock, flags); - - list_for_each_entry(h, &rc->neh_list, list_node) { - if (uwb_rc_neh_match(h, rceb)) { - neh = h; - break; - } - } - - if (neh) - __uwb_rc_neh_rm(rc, neh); - - spin_unlock_irqrestore(&rc->neh_lock, flags); - - return neh; -} - - -/* - * Process notifications coming from the radio control interface - * - * @rc: UWB Radio Control Interface descriptor - * @neh: Notification/Event Handler @neh->ptr points to - * @uwb_evt->buffer. - * - * This function is called by the event/notif handling subsystem when - * notifications arrive (hwarc_probe() arms a notification/event handle - * that calls back this function for every received notification; this - * function then will rearm itself). - * - * Notification data buffers are dynamically allocated by the NEH - * handling code in neh.c [uwb_rc_neh_lookup()]. What is actually - * allocated is space to contain the notification data. - * - * Buffers are prefixed with a Radio Control Event Block (RCEB) as - * defined by the WUSB Wired-Adapter Radio Control interface. We - * just use it for the notification code. - * - * On each case statement we just transcode endianess of the different - * fields. We declare a pointer to a RCI definition of an event, and - * then to a UWB definition of the same event (which are the same, - * remember). Event if we use different pointers - */ -static -void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size) -{ - struct device *dev = &rc->uwb_dev.dev; - struct uwb_event *uwb_evt; - - if (size == -ESHUTDOWN) - return; - if (size < 0) { - dev_err(dev, "ignoring event with error code %zu\n", - size); - return; - } - - uwb_evt = kzalloc(sizeof(*uwb_evt), GFP_ATOMIC); - if (unlikely(uwb_evt == NULL)) { - dev_err(dev, "no memory to queue event 0x%02x/%04x/%02x\n", - rceb->bEventType, le16_to_cpu(rceb->wEvent), - rceb->bEventContext); - return; - } - uwb_evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */ - uwb_evt->ts_jiffies = jiffies; - uwb_evt->type = UWB_EVT_TYPE_NOTIF; - uwb_evt->notif.size = size; - uwb_evt->notif.rceb = rceb; - - uwbd_event_queue(uwb_evt); -} - -static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size_t size) -{ - struct device *dev = &rc->uwb_dev.dev; - struct uwb_rc_neh *neh; - struct uwb_rceb *notif; - unsigned long flags; - - if (rceb->bEventContext == 0) { - notif = kmalloc(size, GFP_ATOMIC); - if (notif) { - memcpy(notif, rceb, size); - uwb_rc_notif(rc, notif, size); - } else - dev_err(dev, "event 0x%02x/%04x/%02x (%zu bytes): no memory\n", - rceb->bEventType, le16_to_cpu(rceb->wEvent), - rceb->bEventContext, size); - } else { - neh = uwb_rc_neh_lookup(rc, rceb); - if (neh) { - spin_lock_irqsave(&rc->neh_lock, flags); - /* to guard against a timeout */ - neh->completed = 1; - del_timer(&neh->timer); - spin_unlock_irqrestore(&rc->neh_lock, flags); - uwb_rc_neh_cb(neh, rceb, size); - } else - dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", - rceb->bEventType, le16_to_cpu(rceb->wEvent), - rceb->bEventContext, size); - } -} - -/** - * Given a buffer with one or more UWB RC events/notifications, break - * them up and dispatch them. 
- * - * @rc: UWB Radio Controller - * @buf: Buffer with the stream of notifications/events - * @buf_size: Amount of data in the buffer - * - * Note each notification/event starts always with a 'struct - * uwb_rceb', so the minimum size if 4 bytes. - * - * The device may pass us events formatted differently than expected. - * These are first filtered, potentially creating a new event in a new - * memory location. If a new event is created by the filter it is also - * freed here. - * - * For each notif/event, tries to guess the size looking at the EST - * tables, then looks for a neh that is waiting for that event and if - * found, copies the payload to the neh's buffer and calls it back. If - * not, the data is ignored. - * - * Note that if we can't find a size description in the EST tables, we - * still might find a size in the 'neh' handle in uwb_rc_neh_lookup(). - * - * Assumptions: - * - * @rc->neh_lock is NOT taken - * - * We keep track of various sizes here: - * size: contains the size of the buffer that is processed for the - * incoming event. this buffer may contain events that are not - * formatted as WHCI. - * real_size: the actual space taken by this event in the buffer. - * We need to keep track of the real size of an event to be able to - * advance the buffer correctly. - * event_size: the size of the event as expected by the core layer - * [OR] the size of the event after filtering. if the filtering - * created a new event in a new memory location then this is - * effectively the size of a new event buffer - */ -void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size) -{ - struct device *dev = &rc->uwb_dev.dev; - void *itr; - struct uwb_rceb *rceb; - size_t size, real_size, event_size; - int needtofree; - - itr = buf; - size = buf_size; - while (size > 0) { - if (size < sizeof(*rceb)) { - dev_err(dev, "not enough data in event buffer to " - "process incoming events (%zu left, minimum is " - "%zu)\n", size, sizeof(*rceb)); - break; - } - - rceb = itr; - if (rc->filter_event) { - needtofree = rc->filter_event(rc, &rceb, size, - &real_size, &event_size); - if (needtofree < 0 && needtofree != -ENOANO) { - dev_err(dev, "BUG: Unable to filter event " - "(0x%02x/%04x/%02x) from " - "device. \n", rceb->bEventType, - le16_to_cpu(rceb->wEvent), - rceb->bEventContext); - break; - } - } else - needtofree = -ENOANO; - /* do real processing if there was no filtering or the - * filtering didn't act */ - if (needtofree == -ENOANO) { - ssize_t ret = uwb_est_find_size(rc, rceb, size); - if (ret < 0) - break; - if (ret > size) { - dev_err(dev, "BUG: hw sent incomplete event " - "0x%02x/%04x/%02x (%zd bytes), only got " - "%zu bytes. We don't handle that.\n", - rceb->bEventType, le16_to_cpu(rceb->wEvent), - rceb->bEventContext, ret, size); - break; - } - real_size = event_size = ret; - } - uwb_rc_neh_grok_event(rc, rceb, event_size); - - if (needtofree == 1) - kfree(rceb); - - itr += real_size; - size -= real_size; - } -} -EXPORT_SYMBOL_GPL(uwb_rc_neh_grok); - - -/** - * The entity that reads from the device notification/event channel has - * detected an error. 
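uwb_rc_neh_grok() above is a classic "walk a buffer of variable-size records" loop: check that a complete header is present, work out the record length, dispatch it, then advance by the real size. A self-contained sketch of that loop over a simple length-prefixed record format follows; the record layout is invented and is not the WHCI RCEB format.

/* Variable-size record walker, same shape as uwb_rc_neh_grok(); record format is made up. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct record {
    uint8_t type;
    uint8_t len;    /* length of the payload that follows this header */
};

static void walk(const uint8_t *buf, size_t size)
{
    while (size > 0) {
        const struct record *r;
        size_t rec_size;

        if (size < sizeof(*r)) {
            fprintf(stderr, "truncated header (%zu bytes left)\n", size);
            break;
        }
        r = (const struct record *)buf;
        rec_size = sizeof(*r) + r->len;
        if (rec_size > size) {
            fprintf(stderr, "incomplete record: need %zu, have %zu\n", rec_size, size);
            break;
        }
        printf("record type 0x%02x, %u payload bytes\n", r->type, r->len);
        buf += rec_size;        /* advance past header + payload */
        size -= rec_size;
    }
}

int main(void)
{
    const uint8_t stream[] = { 0x01, 0x02, 0xaa, 0xbb, 0x02, 0x00, 0x03, 0x01, 0xcc };

    walk(stream, sizeof(stream));
    return 0;
}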
- * - * @rc: UWB Radio Controller - * @error: Errno error code - * - */ -void uwb_rc_neh_error(struct uwb_rc *rc, int error) -{ - struct uwb_rc_neh *neh; - unsigned long flags; - - for (;;) { - spin_lock_irqsave(&rc->neh_lock, flags); - if (list_empty(&rc->neh_list)) { - spin_unlock_irqrestore(&rc->neh_lock, flags); - break; - } - neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node); - __uwb_rc_neh_rm(rc, neh); - spin_unlock_irqrestore(&rc->neh_lock, flags); - - del_timer_sync(&neh->timer); - uwb_rc_neh_cb(neh, NULL, error); - } -} -EXPORT_SYMBOL_GPL(uwb_rc_neh_error); - - -static void uwb_rc_neh_timer(struct timer_list *t) -{ - struct uwb_rc_neh *neh = from_timer(neh, t, timer); - struct uwb_rc *rc = neh->rc; - unsigned long flags; - - spin_lock_irqsave(&rc->neh_lock, flags); - if (neh->completed) { - spin_unlock_irqrestore(&rc->neh_lock, flags); - return; - } - if (neh->context) - __uwb_rc_neh_rm(rc, neh); - else - neh = NULL; - spin_unlock_irqrestore(&rc->neh_lock, flags); - - if (neh) - uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT); -} - -/** Initializes the @rc's neh subsystem - */ -void uwb_rc_neh_create(struct uwb_rc *rc) -{ - spin_lock_init(&rc->neh_lock); - INIT_LIST_HEAD(&rc->neh_list); - set_bit(0, rc->ctx_bm); /* 0 is reserved (see [WUSB] table 8-65) */ - set_bit(0xff, rc->ctx_bm); /* and 0xff is invalid */ - rc->ctx_roll = 1; -} - - -/** Release's the @rc's neh subsystem */ -void uwb_rc_neh_destroy(struct uwb_rc *rc) -{ - unsigned long flags; - struct uwb_rc_neh *neh; - - for (;;) { - spin_lock_irqsave(&rc->neh_lock, flags); - if (list_empty(&rc->neh_list)) { - spin_unlock_irqrestore(&rc->neh_lock, flags); - break; - } - neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node); - __uwb_rc_neh_rm(rc, neh); - spin_unlock_irqrestore(&rc->neh_lock, flags); - - del_timer_sync(&neh->timer); - uwb_rc_neh_put(neh); - } -} diff --git a/drivers/staging/uwb/pal.c b/drivers/staging/uwb/pal.c deleted file mode 100644 index a541e646a603..000000000000 --- a/drivers/staging/uwb/pal.c +++ /dev/null @@ -1,128 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * UWB PAL support. - * - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. - */ -#include <linux/kernel.h> -#include <linux/debugfs.h> -#include <linux/export.h> - -#include "uwb.h" -#include "uwb-internal.h" - -/** - * uwb_pal_init - initialize a UWB PAL - * @pal: the PAL to initialize - */ -void uwb_pal_init(struct uwb_pal *pal) -{ - INIT_LIST_HEAD(&pal->node); -} -EXPORT_SYMBOL_GPL(uwb_pal_init); - -/** - * uwb_pal_register - register a UWB PAL - * @pal: the PAL - * - * The PAL must be initialized with uwb_pal_init(). - */ -int uwb_pal_register(struct uwb_pal *pal) -{ - struct uwb_rc *rc = pal->rc; - int ret; - - if (pal->device) { - /* create a link to the uwb_rc in the PAL device's directory. */ - ret = sysfs_create_link(&pal->device->kobj, - &rc->uwb_dev.dev.kobj, "uwb_rc"); - if (ret < 0) - return ret; - /* create a link to the PAL in the UWB device's directory. 
*/ - ret = sysfs_create_link(&rc->uwb_dev.dev.kobj, - &pal->device->kobj, pal->name); - if (ret < 0) { - sysfs_remove_link(&pal->device->kobj, "uwb_rc"); - return ret; - } - } - - pal->debugfs_dir = uwb_dbg_create_pal_dir(pal); - - mutex_lock(&rc->uwb_dev.mutex); - list_add(&pal->node, &rc->pals); - mutex_unlock(&rc->uwb_dev.mutex); - - return 0; -} -EXPORT_SYMBOL_GPL(uwb_pal_register); - -static int find_rc(struct device *dev, const void *data) -{ - const struct uwb_rc *target_rc = data; - struct uwb_rc *rc = dev_get_drvdata(dev); - - if (rc == NULL) { - WARN_ON(1); - return 0; - } - if (rc == target_rc) { - if (rc->ready == 0) - return 0; - else - return 1; - } - return 0; -} - -/** - * Given a radio controller descriptor see if it is registered. - * - * @returns false if the rc does not exist or is quiescing; true otherwise. - */ -static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc) -{ - struct device *dev; - - dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc); - - put_device(dev); - - return (dev != NULL); -} - -/** - * uwb_pal_unregister - unregister a UWB PAL - * @pal: the PAL - */ -void uwb_pal_unregister(struct uwb_pal *pal) -{ - struct uwb_rc *rc = pal->rc; - - uwb_radio_stop(pal); - - mutex_lock(&rc->uwb_dev.mutex); - list_del(&pal->node); - mutex_unlock(&rc->uwb_dev.mutex); - - debugfs_remove(pal->debugfs_dir); - - if (pal->device) { - /* remove link to the PAL in the UWB device's directory. */ - if (uwb_rc_class_device_exists(rc)) - sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name); - - /* remove link to uwb_rc in the PAL device's directory. */ - sysfs_remove_link(&pal->device->kobj, "uwb_rc"); - } -} -EXPORT_SYMBOL_GPL(uwb_pal_unregister); - -/** - * uwb_rc_pal_init - initialize the PAL related parts of a radio controller - * @rc: the radio controller - */ -void uwb_rc_pal_init(struct uwb_rc *rc) -{ - INIT_LIST_HEAD(&rc->pals); -} diff --git a/drivers/staging/uwb/radio.c b/drivers/staging/uwb/radio.c deleted file mode 100644 index 6afb75ce1b5f..000000000000 --- a/drivers/staging/uwb/radio.c +++ /dev/null @@ -1,196 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * UWB radio (channel) management. - * - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. - */ -#include <linux/kernel.h> -#include <linux/export.h> - -#include "uwb.h" -#include "uwb-internal.h" - - -static int uwb_radio_select_channel(struct uwb_rc *rc) -{ - /* - * Default to channel 9 (BG1, TFC1) unless the user has - * selected a specific channel or there are no active PALs. - */ - if (rc->active_pals == 0) - return -1; - if (rc->beaconing_forced) - return rc->beaconing_forced; - return 9; -} - - -/* - * Notify all active PALs that the channel has changed. - */ -static void uwb_radio_channel_changed(struct uwb_rc *rc, int channel) -{ - struct uwb_pal *pal; - - list_for_each_entry(pal, &rc->pals, node) { - if (pal->channel && channel != pal->channel) { - pal->channel = channel; - if (pal->channel_changed) - pal->channel_changed(pal, pal->channel); - } - } -} - -/* - * Change to a new channel and notify any active PALs of the new - * channel. - * - * When stopping the radio, PALs need to be notified first so they can - * terminate any active reservations. 
- */ -static int uwb_radio_change_channel(struct uwb_rc *rc, int channel) -{ - int ret = 0; - struct device *dev = &rc->uwb_dev.dev; - - dev_dbg(dev, "%s: channel = %d, rc->beaconing = %d\n", __func__, - channel, rc->beaconing); - - if (channel == -1) - uwb_radio_channel_changed(rc, channel); - - if (channel != rc->beaconing) { - if (rc->beaconing != -1 && channel != -1) { - /* - * FIXME: should signal the channel change - * with a Channel Change IE. - */ - ret = uwb_radio_change_channel(rc, -1); - if (ret < 0) - return ret; - } - ret = uwb_rc_beacon(rc, channel, 0); - } - - if (channel != -1) - uwb_radio_channel_changed(rc, rc->beaconing); - - return ret; -} - -/** - * uwb_radio_start - request that the radio be started - * @pal: the PAL making the request. - * - * If the radio is not already active, a suitable channel is selected - * and beacons are started. - */ -int uwb_radio_start(struct uwb_pal *pal) -{ - struct uwb_rc *rc = pal->rc; - int ret = 0; - - mutex_lock(&rc->uwb_dev.mutex); - - if (!pal->channel) { - pal->channel = -1; - rc->active_pals++; - ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); - } - - mutex_unlock(&rc->uwb_dev.mutex); - return ret; -} -EXPORT_SYMBOL_GPL(uwb_radio_start); - -/** - * uwb_radio_stop - request that the radio be stopped. - * @pal: the PAL making the request. - * - * Stops the radio if no other PAL is making use of it. - */ -void uwb_radio_stop(struct uwb_pal *pal) -{ - struct uwb_rc *rc = pal->rc; - - mutex_lock(&rc->uwb_dev.mutex); - - if (pal->channel) { - rc->active_pals--; - uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); - pal->channel = 0; - } - - mutex_unlock(&rc->uwb_dev.mutex); -} -EXPORT_SYMBOL_GPL(uwb_radio_stop); - -/* - * uwb_radio_force_channel - force a specific channel to be used - * @rc: the radio controller. - * @channel: the channel to use; -1 to force the radio to stop; 0 to - * use the default channel selection algorithm. - */ -int uwb_radio_force_channel(struct uwb_rc *rc, int channel) -{ - int ret = 0; - - mutex_lock(&rc->uwb_dev.mutex); - - rc->beaconing_forced = channel; - ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); - - mutex_unlock(&rc->uwb_dev.mutex); - return ret; -} - -/* - * uwb_radio_setup - setup the radio manager - * @rc: the radio controller. - * - * The radio controller is reset to ensure it's in a known state - * before it's used. - */ -int uwb_radio_setup(struct uwb_rc *rc) -{ - return uwb_rc_reset(rc); -} - -/* - * uwb_radio_reset_state - reset any radio manager state - * @rc: the radio controller. - * - * All internal radio manager state is reset to values corresponding - * to a reset radio controller. - */ -void uwb_radio_reset_state(struct uwb_rc *rc) -{ - struct uwb_pal *pal; - - mutex_lock(&rc->uwb_dev.mutex); - - list_for_each_entry(pal, &rc->pals, node) { - if (pal->channel) { - pal->channel = -1; - if (pal->channel_changed) - pal->channel_changed(pal, -1); - } - } - - rc->beaconing = -1; - rc->scanning = -1; - - mutex_unlock(&rc->uwb_dev.mutex); -} - -/* - * uwb_radio_shutdown - shutdown the radio manager - * @rc: the radio controller. - * - * The radio controller is reset. 
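uwb_radio_select_channel() above encodes a small policy: no channel (-1) when no PAL is active, the forced channel when one has been set, and channel 9 (BG1, TFC1) otherwise; uwb_radio_change_channel() then stops beaconing before hopping directly between two live channels. A stand-alone sketch of just the selection policy; the state struct is invented for the example.

/* Channel-selection policy sketch, mirroring uwb_radio_select_channel(); illustrative only. */
#include <stdio.h>

struct radio_state {
    int active_pals;        /* PALs currently using the radio */
    int forced_channel;     /* >0: forced channel, 0: automatic, -1: forced off */
};

static int select_channel(const struct radio_state *s)
{
    if (s->active_pals == 0)
        return -1;                      /* nothing needs the radio: stop beaconing */
    if (s->forced_channel)
        return s->forced_channel;       /* honour an explicit override, including "off" */
    return 9;                           /* default: channel 9 (BG1, TFC1) */
}

int main(void)
{
    struct radio_state s = { .active_pals = 1, .forced_channel = 0 };

    printf("selected channel: %d\n", select_channel(&s));
    s.active_pals = 0;
    printf("selected channel: %d\n", select_channel(&s));
    return 0;
}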
- */ -void uwb_radio_shutdown(struct uwb_rc *rc) -{ - uwb_radio_reset_state(rc); - uwb_rc_reset(rc); -} diff --git a/drivers/staging/uwb/reset.c b/drivers/staging/uwb/reset.c deleted file mode 100644 index 8fc7c14d903e..000000000000 --- a/drivers/staging/uwb/reset.c +++ /dev/null @@ -1,379 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band - * UWB basic command support and radio reset - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: - * - * - docs - * - * - Now we are serializing (using the uwb_dev->mutex) the command - * execution; it should be parallelized as much as possible some - * day. - */ -#include <linux/kernel.h> -#include <linux/err.h> -#include <linux/slab.h> -#include <linux/delay.h> -#include <linux/export.h> - -#include "uwb-internal.h" - -/** - * Command result codes (WUSB1.0[T8-69]) - */ -static -const char *__strerror[] = { - "success", - "failure", - "hardware failure", - "no more slots", - "beacon is too large", - "invalid parameter", - "unsupported power level", - "time out (wa) or invalid ie data (whci)", - "beacon size exceeded", - "cancelled", - "invalid state", - "invalid size", - "ack not received", - "no more asie notification", -}; - - -/** Return a string matching the given error code */ -const char *uwb_rc_strerror(unsigned code) -{ - if (code == 255) - return "time out"; - if (code >= ARRAY_SIZE(__strerror)) - return "unknown error"; - return __strerror[code]; -} - -int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - u8 expected_type, u16 expected_event, - uwb_rc_cmd_cb_f cb, void *arg) -{ - struct device *dev = &rc->uwb_dev.dev; - struct uwb_rc_neh *neh; - int needtofree = 0; - int result; - - uwb_dev_lock(&rc->uwb_dev); /* Protect against rc->priv being removed */ - if (rc->priv == NULL) { - uwb_dev_unlock(&rc->uwb_dev); - return -ESHUTDOWN; - } - - if (rc->filter_cmd) { - needtofree = rc->filter_cmd(rc, &cmd, &cmd_size); - if (needtofree < 0 && needtofree != -ENOANO) { - dev_err(dev, "%s: filter error: %d\n", - cmd_name, needtofree); - uwb_dev_unlock(&rc->uwb_dev); - return needtofree; - } - } - - neh = uwb_rc_neh_add(rc, cmd, expected_type, expected_event, cb, arg); - if (IS_ERR(neh)) { - result = PTR_ERR(neh); - uwb_dev_unlock(&rc->uwb_dev); - goto out; - } - - result = rc->cmd(rc, cmd, cmd_size); - uwb_dev_unlock(&rc->uwb_dev); - if (result < 0) - uwb_rc_neh_rm(rc, neh); - else - uwb_rc_neh_arm(rc, neh); - uwb_rc_neh_put(neh); -out: - if (needtofree == 1) - kfree(cmd); - return result < 0 ? 
result : 0; -} -EXPORT_SYMBOL_GPL(uwb_rc_cmd_async); - -struct uwb_rc_cmd_done_params { - struct completion completion; - struct uwb_rceb *reply; - ssize_t reply_size; -}; - -static void uwb_rc_cmd_done(struct uwb_rc *rc, void *arg, - struct uwb_rceb *reply, ssize_t reply_size) -{ - struct uwb_rc_cmd_done_params *p = (struct uwb_rc_cmd_done_params *)arg; - - if (reply_size > 0) { - if (p->reply) - reply_size = min(p->reply_size, reply_size); - else - p->reply = kmalloc(reply_size, GFP_ATOMIC); - - if (p->reply) - memcpy(p->reply, reply, reply_size); - else - reply_size = -ENOMEM; - } - p->reply_size = reply_size; - complete(&p->completion); -} - - -/** - * Generic function for issuing commands to the Radio Control Interface - * - * @rc: UWB Radio Control descriptor - * @cmd_name: Name of the command being issued (for error messages) - * @cmd: Pointer to rccb structure containing the command; - * normally you embed this structure as the first member of - * the full command structure. - * @cmd_size: Size of the whole command buffer pointed to by @cmd. - * @reply: Pointer to where to store the reply - * @reply_size: @reply's size - * @expected_type: Expected type in the return event - * @expected_event: Expected event code in the return event - * @preply: Here a pointer to where the event data is received will - * be stored. Once done with the data, free with kfree(). - * - * This function is generic; it works for commands that return a fixed - * and known size or for commands that return a variable amount of data. - * - * If a buffer is provided, that is used, although it could be chopped - * to the maximum size of the buffer. If the buffer is NULL, then one - * be allocated in *preply with the whole contents of the reply. - * - * @rc needs to be referenced - */ -static -ssize_t __uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - struct uwb_rceb *reply, size_t reply_size, - u8 expected_type, u16 expected_event, - struct uwb_rceb **preply) -{ - ssize_t result = 0; - struct device *dev = &rc->uwb_dev.dev; - struct uwb_rc_cmd_done_params params; - - init_completion(¶ms.completion); - params.reply = reply; - params.reply_size = reply_size; - - result = uwb_rc_cmd_async(rc, cmd_name, cmd, cmd_size, - expected_type, expected_event, - uwb_rc_cmd_done, ¶ms); - if (result) - return result; - - wait_for_completion(¶ms.completion); - - if (preply) - *preply = params.reply; - - if (params.reply_size < 0) - dev_err(dev, "%s: confirmation event 0x%02x/%04x/%02x " - "reception failed: %d\n", cmd_name, - expected_type, expected_event, cmd->bCommandContext, - (int)params.reply_size); - return params.reply_size; -} - - -/** - * Generic function for issuing commands to the Radio Control Interface - * - * @rc: UWB Radio Control descriptor - * @cmd_name: Name of the command being issued (for error messages) - * @cmd: Pointer to rccb structure containing the command; - * normally you embed this structure as the first member of - * the full command structure. - * @cmd_size: Size of the whole command buffer pointed to by @cmd. - * @reply: Pointer to the beginning of the confirmation event - * buffer. Normally bigger than an 'struct hwarc_rceb'. - * You need to fill out reply->bEventType and reply->wEvent (in - * cpu order) as the function will use them to verify the - * confirmation event. 
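__uwb_rc_cmd() below turns the asynchronous command interface into a blocking call: it packages a completion and a reply buffer into a params struct, passes uwb_rc_cmd_done() as the callback, and sleeps on the completion until the callback fires. A user-space sketch of that "sync over async" pattern using a condition variable follows; everything in it is invented for illustration and is not the UWB API.

/* Sync-over-async sketch with pthreads; mirrors the completion-based wait in __uwb_rc_cmd(). */
#include <pthread.h>
#include <stdio.h>

struct done_params {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    int done;
    int result;
};

/* Callback invoked from another context when the "command" finishes. */
static void cmd_done(struct done_params *p, int result)
{
    pthread_mutex_lock(&p->lock);
    p->result = result;
    p->done = 1;
    pthread_cond_signal(&p->cond);
    pthread_mutex_unlock(&p->lock);
}

static void *worker(void *arg)
{
    cmd_done(arg, 42);      /* simulate the asynchronous completion */
    return NULL;
}

int main(void)
{
    struct done_params p = { .done = 0 };
    pthread_t t;

    pthread_mutex_init(&p.lock, NULL);
    pthread_cond_init(&p.cond, NULL);

    pthread_create(&t, NULL, worker, &p);   /* "issue" the async command */

    pthread_mutex_lock(&p.lock);            /* then block until the callback signals us */
    while (!p.done)
        pthread_cond_wait(&p.cond, &p.lock);
    pthread_mutex_unlock(&p.lock);

    printf("command completed with result %d\n", p.result);
    pthread_join(t, NULL);
    return 0;
}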
- * @reply_size: Size of the reply buffer - * - * The function checks that the length returned in the reply is at - * least as big as @reply_size; if not, it will be deemed an error and - * -EIO returned. - * - * @rc needs to be referenced - */ -ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - struct uwb_rceb *reply, size_t reply_size) -{ - struct device *dev = &rc->uwb_dev.dev; - ssize_t result; - - result = __uwb_rc_cmd(rc, cmd_name, - cmd, cmd_size, reply, reply_size, - reply->bEventType, reply->wEvent, NULL); - - if (result > 0 && result < reply_size) { - dev_err(dev, "%s: not enough data returned for decoding reply " - "(%zu bytes received vs at least %zu needed)\n", - cmd_name, result, reply_size); - result = -EIO; - } - return result; -} -EXPORT_SYMBOL_GPL(uwb_rc_cmd); - - -/** - * Generic function for issuing commands to the Radio Control - * Interface that return an unknown amount of data - * - * @rc: UWB Radio Control descriptor - * @cmd_name: Name of the command being issued (for error messages) - * @cmd: Pointer to rccb structure containing the command; - * normally you embed this structure as the first member of - * the full command structure. - * @cmd_size: Size of the whole command buffer pointed to by @cmd. - * @expected_type: Expected type in the return event - * @expected_event: Expected event code in the return event - * @preply: Here a pointer to where the event data is received will - * be stored. Once done with the data, free with kfree(). - * - * The function checks that the length returned in the reply is at - * least as big as a 'struct uwb_rceb *'; if not, it will be deemed an - * error and -EIO returned. - * - * @rc needs to be referenced - */ -ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - u8 expected_type, u16 expected_event, - struct uwb_rceb **preply) -{ - return __uwb_rc_cmd(rc, cmd_name, cmd, cmd_size, NULL, 0, - expected_type, expected_event, preply); -} -EXPORT_SYMBOL_GPL(uwb_rc_vcmd); - - -/** - * Reset a UWB Host Controller (and all radio settings) - * - * @rc: Host Controller descriptor - * @returns: 0 if ok, < 0 errno code on error - * - * We put the command on kmalloc'ed memory as some arches cannot do - * USB from the stack. The reply event is copied from an stage buffer, - * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details. 
- */ -int uwb_rc_reset(struct uwb_rc *rc) -{ - int result = -ENOMEM; - struct uwb_rc_evt_confirm reply; - struct uwb_rccb *cmd; - size_t cmd_size = sizeof(*cmd); - - mutex_lock(&rc->uwb_dev.mutex); - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - goto error_kzalloc; - cmd->bCommandType = UWB_RC_CET_GENERAL; - cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET); - reply.rceb.bEventType = UWB_RC_CET_GENERAL; - reply.rceb.wEvent = UWB_RC_CMD_RESET; - result = uwb_rc_cmd(rc, "RESET", cmd, cmd_size, - &reply.rceb, sizeof(reply)); - if (result < 0) - goto error_cmd; - if (reply.bResultCode != UWB_RC_RES_SUCCESS) { - dev_err(&rc->uwb_dev.dev, - "RESET: command execution failed: %s (%d)\n", - uwb_rc_strerror(reply.bResultCode), reply.bResultCode); - result = -EIO; - } -error_cmd: - kfree(cmd); -error_kzalloc: - mutex_unlock(&rc->uwb_dev.mutex); - return result; -} - -int uwbd_msg_handle_reset(struct uwb_event *evt) -{ - struct uwb_rc *rc = evt->rc; - int ret; - - dev_info(&rc->uwb_dev.dev, "resetting radio controller\n"); - ret = rc->reset(rc); - if (ret < 0) { - dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret); - goto error; - } - return 0; -error: - /* Nothing can be done except try the reset again. Wait a bit - to avoid reset loops during probe() or remove(). */ - msleep(1000); - uwb_rc_reset_all(rc); - return ret; -} - -/** - * uwb_rc_reset_all - request a reset of the radio controller and PALs - * @rc: the radio controller of the hardware device to be reset. - * - * The full hardware reset of the radio controller and all the PALs - * will be scheduled. - */ -void uwb_rc_reset_all(struct uwb_rc *rc) -{ - struct uwb_event *evt; - - evt = kzalloc(sizeof(struct uwb_event), GFP_ATOMIC); - if (unlikely(evt == NULL)) - return; - - evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */ - evt->ts_jiffies = jiffies; - evt->type = UWB_EVT_TYPE_MSG; - evt->message = UWB_EVT_MSG_RESET; - - uwbd_event_queue(evt); -} -EXPORT_SYMBOL_GPL(uwb_rc_reset_all); - -void uwb_rc_pre_reset(struct uwb_rc *rc) -{ - rc->stop(rc); - uwbd_flush(rc); - - uwb_radio_reset_state(rc); - uwb_rsv_remove_all(rc); -} -EXPORT_SYMBOL_GPL(uwb_rc_pre_reset); - -int uwb_rc_post_reset(struct uwb_rc *rc) -{ - int ret; - - ret = rc->start(rc); - if (ret) - goto out; - ret = uwb_rc_mac_addr_set(rc, &rc->uwb_dev.mac_addr); - if (ret) - goto out; - ret = uwb_rc_dev_addr_set(rc, &rc->uwb_dev.dev_addr); - if (ret) - goto out; -out: - return ret; -} -EXPORT_SYMBOL_GPL(uwb_rc_post_reset); diff --git a/drivers/staging/uwb/rsv.c b/drivers/staging/uwb/rsv.c deleted file mode 100644 index d593a41c3d8d..000000000000 --- a/drivers/staging/uwb/rsv.c +++ /dev/null @@ -1,1000 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * UWB reservation management. - * - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 
- */ -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/random.h> -#include <linux/export.h> - -#include "uwb.h" -#include "uwb-internal.h" - -static void uwb_rsv_timer(struct timer_list *t); - -static const char *rsv_states[] = { - [UWB_RSV_STATE_NONE] = "none ", - [UWB_RSV_STATE_O_INITIATED] = "o initiated ", - [UWB_RSV_STATE_O_PENDING] = "o pending ", - [UWB_RSV_STATE_O_MODIFIED] = "o modified ", - [UWB_RSV_STATE_O_ESTABLISHED] = "o established ", - [UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ", - [UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding", - [UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining", - [UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ", - [UWB_RSV_STATE_T_ACCEPTED] = "t accepted ", - [UWB_RSV_STATE_T_CONFLICT] = "t conflict ", - [UWB_RSV_STATE_T_PENDING] = "t pending ", - [UWB_RSV_STATE_T_DENIED] = "t denied ", - [UWB_RSV_STATE_T_RESIZED] = "t resized ", - [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ", - [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf", - [UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend", - [UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ", -}; - -static const char *rsv_types[] = { - [UWB_DRP_TYPE_ALIEN_BP] = "alien-bp", - [UWB_DRP_TYPE_HARD] = "hard", - [UWB_DRP_TYPE_SOFT] = "soft", - [UWB_DRP_TYPE_PRIVATE] = "private", - [UWB_DRP_TYPE_PCA] = "pca", -}; - -bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv) -{ - static const bool has_two_drp_ies[] = { - [UWB_RSV_STATE_O_INITIATED] = false, - [UWB_RSV_STATE_O_PENDING] = false, - [UWB_RSV_STATE_O_MODIFIED] = false, - [UWB_RSV_STATE_O_ESTABLISHED] = false, - [UWB_RSV_STATE_O_TO_BE_MOVED] = false, - [UWB_RSV_STATE_O_MOVE_COMBINING] = false, - [UWB_RSV_STATE_O_MOVE_REDUCING] = false, - [UWB_RSV_STATE_O_MOVE_EXPANDING] = true, - [UWB_RSV_STATE_T_ACCEPTED] = false, - [UWB_RSV_STATE_T_CONFLICT] = false, - [UWB_RSV_STATE_T_PENDING] = false, - [UWB_RSV_STATE_T_DENIED] = false, - [UWB_RSV_STATE_T_RESIZED] = false, - [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true, - [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true, - [UWB_RSV_STATE_T_EXPANDING_PENDING] = true, - [UWB_RSV_STATE_T_EXPANDING_DENIED] = true, - }; - - return has_two_drp_ies[rsv->state]; -} - -/** - * uwb_rsv_state_str - return a string for a reservation state - * @state: the reservation state. 
- */ -const char *uwb_rsv_state_str(enum uwb_rsv_state state) -{ - if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST) - return "unknown"; - return rsv_states[state]; -} -EXPORT_SYMBOL_GPL(uwb_rsv_state_str); - -/** - * uwb_rsv_type_str - return a string for a reservation type - * @type: the reservation type - */ -const char *uwb_rsv_type_str(enum uwb_drp_type type) -{ - if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA) - return "invalid"; - return rsv_types[type]; -} -EXPORT_SYMBOL_GPL(uwb_rsv_type_str); - -void uwb_rsv_dump(char *text, struct uwb_rsv *rsv) -{ - struct device *dev = &rsv->rc->uwb_dev.dev; - struct uwb_dev_addr devaddr; - char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; - - uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); - if (rsv->target.type == UWB_RSV_TARGET_DEV) - devaddr = rsv->target.dev->dev_addr; - else - devaddr = rsv->target.devaddr; - uwb_dev_addr_print(target, sizeof(target), &devaddr); - - dev_dbg(dev, "rsv %s %s -> %s: %s\n", - text, owner, target, uwb_rsv_state_str(rsv->state)); -} - -static void uwb_rsv_release(struct kref *kref) -{ - struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref); - - kfree(rsv); -} - -void uwb_rsv_get(struct uwb_rsv *rsv) -{ - kref_get(&rsv->kref); -} - -void uwb_rsv_put(struct uwb_rsv *rsv) -{ - kref_put(&rsv->kref, uwb_rsv_release); -} - -/* - * Get a free stream index for a reservation. - * - * If the target is a DevAddr (e.g., a WUSB cluster reservation) then - * the stream is allocated from a pool of per-RC stream indexes, - * otherwise a unique stream index for the target is selected. - */ -static int uwb_rsv_get_stream(struct uwb_rsv *rsv) -{ - struct uwb_rc *rc = rsv->rc; - struct device *dev = &rc->uwb_dev.dev; - unsigned long *streams_bm; - int stream; - - switch (rsv->target.type) { - case UWB_RSV_TARGET_DEV: - streams_bm = rsv->target.dev->streams; - break; - case UWB_RSV_TARGET_DEVADDR: - streams_bm = rc->uwb_dev.streams; - break; - default: - return -EINVAL; - } - - stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS); - if (stream >= UWB_NUM_STREAMS) { - dev_err(dev, "%s: no available stream found\n", __func__); - return -EBUSY; - } - - rsv->stream = stream; - set_bit(stream, streams_bm); - - dev_dbg(dev, "get stream %d\n", rsv->stream); - - return 0; -} - -static void uwb_rsv_put_stream(struct uwb_rsv *rsv) -{ - struct uwb_rc *rc = rsv->rc; - struct device *dev = &rc->uwb_dev.dev; - unsigned long *streams_bm; - - switch (rsv->target.type) { - case UWB_RSV_TARGET_DEV: - streams_bm = rsv->target.dev->streams; - break; - case UWB_RSV_TARGET_DEVADDR: - streams_bm = rc->uwb_dev.streams; - break; - default: - return; - } - - clear_bit(rsv->stream, streams_bm); - - dev_dbg(dev, "put stream %d\n", rsv->stream); -} - -void uwb_rsv_backoff_win_timer(struct timer_list *t) -{ - struct uwb_drp_backoff_win *bow = from_timer(bow, t, timer); - struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow); - struct device *dev = &rc->uwb_dev.dev; - - bow->can_reserve_extra_mases = true; - if (bow->total_expired <= 4) { - bow->total_expired++; - } else { - /* after 4 backoff window has expired we can exit from - * the backoff procedure */ - bow->total_expired = 0; - bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1; - } - dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n", bow->total_expired, bow->n); - - /* try to relocate all the "to be moved" relocations */ - uwb_rsv_handle_drp_avail_change(rc); -} - -void uwb_rsv_backoff_win_increment(struct uwb_rc *rc) -{ - struct 
uwb_drp_backoff_win *bow = &rc->bow; - struct device *dev = &rc->uwb_dev.dev; - unsigned timeout_us; - - dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window); - - bow->can_reserve_extra_mases = false; - - if((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX) - return; - - bow->window <<= 1; - bow->n = prandom_u32() & (bow->window - 1); - dev_dbg(dev, "new_window=%d, n=%d\n", bow->window, bow->n); - - /* reset the timer associated variables */ - timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US; - bow->total_expired = 0; - mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us)); -} - -static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) -{ - int sframes = UWB_MAX_LOST_BEACONS; - - /* - * Multicast reservations can become established within 1 - * super frame and should not be terminated if no response is - * received. - */ - if (rsv->state == UWB_RSV_STATE_NONE) { - sframes = 0; - } else if (rsv->is_multicast) { - if (rsv->state == UWB_RSV_STATE_O_INITIATED - || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING - || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING - || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) - sframes = 1; - if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) - sframes = 0; - - } - - if (sframes > 0) { - /* - * Add an additional 2 superframes to account for the - * time to send the SET DRP IE command. - */ - unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US; - mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us)); - } else - del_timer(&rsv->timer); -} - -/* - * Update a reservations state, and schedule an update of the - * transmitted DRP IEs. - */ -static void uwb_rsv_state_update(struct uwb_rsv *rsv, - enum uwb_rsv_state new_state) -{ - rsv->state = new_state; - rsv->ie_valid = false; - - uwb_rsv_dump("SU", rsv); - - uwb_rsv_stroke_timer(rsv); - uwb_rsv_sched_update(rsv->rc); -} - -static void uwb_rsv_callback(struct uwb_rsv *rsv) -{ - if (rsv->callback) - rsv->callback(rsv); -} - -void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) -{ - struct uwb_rsv_move *mv = &rsv->mv; - - if (rsv->state == new_state) { - switch (rsv->state) { - case UWB_RSV_STATE_O_ESTABLISHED: - case UWB_RSV_STATE_O_MOVE_EXPANDING: - case UWB_RSV_STATE_O_MOVE_COMBINING: - case UWB_RSV_STATE_O_MOVE_REDUCING: - case UWB_RSV_STATE_T_ACCEPTED: - case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: - case UWB_RSV_STATE_T_RESIZED: - case UWB_RSV_STATE_NONE: - uwb_rsv_stroke_timer(rsv); - break; - default: - /* Expecting a state transition so leave timer - as-is. 
*/ - break; - } - return; - } - - uwb_rsv_dump("SC", rsv); - - switch (new_state) { - case UWB_RSV_STATE_NONE: - uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); - uwb_rsv_remove(rsv); - uwb_rsv_callback(rsv); - break; - case UWB_RSV_STATE_O_INITIATED: - uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED); - break; - case UWB_RSV_STATE_O_PENDING: - uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); - break; - case UWB_RSV_STATE_O_MODIFIED: - /* in the companion there are the MASes to drop */ - bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); - uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED); - break; - case UWB_RSV_STATE_O_ESTABLISHED: - if (rsv->state == UWB_RSV_STATE_O_MODIFIED - || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) { - uwb_drp_avail_release(rsv->rc, &mv->companion_mas); - rsv->needs_release_companion_mas = false; - } - uwb_drp_avail_reserve(rsv->rc, &rsv->mas); - uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); - uwb_rsv_callback(rsv); - break; - case UWB_RSV_STATE_O_MOVE_EXPANDING: - rsv->needs_release_companion_mas = true; - uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); - break; - case UWB_RSV_STATE_O_MOVE_COMBINING: - rsv->needs_release_companion_mas = false; - uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); - bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); - rsv->mas.safe += mv->companion_mas.safe; - rsv->mas.unsafe += mv->companion_mas.unsafe; - uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); - break; - case UWB_RSV_STATE_O_MOVE_REDUCING: - bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); - rsv->needs_release_companion_mas = true; - rsv->mas.safe = mv->final_mas.safe; - rsv->mas.unsafe = mv->final_mas.unsafe; - bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); - bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS); - uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); - break; - case UWB_RSV_STATE_T_ACCEPTED: - case UWB_RSV_STATE_T_RESIZED: - rsv->needs_release_companion_mas = false; - uwb_drp_avail_reserve(rsv->rc, &rsv->mas); - uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); - uwb_rsv_callback(rsv); - break; - case UWB_RSV_STATE_T_DENIED: - uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); - break; - case UWB_RSV_STATE_T_CONFLICT: - uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT); - break; - case UWB_RSV_STATE_T_PENDING: - uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING); - break; - case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: - rsv->needs_release_companion_mas = true; - uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); - uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); - break; - default: - dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", - uwb_rsv_state_str(new_state), new_state); - } -} - -static void uwb_rsv_handle_timeout_work(struct work_struct *work) -{ - struct uwb_rsv *rsv = container_of(work, struct uwb_rsv, - handle_timeout_work); - struct uwb_rc *rc = rsv->rc; - - mutex_lock(&rc->rsvs_mutex); - - uwb_rsv_dump("TO", rsv); - - switch (rsv->state) { - case UWB_RSV_STATE_O_INITIATED: - if (rsv->is_multicast) { - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); - goto unlock; - } - break; - case UWB_RSV_STATE_O_MOVE_EXPANDING: - if (rsv->is_multicast) { - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); - goto unlock; - } - break; - case UWB_RSV_STATE_O_MOVE_COMBINING: - if (rsv->is_multicast) { - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); - goto unlock; 
- } - break; - case UWB_RSV_STATE_O_MOVE_REDUCING: - if (rsv->is_multicast) { - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); - goto unlock; - } - break; - case UWB_RSV_STATE_O_ESTABLISHED: - if (rsv->is_multicast) - goto unlock; - break; - case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: - /* - * The time out could be for the main or of the - * companion DRP, assume it's for the companion and - * drop that first. A further time out is required to - * drop the main. - */ - uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); - uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); - goto unlock; - case UWB_RSV_STATE_NONE: - goto unlock; - default: - break; - } - - uwb_rsv_remove(rsv); - -unlock: - mutex_unlock(&rc->rsvs_mutex); -} - -static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) -{ - struct uwb_rsv *rsv; - - rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL); - if (!rsv) - return NULL; - - INIT_LIST_HEAD(&rsv->rc_node); - INIT_LIST_HEAD(&rsv->pal_node); - kref_init(&rsv->kref); - timer_setup(&rsv->timer, uwb_rsv_timer, 0); - - rsv->rc = rc; - INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work); - - return rsv; -} - -/** - * uwb_rsv_create - allocate and initialize a UWB reservation structure - * @rc: the radio controller - * @cb: callback to use when the reservation completes or terminates - * @pal_priv: data private to the PAL to be passed in the callback - * - * The callback is called when the state of the reservation changes from: - * - * - pending to accepted - * - pending to denined - * - accepted to terminated - * - pending to terminated - */ -struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv) -{ - struct uwb_rsv *rsv; - - rsv = uwb_rsv_alloc(rc); - if (!rsv) - return NULL; - - rsv->callback = cb; - rsv->pal_priv = pal_priv; - - return rsv; -} -EXPORT_SYMBOL_GPL(uwb_rsv_create); - -void uwb_rsv_remove(struct uwb_rsv *rsv) -{ - uwb_rsv_dump("RM", rsv); - - if (rsv->state != UWB_RSV_STATE_NONE) - uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); - - if (rsv->needs_release_companion_mas) - uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); - uwb_drp_avail_release(rsv->rc, &rsv->mas); - - if (uwb_rsv_is_owner(rsv)) - uwb_rsv_put_stream(rsv); - - uwb_dev_put(rsv->owner); - if (rsv->target.type == UWB_RSV_TARGET_DEV) - uwb_dev_put(rsv->target.dev); - - list_del_init(&rsv->rc_node); - uwb_rsv_put(rsv); -} - -/** - * uwb_rsv_destroy - free a UWB reservation structure - * @rsv: the reservation to free - * - * The reservation must already be terminated. - */ -void uwb_rsv_destroy(struct uwb_rsv *rsv) -{ - uwb_rsv_put(rsv); -} -EXPORT_SYMBOL_GPL(uwb_rsv_destroy); - -/** - * usb_rsv_establish - start a reservation establishment - * @rsv: the reservation - * - * The PAL should fill in @rsv's owner, target, type, max_mas, - * min_mas, max_interval and is_multicast fields. If the target is a - * uwb_dev it must be referenced. - * - * The reservation's callback will be called when the reservation is - * accepted, denied or times out. 
- */ -int uwb_rsv_establish(struct uwb_rsv *rsv) -{ - struct uwb_rc *rc = rsv->rc; - struct uwb_mas_bm available; - struct device *dev = &rc->uwb_dev.dev; - int ret; - - mutex_lock(&rc->rsvs_mutex); - ret = uwb_rsv_get_stream(rsv); - if (ret) { - dev_err(dev, "%s: uwb_rsv_get_stream failed: %d\n", - __func__, ret); - goto out; - } - - rsv->tiebreaker = prandom_u32() & 1; - /* get available mas bitmap */ - uwb_drp_available(rc, &available); - - ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas); - if (ret == UWB_RSV_ALLOC_NOT_FOUND) { - ret = -EBUSY; - uwb_rsv_put_stream(rsv); - dev_err(dev, "%s: uwb_rsv_find_best_allocation failed: %d\n", - __func__, ret); - goto out; - } - - ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas); - if (ret != 0) { - uwb_rsv_put_stream(rsv); - dev_err(dev, "%s: uwb_drp_avail_reserve_pending failed: %d\n", - __func__, ret); - goto out; - } - - uwb_rsv_get(rsv); - list_add_tail(&rsv->rc_node, &rc->reservations); - rsv->owner = &rc->uwb_dev; - uwb_dev_get(rsv->owner); - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED); -out: - mutex_unlock(&rc->rsvs_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(uwb_rsv_establish); - -/** - * uwb_rsv_modify - modify an already established reservation - * @rsv: the reservation to modify - * @max_mas: new maximum MAS to reserve - * @min_mas: new minimum MAS to reserve - * @max_interval: new max_interval to use - * - * FIXME: implement this once there are PALs that use it. - */ -int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval) -{ - return -ENOSYS; -} -EXPORT_SYMBOL_GPL(uwb_rsv_modify); - -/* - * move an already established reservation (rc->rsvs_mutex must to be - * taken when tis function is called) - */ -int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available) -{ - struct uwb_rc *rc = rsv->rc; - struct uwb_drp_backoff_win *bow = &rc->bow; - struct device *dev = &rc->uwb_dev.dev; - struct uwb_rsv_move *mv; - int ret = 0; - - if (!bow->can_reserve_extra_mases) - return -EBUSY; - - mv = &rsv->mv; - - if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) { - - if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) { - /* We want to move the reservation */ - bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS); - uwb_drp_avail_reserve_pending(rc, &mv->companion_mas); - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); - } - } else { - dev_dbg(dev, "new allocation not found\n"); - } - - return ret; -} - -/* It will try to move every reservation in state O_ESTABLISHED giving - * to the MAS allocator algorithm an availability that is the real one - * plus the allocation already established from the reservation. */ -void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc) -{ - struct uwb_drp_backoff_win *bow = &rc->bow; - struct uwb_rsv *rsv; - struct uwb_mas_bm mas; - - if (!bow->can_reserve_extra_mases) - return; - - list_for_each_entry(rsv, &rc->reservations, rc_node) { - if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED || - rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) { - uwb_drp_available(rc, &mas); - bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS); - uwb_rsv_try_move(rsv, &mas); - } - } - -} - -/** - * uwb_rsv_terminate - terminate an established reservation - * @rsv: the reservation to terminate - * - * A reservation is terminated by removing the DRP IE from the beacon, - * the other end will consider the reservation to be terminated when - * it does not see the DRP IE for at least mMaxLostBeacons. 
- * - * If applicable, the reference to the target uwb_dev will be released. - */ -void uwb_rsv_terminate(struct uwb_rsv *rsv) -{ - struct uwb_rc *rc = rsv->rc; - - mutex_lock(&rc->rsvs_mutex); - - if (rsv->state != UWB_RSV_STATE_NONE) - uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); - - mutex_unlock(&rc->rsvs_mutex); -} -EXPORT_SYMBOL_GPL(uwb_rsv_terminate); - -/** - * uwb_rsv_accept - accept a new reservation from a peer - * @rsv: the reservation - * @cb: call back for reservation changes - * @pal_priv: data to be passed in the above call back - * - * Reservation requests from peers are denied unless a PAL accepts it - * by calling this function. - * - * The PAL call uwb_rsv_destroy() for all accepted reservations before - * calling uwb_pal_unregister(). - */ -void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv) -{ - uwb_rsv_get(rsv); - - rsv->callback = cb; - rsv->pal_priv = pal_priv; - rsv->state = UWB_RSV_STATE_T_ACCEPTED; -} -EXPORT_SYMBOL_GPL(uwb_rsv_accept); - -/* - * Is a received DRP IE for this reservation? - */ -static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src, - struct uwb_ie_drp *drp_ie) -{ - struct uwb_dev_addr *rsv_src; - int stream; - - stream = uwb_ie_drp_stream_index(drp_ie); - - if (rsv->stream != stream) - return false; - - switch (rsv->target.type) { - case UWB_RSV_TARGET_DEVADDR: - return rsv->stream == stream; - case UWB_RSV_TARGET_DEV: - if (uwb_ie_drp_owner(drp_ie)) - rsv_src = &rsv->owner->dev_addr; - else - rsv_src = &rsv->target.dev->dev_addr; - return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0; - } - return false; -} - -static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc, - struct uwb_dev *src, - struct uwb_ie_drp *drp_ie) -{ - struct uwb_rsv *rsv; - struct uwb_pal *pal; - enum uwb_rsv_state state; - - rsv = uwb_rsv_alloc(rc); - if (!rsv) - return NULL; - - rsv->rc = rc; - rsv->owner = src; - uwb_dev_get(rsv->owner); - rsv->target.type = UWB_RSV_TARGET_DEV; - rsv->target.dev = &rc->uwb_dev; - uwb_dev_get(&rc->uwb_dev); - rsv->type = uwb_ie_drp_type(drp_ie); - rsv->stream = uwb_ie_drp_stream_index(drp_ie); - uwb_drp_ie_to_bm(&rsv->mas, drp_ie); - - /* - * See if any PALs are interested in this reservation. If not, - * deny the request. - */ - rsv->state = UWB_RSV_STATE_T_DENIED; - mutex_lock(&rc->uwb_dev.mutex); - list_for_each_entry(pal, &rc->pals, node) { - if (pal->new_rsv) - pal->new_rsv(pal, rsv); - if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) - break; - } - mutex_unlock(&rc->uwb_dev.mutex); - - list_add_tail(&rsv->rc_node, &rc->reservations); - state = rsv->state; - rsv->state = UWB_RSV_STATE_NONE; - - /* FIXME: do something sensible here */ - if (state == UWB_RSV_STATE_T_ACCEPTED - && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) { - /* FIXME: do something sensible here */ - } else { - uwb_rsv_set_state(rsv, state); - } - - return rsv; -} - -/** - * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservations - * @rsv: the reservation. - * @mas: returns the available MAS. - * - * The usable MAS of a reservation may be less than the negotiated MAS - * if alien BPs are present. - */ -void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas) -{ - bitmap_zero(mas->bm, UWB_NUM_MAS); - bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); -} -EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas); - -/** - * uwb_rsv_find - find a reservation for a received DRP IE. 
- * @rc: the radio controller - * @src: source of the DRP IE - * @drp_ie: the DRP IE - * - * If the reservation cannot be found and the DRP IE is from a peer - * attempting to establish a new reservation, create a new reservation - * and add it to the list. - */ -struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, - struct uwb_ie_drp *drp_ie) -{ - struct uwb_rsv *rsv; - - list_for_each_entry(rsv, &rc->reservations, rc_node) { - if (uwb_rsv_match(rsv, src, drp_ie)) - return rsv; - } - - if (uwb_ie_drp_owner(drp_ie)) - return uwb_rsv_new_target(rc, src, drp_ie); - - return NULL; -} - -/* - * Go through all the reservations and check for timeouts and (if - * necessary) update their DRP IEs. - * - * FIXME: look at building the SET_DRP_IE command here rather than - * having to rescan the list in uwb_rc_send_all_drp_ie(). - */ -static bool uwb_rsv_update_all(struct uwb_rc *rc) -{ - struct uwb_rsv *rsv, *t; - bool ie_updated = false; - - list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { - if (!rsv->ie_valid) { - uwb_drp_ie_update(rsv); - ie_updated = true; - } - } - - return ie_updated; -} - -void uwb_rsv_queue_update(struct uwb_rc *rc) -{ - unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; - - queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us)); -} - -/** - * uwb_rsv_sched_update - schedule an update of the DRP IEs - * @rc: the radio controller. - * - * To improve performance and ensure correctness with [ECMA-368] the - * number of SET-DRP-IE commands that are done are limited. - * - * DRP IEs update come from two sources: DRP events from the hardware - * which all occur at the beginning of the superframe ('syncronous' - * events) and reservation establishment/termination requests from - * PALs or timers ('asynchronous' events). - * - * A delayed work ensures that all the synchronous events result in - * one SET-DRP-IE command. - * - * Additional logic (the set_drp_ie_pending and rsv_updated_postponed - * flags) will prevent an asynchrous event starting a SET-DRP-IE - * command if one is currently awaiting a response. - * - * FIXME: this does leave a window where an asynchrous event can delay - * the SET-DRP-IE for a synchronous event by one superframe. - */ -void uwb_rsv_sched_update(struct uwb_rc *rc) -{ - spin_lock_irq(&rc->rsvs_lock); - if (!delayed_work_pending(&rc->rsv_update_work)) { - if (rc->set_drp_ie_pending > 0) { - rc->set_drp_ie_pending++; - goto unlock; - } - uwb_rsv_queue_update(rc); - } -unlock: - spin_unlock_irq(&rc->rsvs_lock); -} - -/* - * Update DRP IEs and, if necessary, the DRP Availability IE and send - * the updated IEs to the radio controller. 
- */ -static void uwb_rsv_update_work(struct work_struct *work) -{ - struct uwb_rc *rc = container_of(work, struct uwb_rc, - rsv_update_work.work); - bool ie_updated; - - mutex_lock(&rc->rsvs_mutex); - - ie_updated = uwb_rsv_update_all(rc); - - if (!rc->drp_avail.ie_valid) { - uwb_drp_avail_ie_update(rc); - ie_updated = true; - } - - if (ie_updated && (rc->set_drp_ie_pending == 0)) - uwb_rc_send_all_drp_ie(rc); - - mutex_unlock(&rc->rsvs_mutex); -} - -static void uwb_rsv_alien_bp_work(struct work_struct *work) -{ - struct uwb_rc *rc = container_of(work, struct uwb_rc, - rsv_alien_bp_work.work); - struct uwb_rsv *rsv; - - mutex_lock(&rc->rsvs_mutex); - - list_for_each_entry(rsv, &rc->reservations, rc_node) { - if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) { - uwb_rsv_callback(rsv); - } - } - - mutex_unlock(&rc->rsvs_mutex); -} - -static void uwb_rsv_timer(struct timer_list *t) -{ - struct uwb_rsv *rsv = from_timer(rsv, t, timer); - - queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work); -} - -/** - * uwb_rsv_remove_all - remove all reservations - * @rc: the radio controller - * - * A DRP IE update is not done. - */ -void uwb_rsv_remove_all(struct uwb_rc *rc) -{ - struct uwb_rsv *rsv, *t; - - mutex_lock(&rc->rsvs_mutex); - list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { - if (rsv->state != UWB_RSV_STATE_NONE) - uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); - del_timer_sync(&rsv->timer); - } - /* Cancel any postponed update. */ - rc->set_drp_ie_pending = 0; - mutex_unlock(&rc->rsvs_mutex); - - cancel_delayed_work_sync(&rc->rsv_update_work); - flush_workqueue(rc->rsv_workq); - - mutex_lock(&rc->rsvs_mutex); - list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { - uwb_rsv_remove(rsv); - } - mutex_unlock(&rc->rsvs_mutex); -} - -void uwb_rsv_init(struct uwb_rc *rc) -{ - INIT_LIST_HEAD(&rc->reservations); - INIT_LIST_HEAD(&rc->cnflt_alien_list); - mutex_init(&rc->rsvs_mutex); - spin_lock_init(&rc->rsvs_lock); - INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work); - INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work); - rc->bow.can_reserve_extra_mases = true; - rc->bow.total_expired = 0; - rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1; - timer_setup(&rc->bow.timer, uwb_rsv_backoff_win_timer, 0); - - bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); -} - -int uwb_rsv_setup(struct uwb_rc *rc) -{ - char name[16]; - - snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev)); - rc->rsv_workq = create_singlethread_workqueue(name); - if (rc->rsv_workq == NULL) - return -ENOMEM; - - return 0; -} - -void uwb_rsv_cleanup(struct uwb_rc *rc) -{ - uwb_rsv_remove_all(rc); - destroy_workqueue(rc->rsv_workq); -} diff --git a/drivers/staging/uwb/scan.c b/drivers/staging/uwb/scan.c deleted file mode 100644 index ffc3f452302d..000000000000 --- a/drivers/staging/uwb/scan.c +++ /dev/null @@ -1,120 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band - * Scanning management - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: docs - * FIXME: there are issues here on how BEACON and SCAN on USB RCI deal - * with each other. Currently seems that START_BEACON while - * SCAN_ONLY will cancel the scan, so we need to update the - * state here. Clarification request sent by email on - * 10/05/2005. 
- * 10/28/2005 No clear answer heard--maybe we'll hack the API - * so that when we start beaconing, if the HC is - * scanning in a mode not compatible with beaconing - * we just fail. - */ - -#include <linux/device.h> -#include <linux/err.h> -#include <linux/slab.h> -#include <linux/stat.h> -#include "uwb-internal.h" - - -/** - * Start/stop scanning in a radio controller - * - * @rc: UWB Radio Controller - * @channel: Channel to scan; encodings in WUSB1.0[Table 5.12] - * @type: Type of scanning to do. - * @bpst_offset: value at which to start scanning (if type == - * UWB_SCAN_ONLY_STARTTIME) - * @returns: 0 if ok, < 0 errno code on error - * - * We put the command on kmalloc'ed memory as some arches cannot do - * USB from the stack. The reply event is copied from an stage buffer, - * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details. - */ -int uwb_rc_scan(struct uwb_rc *rc, - unsigned channel, enum uwb_scan_type type, - unsigned bpst_offset) -{ - int result; - struct uwb_rc_cmd_scan *cmd; - struct uwb_rc_evt_confirm reply; - - result = -ENOMEM; - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - goto error_kzalloc; - mutex_lock(&rc->uwb_dev.mutex); - cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; - cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SCAN); - cmd->bChannelNumber = channel; - cmd->bScanState = type; - cmd->wStartTime = cpu_to_le16(bpst_offset); - reply.rceb.bEventType = UWB_RC_CET_GENERAL; - reply.rceb.wEvent = UWB_RC_CMD_SCAN; - result = uwb_rc_cmd(rc, "SCAN", &cmd->rccb, sizeof(*cmd), - &reply.rceb, sizeof(reply)); - if (result < 0) - goto error_cmd; - if (reply.bResultCode != UWB_RC_RES_SUCCESS) { - dev_err(&rc->uwb_dev.dev, - "SCAN: command execution failed: %s (%d)\n", - uwb_rc_strerror(reply.bResultCode), reply.bResultCode); - result = -EIO; - goto error_cmd; - } - rc->scanning = channel; - rc->scan_type = type; -error_cmd: - mutex_unlock(&rc->uwb_dev.mutex); - kfree(cmd); -error_kzalloc: - return result; -} - -/* - * Print scanning state - */ -static ssize_t uwb_rc_scan_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_rc *rc = uwb_dev->rc; - ssize_t result; - - mutex_lock(&rc->uwb_dev.mutex); - result = sprintf(buf, "%d %d\n", rc->scanning, rc->scan_type); - mutex_unlock(&rc->uwb_dev.mutex); - return result; -} - -/* - * - */ -static ssize_t uwb_rc_scan_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct uwb_dev *uwb_dev = to_uwb_dev(dev); - struct uwb_rc *rc = uwb_dev->rc; - unsigned channel; - unsigned type; - unsigned bpst_offset = 0; - ssize_t result = -EINVAL; - - result = sscanf(buf, "%u %u %u\n", &channel, &type, &bpst_offset); - if (result >= 2 && type < UWB_SCAN_TOP) - result = uwb_rc_scan(rc, channel, type, bpst_offset); - - return result < 0 ? result : size; -} - -/** Radio Control sysfs interface (declaration) */ -DEVICE_ATTR(scan, S_IRUGO | S_IWUSR, uwb_rc_scan_show, uwb_rc_scan_store); diff --git a/drivers/staging/uwb/umc-bus.c b/drivers/staging/uwb/umc-bus.c deleted file mode 100644 index 8b931f66a720..000000000000 --- a/drivers/staging/uwb/umc-bus.c +++ /dev/null @@ -1,211 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Bus for UWB Multi-interface Controller capabilities. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. 
- */ -#include <linux/kernel.h> -#include <linux/sysfs.h> -#include <linux/workqueue.h> -#include <linux/module.h> -#include <linux/pci.h> -#include "include/umc.h" - -static int umc_bus_pre_reset_helper(struct device *dev, void *data) -{ - int ret = 0; - - if (dev->driver) { - struct umc_dev *umc = to_umc_dev(dev); - struct umc_driver *umc_drv = to_umc_driver(dev->driver); - - if (umc_drv->pre_reset) - ret = umc_drv->pre_reset(umc); - else - device_release_driver(dev); - } - return ret; -} - -static int umc_bus_post_reset_helper(struct device *dev, void *data) -{ - int ret = 0; - - if (dev->driver) { - struct umc_dev *umc = to_umc_dev(dev); - struct umc_driver *umc_drv = to_umc_driver(dev->driver); - - if (umc_drv->post_reset) - ret = umc_drv->post_reset(umc); - } else - ret = device_attach(dev); - - return ret; -} - -/** - * umc_controller_reset - reset the whole UMC controller - * @umc: the UMC device for the radio controller. - * - * Drivers or all capabilities of the controller will have their - * pre_reset methods called or be unbound from their device. Then all - * post_reset methods will be called or the drivers will be rebound. - * - * Radio controllers must provide pre_reset and post_reset methods and - * reset the hardware in their start method. - * - * If this is called while a probe() or remove() is in progress it - * will return -EAGAIN and not perform the reset. - */ -int umc_controller_reset(struct umc_dev *umc) -{ - struct device *parent = umc->dev.parent; - int ret = 0; - - if (!device_trylock(parent)) - return -EAGAIN; - ret = device_for_each_child(parent, parent, umc_bus_pre_reset_helper); - if (ret >= 0) - ret = device_for_each_child(parent, parent, umc_bus_post_reset_helper); - device_unlock(parent); - - return ret; -} -EXPORT_SYMBOL_GPL(umc_controller_reset); - -/** - * umc_match_pci_id - match a UMC driver to a UMC device's parent PCI device. - * @umc_drv: umc driver with match_data pointing to a zero-terminated - * table of pci_device_id's. - * @umc: umc device whose parent is to be matched. - */ -int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc) -{ - const struct pci_device_id *id_table = umc_drv->match_data; - struct pci_dev *pci; - - if (!dev_is_pci(umc->dev.parent)) - return 0; - - pci = to_pci_dev(umc->dev.parent); - return pci_match_id(id_table, pci) != NULL; -} -EXPORT_SYMBOL_GPL(umc_match_pci_id); - -static int umc_bus_rescan_helper(struct device *dev, void *data) -{ - int ret = 0; - - if (!dev->driver) - ret = device_attach(dev); - - return ret; -} - -static void umc_bus_rescan(struct device *parent) -{ - int err; - - /* - * We can't use bus_rescan_devices() here as it deadlocks when - * it tries to retake the dev->parent semaphore. 
- */ - err = device_for_each_child(parent, NULL, umc_bus_rescan_helper); - if (err < 0) - printk(KERN_WARNING "%s: rescan of bus failed: %d\n", - KBUILD_MODNAME, err); -} - -static int umc_bus_match(struct device *dev, struct device_driver *drv) -{ - struct umc_dev *umc = to_umc_dev(dev); - struct umc_driver *umc_driver = to_umc_driver(drv); - - if (umc->cap_id == umc_driver->cap_id) { - if (umc_driver->match) - return umc_driver->match(umc_driver, umc); - else - return 1; - } - return 0; -} - -static int umc_device_probe(struct device *dev) -{ - struct umc_dev *umc; - struct umc_driver *umc_driver; - int err; - - umc_driver = to_umc_driver(dev->driver); - umc = to_umc_dev(dev); - - get_device(dev); - err = umc_driver->probe(umc); - if (err) - put_device(dev); - else - umc_bus_rescan(dev->parent); - - return err; -} - -static int umc_device_remove(struct device *dev) -{ - struct umc_dev *umc; - struct umc_driver *umc_driver; - - umc_driver = to_umc_driver(dev->driver); - umc = to_umc_dev(dev); - - umc_driver->remove(umc); - put_device(dev); - return 0; -} - -static ssize_t capability_id_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct umc_dev *umc = to_umc_dev(dev); - - return sprintf(buf, "0x%02x\n", umc->cap_id); -} -static DEVICE_ATTR_RO(capability_id); - -static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct umc_dev *umc = to_umc_dev(dev); - - return sprintf(buf, "0x%04x\n", umc->version); -} -static DEVICE_ATTR_RO(version); - -static struct attribute *umc_dev_attrs[] = { - &dev_attr_capability_id.attr, - &dev_attr_version.attr, - NULL, -}; -ATTRIBUTE_GROUPS(umc_dev); - -struct bus_type umc_bus_type = { - .name = "umc", - .match = umc_bus_match, - .probe = umc_device_probe, - .remove = umc_device_remove, - .dev_groups = umc_dev_groups, -}; -EXPORT_SYMBOL_GPL(umc_bus_type); - -static int __init umc_bus_init(void) -{ - return bus_register(&umc_bus_type); -} -module_init(umc_bus_init); - -static void __exit umc_bus_exit(void) -{ - bus_unregister(&umc_bus_type); -} -module_exit(umc_bus_exit); - -MODULE_DESCRIPTION("UWB Multi-interface Controller capability bus"); -MODULE_AUTHOR("Cambridge Silicon Radio Ltd."); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/uwb/umc-dev.c b/drivers/staging/uwb/umc-dev.c deleted file mode 100644 index 0c71caae00be..000000000000 --- a/drivers/staging/uwb/umc-dev.c +++ /dev/null @@ -1,94 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * UWB Multi-interface Controller device management. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - */ -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/slab.h> -#include "include/umc.h" - -static void umc_device_release(struct device *dev) -{ - struct umc_dev *umc = to_umc_dev(dev); - - kfree(umc); -} - -/** - * umc_device_create - allocate a child UMC device - * @parent: parent of the new UMC device. - * @n: index of the new device. - * - * The new UMC device will have a bus ID of the parent with '-n' - * appended. 
- */ -struct umc_dev *umc_device_create(struct device *parent, int n) -{ - struct umc_dev *umc; - - umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL); - if (umc) { - dev_set_name(&umc->dev, "%s-%d", dev_name(parent), n); - umc->dev.parent = parent; - umc->dev.bus = &umc_bus_type; - umc->dev.release = umc_device_release; - - umc->dev.dma_mask = parent->dma_mask; - } - return umc; -} -EXPORT_SYMBOL_GPL(umc_device_create); - -/** - * umc_device_register - register a UMC device - * @umc: pointer to the UMC device - * - * The memory resource for the UMC device is acquired and the device - * registered with the system. - */ -int umc_device_register(struct umc_dev *umc) -{ - int err; - - err = request_resource(umc->resource.parent, &umc->resource); - if (err < 0) { - dev_err(&umc->dev, "can't allocate resource range %pR: %d\n", - &umc->resource, err); - goto error_request_resource; - } - - err = device_register(&umc->dev); - if (err < 0) - goto error_device_register; - return 0; - -error_device_register: - put_device(&umc->dev); - release_resource(&umc->resource); -error_request_resource: - return err; -} -EXPORT_SYMBOL_GPL(umc_device_register); - -/** - * umc_device_unregister - unregister a UMC device - * @umc: pointer to the UMC device - * - * First we unregister the device, make sure the driver can do it's - * resource release thing and then we try to release any left over - * resources. We take a ref to the device, to make sure it doesn't - * disappear under our feet. - */ -void umc_device_unregister(struct umc_dev *umc) -{ - struct device *dev; - if (!umc) - return; - dev = get_device(&umc->dev); - device_unregister(&umc->dev); - release_resource(&umc->resource); - put_device(dev); -} -EXPORT_SYMBOL_GPL(umc_device_unregister); diff --git a/drivers/staging/uwb/umc-drv.c b/drivers/staging/uwb/umc-drv.c deleted file mode 100644 index ed3bd220e8c2..000000000000 --- a/drivers/staging/uwb/umc-drv.c +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * UWB Multi-interface Controller driver management. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - */ -#include <linux/kernel.h> -#include <linux/export.h> -#include "include/umc.h" - -int __umc_driver_register(struct umc_driver *umc_drv, struct module *module, - const char *mod_name) -{ - umc_drv->driver.name = umc_drv->name; - umc_drv->driver.owner = module; - umc_drv->driver.mod_name = mod_name; - umc_drv->driver.bus = &umc_bus_type; - - return driver_register(&umc_drv->driver); -} -EXPORT_SYMBOL_GPL(__umc_driver_register); - -/** - * umc_driver_register - unregister a UMC capabiltity driver. - * @umc_drv: pointer to the driver. - */ -void umc_driver_unregister(struct umc_driver *umc_drv) -{ - driver_unregister(&umc_drv->driver); -} -EXPORT_SYMBOL_GPL(umc_driver_unregister); diff --git a/drivers/staging/uwb/uwb-debug.c b/drivers/staging/uwb/uwb-debug.c deleted file mode 100644 index dd14df219ef8..000000000000 --- a/drivers/staging/uwb/uwb-debug.c +++ /dev/null @@ -1,354 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band - * Debug support - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 
- * - * FIXME: doc - */ - -#include <linux/spinlock.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/notifier.h> -#include <linux/device.h> -#include <linux/debugfs.h> -#include <linux/uaccess.h> -#include <linux/seq_file.h> - -#include "include/debug-cmd.h" -#include "uwb-internal.h" - -/* - * Debug interface - * - * Per radio controller debugfs files (in uwb/uwbN/): - * - * command: Flexible command interface (see <linux/uwb/debug-cmd.h>). - * - * reservations: information on reservations. - * - * accept: Set to true (Y or 1) to accept reservation requests from - * peers. - * - * drp_avail: DRP availability information. - */ - -struct uwb_dbg { - struct uwb_pal pal; - - bool accept; - struct list_head rsvs; - - struct dentry *root_d; - struct dentry *command_f; - struct dentry *reservations_f; - struct dentry *accept_f; - struct dentry *drp_avail_f; - spinlock_t list_lock; -}; - -static struct dentry *root_dir; - -static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv) -{ - struct uwb_dbg *dbg = rsv->pal_priv; - - uwb_rsv_dump("debug", rsv); - - if (rsv->state == UWB_RSV_STATE_NONE) { - spin_lock(&dbg->list_lock); - list_del(&rsv->pal_node); - spin_unlock(&dbg->list_lock); - uwb_rsv_destroy(rsv); - } -} - -static int cmd_rsv_establish(struct uwb_rc *rc, - struct uwb_dbg_cmd_rsv_establish *cmd) -{ - struct uwb_mac_addr macaddr; - struct uwb_rsv *rsv; - struct uwb_dev *target; - int ret; - - memcpy(&macaddr, cmd->target, sizeof(macaddr)); - target = uwb_dev_get_by_macaddr(rc, &macaddr); - if (target == NULL) - return -ENODEV; - - rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, rc->dbg); - if (rsv == NULL) { - uwb_dev_put(target); - return -ENOMEM; - } - - rsv->target.type = UWB_RSV_TARGET_DEV; - rsv->target.dev = target; - rsv->type = cmd->type; - rsv->max_mas = cmd->max_mas; - rsv->min_mas = cmd->min_mas; - rsv->max_interval = cmd->max_interval; - - ret = uwb_rsv_establish(rsv); - if (ret) - uwb_rsv_destroy(rsv); - else { - spin_lock(&(rc->dbg)->list_lock); - list_add_tail(&rsv->pal_node, &rc->dbg->rsvs); - spin_unlock(&(rc->dbg)->list_lock); - } - return ret; -} - -static int cmd_rsv_terminate(struct uwb_rc *rc, - struct uwb_dbg_cmd_rsv_terminate *cmd) -{ - struct uwb_rsv *rsv, *found = NULL; - int i = 0; - - spin_lock(&(rc->dbg)->list_lock); - - list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) { - if (i == cmd->index) { - found = rsv; - uwb_rsv_get(found); - break; - } - i++; - } - - spin_unlock(&(rc->dbg)->list_lock); - - if (!found) - return -EINVAL; - - uwb_rsv_terminate(found); - uwb_rsv_put(found); - - return 0; -} - -static int cmd_ie_add(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_add) -{ - return uwb_rc_ie_add(rc, - (const struct uwb_ie_hdr *) ie_to_add->data, - ie_to_add->len); -} - -static int cmd_ie_rm(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_rm) -{ - return uwb_rc_ie_rm(rc, ie_to_rm->data[0]); -} - -static ssize_t command_write(struct file *file, const char __user *buf, - size_t len, loff_t *off) -{ - struct uwb_rc *rc = file->private_data; - struct uwb_dbg_cmd cmd; - int ret = 0; - - if (len != sizeof(struct uwb_dbg_cmd)) - return -EINVAL; - - if (copy_from_user(&cmd, buf, len) != 0) - return -EFAULT; - - switch (cmd.type) { - case UWB_DBG_CMD_RSV_ESTABLISH: - ret = cmd_rsv_establish(rc, &cmd.rsv_establish); - break; - case UWB_DBG_CMD_RSV_TERMINATE: - ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate); - break; - case UWB_DBG_CMD_IE_ADD: - ret = cmd_ie_add(rc, &cmd.ie_add); - break; - case UWB_DBG_CMD_IE_RM: - ret = cmd_ie_rm(rc, &cmd.ie_rm); - break; - 
case UWB_DBG_CMD_RADIO_START: - ret = uwb_radio_start(&rc->dbg->pal); - break; - case UWB_DBG_CMD_RADIO_STOP: - uwb_radio_stop(&rc->dbg->pal); - break; - default: - return -EINVAL; - } - - return ret < 0 ? ret : len; -} - -static const struct file_operations command_fops = { - .open = simple_open, - .write = command_write, - .read = NULL, - .llseek = no_llseek, - .owner = THIS_MODULE, -}; - -static int reservations_show(struct seq_file *s, void *p) -{ - struct uwb_rc *rc = s->private; - struct uwb_rsv *rsv; - - mutex_lock(&rc->rsvs_mutex); - - list_for_each_entry(rsv, &rc->reservations, rc_node) { - struct uwb_dev_addr devaddr; - char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; - bool is_owner; - - uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); - if (rsv->target.type == UWB_RSV_TARGET_DEV) { - devaddr = rsv->target.dev->dev_addr; - is_owner = &rc->uwb_dev == rsv->owner; - } else { - devaddr = rsv->target.devaddr; - is_owner = true; - } - uwb_dev_addr_print(target, sizeof(target), &devaddr); - - seq_printf(s, "%c %s -> %s: %s\n", - is_owner ? 'O' : 'T', - owner, target, uwb_rsv_state_str(rsv->state)); - seq_printf(s, " stream: %d type: %s\n", - rsv->stream, uwb_rsv_type_str(rsv->type)); - seq_printf(s, " %*pb\n", UWB_NUM_MAS, rsv->mas.bm); - } - - mutex_unlock(&rc->rsvs_mutex); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(reservations); - -static int drp_avail_show(struct seq_file *s, void *p) -{ - struct uwb_rc *rc = s->private; - - seq_printf(s, "global: %*pb\n", UWB_NUM_MAS, rc->drp_avail.global); - seq_printf(s, "local: %*pb\n", UWB_NUM_MAS, rc->drp_avail.local); - seq_printf(s, "pending: %*pb\n", UWB_NUM_MAS, rc->drp_avail.pending); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(drp_avail); - -static void uwb_dbg_channel_changed(struct uwb_pal *pal, int channel) -{ - struct device *dev = &pal->rc->uwb_dev.dev; - - if (channel > 0) - dev_info(dev, "debug: channel %d started\n", channel); - else - dev_info(dev, "debug: channel stopped\n"); -} - -static void uwb_dbg_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv) -{ - struct uwb_dbg *dbg = container_of(pal, struct uwb_dbg, pal); - - if (dbg->accept) { - spin_lock(&dbg->list_lock); - list_add_tail(&rsv->pal_node, &dbg->rsvs); - spin_unlock(&dbg->list_lock); - uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, dbg); - } -} - -/** - * uwb_dbg_add_rc - add a debug interface for a radio controller - * @rc: the radio controller - */ -void uwb_dbg_add_rc(struct uwb_rc *rc) -{ - rc->dbg = kzalloc(sizeof(struct uwb_dbg), GFP_KERNEL); - if (rc->dbg == NULL) - return; - - INIT_LIST_HEAD(&rc->dbg->rsvs); - spin_lock_init(&(rc->dbg)->list_lock); - - uwb_pal_init(&rc->dbg->pal); - rc->dbg->pal.rc = rc; - rc->dbg->pal.channel_changed = uwb_dbg_channel_changed; - rc->dbg->pal.new_rsv = uwb_dbg_new_rsv; - uwb_pal_register(&rc->dbg->pal); - - if (root_dir) { - rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev), - root_dir); - rc->dbg->command_f = debugfs_create_file("command", 0200, - rc->dbg->root_d, rc, - &command_fops); - rc->dbg->reservations_f = debugfs_create_file("reservations", 0444, - rc->dbg->root_d, rc, - &reservations_fops); - rc->dbg->accept_f = debugfs_create_bool("accept", 0644, - rc->dbg->root_d, - &rc->dbg->accept); - rc->dbg->drp_avail_f = debugfs_create_file("drp_avail", 0444, - rc->dbg->root_d, rc, - &drp_avail_fops); - } -} - -/** - * uwb_dbg_del_rc - remove a radio controller's debug interface - * @rc: the radio controller - */ -void uwb_dbg_del_rc(struct uwb_rc *rc) -{ - struct uwb_rsv *rsv, *t; - - if (rc->dbg == NULL) 
- return; - - list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) { - uwb_rsv_terminate(rsv); - } - - uwb_pal_unregister(&rc->dbg->pal); - - if (root_dir) { - debugfs_remove(rc->dbg->drp_avail_f); - debugfs_remove(rc->dbg->accept_f); - debugfs_remove(rc->dbg->reservations_f); - debugfs_remove(rc->dbg->command_f); - debugfs_remove(rc->dbg->root_d); - } -} - -/** - * uwb_dbg_exit - initialize the debug interface sub-module - */ -void uwb_dbg_init(void) -{ - root_dir = debugfs_create_dir("uwb", NULL); -} - -/** - * uwb_dbg_exit - clean-up the debug interface sub-module - */ -void uwb_dbg_exit(void) -{ - debugfs_remove(root_dir); -} - -/** - * uwb_dbg_create_pal_dir - create a debugfs directory for a PAL - * @pal: The PAL. - */ -struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal) -{ - struct uwb_rc *rc = pal->rc; - - if (root_dir && rc->dbg && rc->dbg->root_d && pal->name) - return debugfs_create_dir(pal->name, rc->dbg->root_d); - return NULL; -} diff --git a/drivers/staging/uwb/uwb-internal.h b/drivers/staging/uwb/uwb-internal.h deleted file mode 100644 index 4c2fdac7f610..000000000000 --- a/drivers/staging/uwb/uwb-internal.h +++ /dev/null @@ -1,366 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Ultra Wide Band - * UWB internal API - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This contains most of the internal API for UWB. This is stuff used - * across the stack that of course, is of no interest to the rest. - * - * Some parts might end up going public (like uwb_rc_*())... - */ - -#ifndef __UWB_INTERNAL_H__ -#define __UWB_INTERNAL_H__ - -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/mutex.h> -#include "uwb.h" - -struct uwb_beca_e; - -/* General device API */ -extern void uwb_dev_init(struct uwb_dev *uwb_dev); -extern int __uwb_dev_offair(struct uwb_dev *, struct uwb_rc *); -extern int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev, - struct uwb_rc *parent_rc); -extern void uwb_dev_rm(struct uwb_dev *uwb_dev); -extern void uwbd_dev_onair(struct uwb_rc *, struct uwb_beca_e *); -extern void uwbd_dev_offair(struct uwb_beca_e *); -void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event); - -/* General UWB Radio Controller Internal API */ -extern struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *); -static inline struct uwb_rc *__uwb_rc_get(struct uwb_rc *rc) -{ - uwb_dev_get(&rc->uwb_dev); - return rc; -} - -static inline void __uwb_rc_put(struct uwb_rc *rc) -{ - if (rc) - uwb_dev_put(&rc->uwb_dev); -} - -extern int uwb_rc_reset(struct uwb_rc *rc); -extern int uwb_rc_beacon(struct uwb_rc *rc, - int channel, unsigned bpst_offset); -extern int uwb_rc_scan(struct uwb_rc *rc, - unsigned channel, enum uwb_scan_type type, - unsigned bpst_offset); -extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc); - -void uwb_rc_ie_init(struct uwb_rc *); -int uwb_rc_ie_setup(struct uwb_rc *); -void uwb_rc_ie_release(struct uwb_rc *); -int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len, - char *buf, size_t size); -int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *); - - -extern const char *uwb_rc_strerror(unsigned code); - -/* - * Time to wait for a response to an RC command. - * - * Some commands can take a long time to response. e.g., START_BEACON - * may scan for several superframes before joining an existing beacon - * group and this can take around 600 ms. 
- */ -#define UWB_RC_CMD_TIMEOUT_MS 1000 /* ms */ - -/* - * Notification/Event Handlers - */ - -struct uwb_rc_neh; - -extern int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - u8 expected_type, u16 expected_event, - uwb_rc_cmd_cb_f cb, void *arg); - - -void uwb_rc_neh_create(struct uwb_rc *rc); -void uwb_rc_neh_destroy(struct uwb_rc *rc); - -struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd, - u8 expected_type, u16 expected_event, - uwb_rc_cmd_cb_f cb, void *arg); -void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh); -void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh); -void uwb_rc_neh_put(struct uwb_rc_neh *neh); - -/* Event size tables */ -extern int uwb_est_create(void); -extern void uwb_est_destroy(void); - -/* - * UWB conflicting alien reservations - */ -struct uwb_cnflt_alien { - struct uwb_rc *rc; - struct list_head rc_node; - struct uwb_mas_bm mas; - struct timer_list timer; - struct work_struct cnflt_update_work; -}; - -enum uwb_uwb_rsv_alloc_result { - UWB_RSV_ALLOC_FOUND = 0, - UWB_RSV_ALLOC_NOT_FOUND, -}; - -enum uwb_rsv_mas_status { - UWB_RSV_MAS_NOT_AVAIL = 1, - UWB_RSV_MAS_SAFE, - UWB_RSV_MAS_UNSAFE, -}; - -struct uwb_rsv_col_set_info { - unsigned char start_col; - unsigned char interval; - unsigned char safe_mas_per_col; - unsigned char unsafe_mas_per_col; -}; - -struct uwb_rsv_col_info { - unsigned char max_avail_safe; - unsigned char max_avail_unsafe; - unsigned char highest_mas[UWB_MAS_PER_ZONE]; - struct uwb_rsv_col_set_info csi; -}; - -struct uwb_rsv_row_info { - unsigned char avail[UWB_MAS_PER_ZONE]; - unsigned char free_rows; - unsigned char used_rows; -}; - -/* - * UWB find allocation - */ -struct uwb_rsv_alloc_info { - unsigned char bm[UWB_MAS_PER_ZONE * UWB_NUM_ZONES]; - struct uwb_rsv_col_info ci[UWB_NUM_ZONES]; - struct uwb_rsv_row_info ri; - struct uwb_mas_bm *not_available; - struct uwb_mas_bm *result; - int min_mas; - int max_mas; - int max_interval; - int total_allocated_mases; - int safe_allocated_mases; - int unsafe_allocated_mases; - int interval; -}; - -int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, - struct uwb_mas_bm *available, - struct uwb_mas_bm *result); -void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc); -/* - * UWB Events & management daemon - */ - -/** - * enum uwb_event_type - types of UWB management daemon events - * - * The UWB management daemon (uwbd) can receive two types of events: - * UWB_EVT_TYPE_NOTIF - notification from the radio controller. - * UWB_EVT_TYPE_MSG - a simple message. - */ -enum uwb_event_type { - UWB_EVT_TYPE_NOTIF, - UWB_EVT_TYPE_MSG, -}; - -/** - * struct uwb_event_notif - an event for a radio controller notification - * @size: Size of the buffer (ie: Guaranteed to contain at least - * a full 'struct uwb_rceb') - * @rceb: Pointer to a kmalloced() event payload - */ -struct uwb_event_notif { - size_t size; - struct uwb_rceb *rceb; -}; - -/** - * enum uwb_event_message - an event for a message for asynchronous processing - * - * UWB_EVT_MSG_RESET - reset the radio controller and all PAL hardware. - */ -enum uwb_event_message { - UWB_EVT_MSG_RESET, -}; - -/** - * UWB Event - * @rc: Radio controller that emitted the event (referenced) - * @ts_jiffies: Timestamp, when was it received - * @type: This event's type. 
- */ -struct uwb_event { - struct list_head list_node; - struct uwb_rc *rc; - unsigned long ts_jiffies; - enum uwb_event_type type; - union { - struct uwb_event_notif notif; - enum uwb_event_message message; - }; -}; - -extern void uwbd_start(struct uwb_rc *rc); -extern void uwbd_stop(struct uwb_rc *rc); -extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask); -extern void uwbd_event_queue(struct uwb_event *); -void uwbd_flush(struct uwb_rc *rc); - -/* UWB event handlers */ -extern int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *); -extern int uwbd_evt_handle_rc_beacon(struct uwb_event *); -extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *); -extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *); -extern int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *); -extern int uwbd_evt_handle_rc_drp(struct uwb_event *); -extern int uwbd_evt_handle_rc_drp_avail(struct uwb_event *); - -int uwbd_msg_handle_reset(struct uwb_event *evt); - - -/* - * Address management - */ -int uwb_rc_dev_addr_assign(struct uwb_rc *rc); -int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt); - -/* - * UWB Beacon Cache - * - * Each beacon we received is kept in a cache--when we receive that - * beacon consistently, that means there is a new device that we have - * to add to the system. - */ - -extern unsigned long beacon_timeout_ms; - -/** - * Beacon cache entry - * - * @jiffies_refresh: last time a beacon was received that refreshed - * this cache entry. - * @uwb_dev: device connected to this beacon. This pointer is not - * safe, you need to get it with uwb_dev_try_get() - * - * @hits: how many time we have seen this beacon since last time we - * cleared it - */ -struct uwb_beca_e { - struct mutex mutex; - struct kref refcnt; - struct list_head node; - struct uwb_mac_addr *mac_addr; - struct uwb_dev_addr dev_addr; - u8 hits; - unsigned long ts_jiffies; - struct uwb_dev *uwb_dev; - struct uwb_rc_evt_beacon *be; - struct stats lqe_stats, rssi_stats; /* radio statistics */ -}; -struct uwb_beacon_frame; -extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *, - char *, size_t); - -extern void uwb_bce_kfree(struct kref *_bce); -static inline void uwb_bce_get(struct uwb_beca_e *bce) -{ - kref_get(&bce->refcnt); -} -static inline void uwb_bce_put(struct uwb_beca_e *bce) -{ - kref_put(&bce->refcnt, uwb_bce_kfree); -} -extern void uwb_beca_purge(struct uwb_rc *rc); -extern void uwb_beca_release(struct uwb_rc *rc); - -struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, - const struct uwb_dev_addr *devaddr); -struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, - const struct uwb_mac_addr *macaddr); - -int uwb_radio_setup(struct uwb_rc *rc); -void uwb_radio_reset_state(struct uwb_rc *rc); -void uwb_radio_shutdown(struct uwb_rc *rc); -int uwb_radio_force_channel(struct uwb_rc *rc, int channel); - -/* -- UWB Sysfs representation */ -extern struct class uwb_rc_class; -extern struct bus_type uwb_bus_type; -extern struct device_attribute dev_attr_mac_address; -extern struct device_attribute dev_attr_beacon; -extern struct device_attribute dev_attr_scan; - -/* -- DRP Bandwidth allocator: bandwidth allocations, reservations, DRP */ -void uwb_rsv_init(struct uwb_rc *rc); -int uwb_rsv_setup(struct uwb_rc *rc); -void uwb_rsv_cleanup(struct uwb_rc *rc); -void uwb_rsv_remove_all(struct uwb_rc *rc); -void uwb_rsv_get(struct uwb_rsv *rsv); -void uwb_rsv_put(struct uwb_rsv *rsv); -bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv); -void uwb_rsv_dump(char *text, struct 
uwb_rsv *rsv); -int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available); -void uwb_rsv_backoff_win_timer(struct timer_list *t); -void uwb_rsv_backoff_win_increment(struct uwb_rc *rc); -int uwb_rsv_status(struct uwb_rsv *rsv); -int uwb_rsv_companion_status(struct uwb_rsv *rsv); - -void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state); -void uwb_rsv_remove(struct uwb_rsv *rsv); -struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, - struct uwb_ie_drp *drp_ie); -void uwb_rsv_sched_update(struct uwb_rc *rc); -void uwb_rsv_queue_update(struct uwb_rc *rc); - -int uwb_drp_ie_update(struct uwb_rsv *rsv); -void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie); - -void uwb_drp_avail_init(struct uwb_rc *rc); -void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail); -int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas); -void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas); -void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas); -void uwb_drp_avail_ie_update(struct uwb_rc *rc); - -/* -- PAL support */ -void uwb_rc_pal_init(struct uwb_rc *rc); - -/* -- Misc */ - -extern ssize_t uwb_mac_frame_hdr_print(char *, size_t, - const struct uwb_mac_frame_hdr *); - -/* -- Debug interface */ -void uwb_dbg_init(void); -void uwb_dbg_exit(void); -void uwb_dbg_add_rc(struct uwb_rc *rc); -void uwb_dbg_del_rc(struct uwb_rc *rc); -struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal); - -static inline void uwb_dev_lock(struct uwb_dev *uwb_dev) -{ - device_lock(&uwb_dev->dev); -} - -static inline void uwb_dev_unlock(struct uwb_dev *uwb_dev) -{ - device_unlock(&uwb_dev->dev); -} - -#endif /* #ifndef __UWB_INTERNAL_H__ */ diff --git a/drivers/staging/uwb/uwb.h b/drivers/staging/uwb/uwb.h deleted file mode 100644 index 6a59706ba3a0..000000000000 --- a/drivers/staging/uwb/uwb.h +++ /dev/null @@ -1,817 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Ultra Wide Band - * UWB API - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: doc: overview of the API, different parts and pointers - */ - -#ifndef __LINUX__UWB_H__ -#define __LINUX__UWB_H__ - -#include <linux/limits.h> -#include <linux/device.h> -#include <linux/mutex.h> -#include <linux/timer.h> -#include <linux/wait.h> -#include <linux/workqueue.h> -#include <asm/page.h> -#include "include/spec.h" - -struct uwb_dev; -struct uwb_beca_e; -struct uwb_rc; -struct uwb_rsv; -struct uwb_dbg; - -/** - * struct uwb_dev - a UWB Device - * @rc: UWB Radio Controller that discovered the device (kind of its - * parent). - * @bce: a beacon cache entry for this device; or NULL if the device - * is a local radio controller. - * @mac_addr: the EUI-48 address of this device. - * @dev_addr: the current DevAddr used by this device. - * @beacon_slot: the slot number the beacon is using. - * @streams: bitmap of streams allocated to reservations targeted at - * this device. For an RC, this is the streams allocated for - * reservations targeted at DevAddrs. - * - * A UWB device may either by a neighbor or part of a local radio - * controller. 
- */ -struct uwb_dev { - struct mutex mutex; - struct list_head list_node; - struct device dev; - struct uwb_rc *rc; /* radio controller */ - struct uwb_beca_e *bce; /* Beacon Cache Entry */ - - struct uwb_mac_addr mac_addr; - struct uwb_dev_addr dev_addr; - int beacon_slot; - DECLARE_BITMAP(streams, UWB_NUM_STREAMS); - DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS); -}; -#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) - -/** - * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs - * - * RC[CE]Bs have a 'context ID' field that matches the command with - * the event received to confirm it. - * - * Maximum number of context IDs - */ -enum { UWB_RC_CTX_MAX = 256 }; - - -/** Notification chain head for UWB generated events to listeners */ -struct uwb_notifs_chain { - struct list_head list; - struct mutex mutex; -}; - -/* Beacon cache list */ -struct uwb_beca { - struct list_head list; - size_t entries; - struct mutex mutex; -}; - -/* Event handling thread. */ -struct uwbd { - int pid; - struct task_struct *task; - wait_queue_head_t wq; - struct list_head event_list; - spinlock_t event_list_lock; -}; - -/** - * struct uwb_mas_bm - a bitmap of all MAS in a superframe - * @bm: a bitmap of length #UWB_NUM_MAS - */ -struct uwb_mas_bm { - DECLARE_BITMAP(bm, UWB_NUM_MAS); - DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS); - int safe; - int unsafe; -}; - -/** - * uwb_rsv_state - UWB Reservation state. - * - * NONE - reservation is not active (no DRP IE being transmitted). - * - * Owner reservation states: - * - * INITIATED - owner has sent an initial DRP request. - * PENDING - target responded with pending Reason Code. - * MODIFIED - reservation manager is modifying an established - * reservation with a different MAS allocation. - * ESTABLISHED - the reservation has been successfully negotiated. - * - * Target reservation states: - * - * DENIED - request is denied. - * ACCEPTED - request is accepted. - * PENDING - PAL has yet to make a decision to whether to accept or - * deny. - * - * FIXME: further target states TBD. - */ -enum uwb_rsv_state { - UWB_RSV_STATE_NONE = 0, - UWB_RSV_STATE_O_INITIATED, - UWB_RSV_STATE_O_PENDING, - UWB_RSV_STATE_O_MODIFIED, - UWB_RSV_STATE_O_ESTABLISHED, - UWB_RSV_STATE_O_TO_BE_MOVED, - UWB_RSV_STATE_O_MOVE_EXPANDING, - UWB_RSV_STATE_O_MOVE_COMBINING, - UWB_RSV_STATE_O_MOVE_REDUCING, - UWB_RSV_STATE_T_ACCEPTED, - UWB_RSV_STATE_T_DENIED, - UWB_RSV_STATE_T_CONFLICT, - UWB_RSV_STATE_T_PENDING, - UWB_RSV_STATE_T_EXPANDING_ACCEPTED, - UWB_RSV_STATE_T_EXPANDING_CONFLICT, - UWB_RSV_STATE_T_EXPANDING_PENDING, - UWB_RSV_STATE_T_EXPANDING_DENIED, - UWB_RSV_STATE_T_RESIZED, - - UWB_RSV_STATE_LAST, -}; - -enum uwb_rsv_target_type { - UWB_RSV_TARGET_DEV, - UWB_RSV_TARGET_DEVADDR, -}; - -/** - * struct uwb_rsv_target - the target of a reservation. - * - * Reservations unicast and targeted at a single device - * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a - * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR). - */ -struct uwb_rsv_target { - enum uwb_rsv_target_type type; - union { - struct uwb_dev *dev; - struct uwb_dev_addr devaddr; - }; -}; - -struct uwb_rsv_move { - struct uwb_mas_bm final_mas; - struct uwb_ie_drp *companion_drp_ie; - struct uwb_mas_bm companion_mas; -}; - -/* - * Number of streams reserved for reservations targeted at DevAddrs. 
- */ -#define UWB_NUM_GLOBAL_STREAMS 1 - -typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv); - -/** - * struct uwb_rsv - a DRP reservation - * - * Data structure management: - * - * @rc: the radio controller this reservation is for - * (as target or owner) - * @rc_node: a list node for the RC - * @pal_node: a list node for the PAL - * - * Owner and target parameters: - * - * @owner: the UWB device owning this reservation - * @target: the target UWB device - * @type: reservation type - * - * Owner parameters: - * - * @max_mas: maxiumum number of MAS - * @min_mas: minimum number of MAS - * @sparsity: owner selected sparsity - * @is_multicast: true iff multicast - * - * @callback: callback function when the reservation completes - * @pal_priv: private data for the PAL making the reservation - * - * Reservation status: - * - * @status: negotiation status - * @stream: stream index allocated for this reservation - * @tiebreaker: conflict tiebreaker for this reservation - * @mas: reserved MAS - * @drp_ie: the DRP IE - * @ie_valid: true iff the DRP IE matches the reservation parameters - * - * DRP reservations are uniquely identified by the owner, target and - * stream index. However, when using a DevAddr as a target (e.g., for - * a WUSB cluster reservation) the responses may be received from - * devices with different DevAddrs. In this case, reservations are - * uniquely identified by just the stream index. A number of stream - * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this. - */ -struct uwb_rsv { - struct uwb_rc *rc; - struct list_head rc_node; - struct list_head pal_node; - struct kref kref; - - struct uwb_dev *owner; - struct uwb_rsv_target target; - enum uwb_drp_type type; - int max_mas; - int min_mas; - int max_interval; - bool is_multicast; - - uwb_rsv_cb_f callback; - void *pal_priv; - - enum uwb_rsv_state state; - bool needs_release_companion_mas; - u8 stream; - u8 tiebreaker; - struct uwb_mas_bm mas; - struct uwb_ie_drp *drp_ie; - struct uwb_rsv_move mv; - bool ie_valid; - struct timer_list timer; - struct work_struct handle_timeout_work; -}; - -static const -struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } }; - -static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas) -{ - bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS); -} - -/** - * struct uwb_drp_avail - a radio controller's view of MAS usage - * @global: MAS unused by neighbors (excluding reservations targeted - * or owned by the local radio controller) or the beaon period - * @local: MAS unused by local established reservations - * @pending: MAS unused by local pending reservations - * @ie: DRP Availability IE to be included in the beacon - * @ie_valid: true iff @ie is valid and does not need to regenerated from - * @global and @local - * - * Each radio controller maintains a view of MAS usage or - * availability. MAS available for a new reservation are determined - * from the intersection of @global, @local, and @pending. - * - * The radio controller must transmit a DRP Availability IE that's the - * intersection of @global and @local. - * - * A set bit indicates the MAS is unused and available. - * - * rc->rsvs_mutex should be held before accessing this data structure. - * - * [ECMA-368] section 17.4.3. 
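The availability rule spelled out above reduces to bitwise intersection: MAS usable for a brand-new reservation must be set in @global, @local and @pending alike, while the DRP Availability IE advertises only the intersection of @global and @local. The standalone sketch below shows that arithmetic with plain uint64_t word arrays standing in for the kernel bitmap API; the mas_* function names are invented for illustration and are not part of the driver.

#include <stdio.h>
#include <stdint.h>

#define UWB_NUM_MAS 256                 /* MAS slots per superframe */
#define MAS_WORDS   (UWB_NUM_MAS / 64)

/* MAS usable for a brand-new reservation: free in all three views. */
static void mas_available_for_new_rsv(uint64_t out[MAS_WORDS],
                                      const uint64_t global[MAS_WORDS],
                                      const uint64_t local[MAS_WORDS],
                                      const uint64_t pending[MAS_WORDS])
{
        for (int i = 0; i < MAS_WORDS; i++)
                out[i] = global[i] & local[i] & pending[i];
}

/* What the DRP Availability IE advertises: @global AND @local only. */
static void mas_for_avail_ie(uint64_t out[MAS_WORDS],
                             const uint64_t global[MAS_WORDS],
                             const uint64_t local[MAS_WORDS])
{
        for (int i = 0; i < MAS_WORDS; i++)
                out[i] = global[i] & local[i];
}

int main(void)
{
        uint64_t global[MAS_WORDS], local[MAS_WORDS], pending[MAS_WORDS];
        uint64_t for_new[MAS_WORDS], for_ie[MAS_WORDS];

        for (int i = 0; i < MAS_WORDS; i++) {
                global[i]  = ~0ULL;     /* neighbours leave everything free */
                local[i]   = ~0ULL;
                pending[i] = ~0ULL;
        }
        local[0] &= ~0xffULL;           /* an established local reservation holds MAS 0-7 */

        mas_available_for_new_rsv(for_new, global, local, pending);
        mas_for_avail_ie(for_ie, global, local);
        printf("free for new rsv: %#llx, advertised: %#llx\n",
               (unsigned long long)for_new[0], (unsigned long long)for_ie[0]);
        return 0;
}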
- */ -struct uwb_drp_avail { - DECLARE_BITMAP(global, UWB_NUM_MAS); - DECLARE_BITMAP(local, UWB_NUM_MAS); - DECLARE_BITMAP(pending, UWB_NUM_MAS); - struct uwb_ie_drp_avail ie; - bool ie_valid; -}; - -struct uwb_drp_backoff_win { - u8 window; - u8 n; - int total_expired; - struct timer_list timer; - bool can_reserve_extra_mases; -}; - -const char *uwb_rsv_state_str(enum uwb_rsv_state state); -const char *uwb_rsv_type_str(enum uwb_drp_type type); - -struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, - void *pal_priv); -void uwb_rsv_destroy(struct uwb_rsv *rsv); - -int uwb_rsv_establish(struct uwb_rsv *rsv); -int uwb_rsv_modify(struct uwb_rsv *rsv, - int max_mas, int min_mas, int sparsity); -void uwb_rsv_terminate(struct uwb_rsv *rsv); - -void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); - -void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas); - -/** - * Radio Control Interface instance - * - * - * Life cycle rules: those of the UWB Device. - * - * @index: an index number for this radio controller, as used in the - * device name. - * @version: version of protocol supported by this device - * @priv: Backend implementation; rw with uwb_dev.dev.sem taken. - * @cmd: Backend implementation to execute commands; rw and call - * only with uwb_dev.dev.sem taken. - * @reset: Hardware reset of radio controller and any PAL controllers. - * @filter: Backend implementation to manipulate data to and from device - * to be compliant to specification assumed by driver (WHCI - * 0.95). - * - * uwb_dev.dev.mutex is used to execute commands and update - * the corresponding structures; can't use a spinlock - * because rc->cmd() can sleep. - * @ies: This is a dynamically allocated array cacheing the - * IEs (settable by the host) that the beacon of this - * radio controller is currently sending. - * - * In reality, we store here the full command we set to - * the radio controller (which is basically a command - * prefix followed by all the IEs the beacon currently - * contains). This way we don't have to realloc and - * memcpy when setting it. - * - * We set this up in uwb_rc_ie_setup(), where we alloc - * this struct, call get_ie() [so we know which IEs are - * currently being sent, if any]. - * - * @ies_capacity:Amount of space (in bytes) allocated in @ies. The - * amount used is given by sizeof(*ies) plus ies->wIELength - * (which is a little endian quantity all the time). 
- * @ies_mutex: protect the IE cache - * @dbg: information for the debug interface - */ -struct uwb_rc { - struct uwb_dev uwb_dev; - int index; - u16 version; - - struct module *owner; - void *priv; - int (*start)(struct uwb_rc *rc); - void (*stop)(struct uwb_rc *rc); - int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t); - int (*reset)(struct uwb_rc *rc); - int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *); - int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t, - size_t *, size_t *); - - spinlock_t neh_lock; /* protects neh_* and ctx_* */ - struct list_head neh_list; /* Open NE handles */ - unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)]; - u8 ctx_roll; - - int beaconing; /* Beaconing state [channel number] */ - int beaconing_forced; - int scanning; - enum uwb_scan_type scan_type:3; - unsigned ready:1; - struct uwb_notifs_chain notifs_chain; - struct uwb_beca uwb_beca; - - struct uwbd uwbd; - - struct uwb_drp_backoff_win bow; - struct uwb_drp_avail drp_avail; - struct list_head reservations; - struct list_head cnflt_alien_list; - struct uwb_mas_bm cnflt_alien_bitmap; - struct mutex rsvs_mutex; - spinlock_t rsvs_lock; - struct workqueue_struct *rsv_workq; - - struct delayed_work rsv_update_work; - struct delayed_work rsv_alien_bp_work; - int set_drp_ie_pending; - struct mutex ies_mutex; - struct uwb_rc_cmd_set_ie *ies; - size_t ies_capacity; - - struct list_head pals; - int active_pals; - - struct uwb_dbg *dbg; -}; - - -/** - * struct uwb_pal - a UWB PAL - * @name: descriptive name for this PAL (wusbhc, wlp, etc.). - * @device: a device for the PAL. Used to link the PAL and the radio - * controller in sysfs. - * @rc: the radio controller the PAL uses. - * @channel_changed: called when the channel used by the radio changes. - * A channel of -1 means the channel has been stopped. - * @new_rsv: called when a peer requests a reservation (may be NULL if - * the PAL cannot accept reservation requests). - * @channel: channel being used by the PAL; 0 if the PAL isn't using - * the radio; -1 if the PAL wishes to use the radio but - * cannot. - * @debugfs_dir: a debugfs directory which the PAL can use for its own - * debugfs files. - * - * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB - * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). - * - * The PALs using a radio controller must register themselves to - * permit the UWB stack to coordinate usage of the radio between the - * various PALs or to allow PALs to response to certain requests from - * peers. - * - * A struct uwb_pal should be embedded in a containing structure - * belonging to the PAL and initialized with uwb_pal_init()). Fields - * should be set appropriately by the PAL before registering the PAL - * with uwb_pal_register(). 
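The flow described above (embed a struct uwb_pal in the PAL's own state, uwb_pal_init() it, fill in the fields, then uwb_pal_register()) relies on container_of() to get from the embedded member back to the containing structure when a callback fires. The standalone model below mirrors that pattern with invented stand-ins (uwb_pal_like, foopal); it also keeps the convention, documented above, that a channel of -1 means the radio has stopped.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the real struct uwb_pal; illustration only. */
struct uwb_pal_like {
        const char *name;
        void (*channel_changed)(struct uwb_pal_like *pal, int channel);
};

/* A hypothetical PAL embeds the pal member in its own state. */
struct foopal {
        int private_state;
        struct uwb_pal_like pal;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void foopal_channel_changed(struct uwb_pal_like *pal, int channel)
{
        struct foopal *fp = container_of(pal, struct foopal, pal);

        if (channel < 0)
                printf("%s: radio stopped (state=%d)\n", pal->name, fp->private_state);
        else
                printf("%s: now on channel %d\n", pal->name, channel);
}

int main(void)
{
        struct foopal fp = { .private_state = 42 };

        /* In the kernel this is uwb_pal_init() plus field setup, then uwb_pal_register(). */
        fp.pal.name = "foopal";
        fp.pal.channel_changed = foopal_channel_changed;

        fp.pal.channel_changed(&fp.pal, 9);
        fp.pal.channel_changed(&fp.pal, -1);
        return 0;
}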
- */ -struct uwb_pal { - struct list_head node; - const char *name; - struct device *device; - struct uwb_rc *rc; - - void (*channel_changed)(struct uwb_pal *pal, int channel); - void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv); - - int channel; - struct dentry *debugfs_dir; -}; - -void uwb_pal_init(struct uwb_pal *pal); -int uwb_pal_register(struct uwb_pal *pal); -void uwb_pal_unregister(struct uwb_pal *pal); - -int uwb_radio_start(struct uwb_pal *pal); -void uwb_radio_stop(struct uwb_pal *pal); - -/* - * General public API - * - * This API can be used by UWB device drivers or by those implementing - * UWB Radio Controllers - */ -struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, - const struct uwb_dev_addr *devaddr); -struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *); -static inline void uwb_dev_get(struct uwb_dev *uwb_dev) -{ - get_device(&uwb_dev->dev); -} -static inline void uwb_dev_put(struct uwb_dev *uwb_dev) -{ - put_device(&uwb_dev->dev); -} -struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev); - -/** - * Callback function for 'uwb_{dev,rc}_foreach()'. - * - * @dev: Linux device instance - * 'uwb_dev = container_of(dev, struct uwb_dev, dev)' - * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'. - * - * @returns: 0 to continue the iterations, any other val to stop - * iterating and return the value to the caller of - * _foreach(). - */ -typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv); -int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv); - -struct uwb_rc *uwb_rc_alloc(void); -struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *); -struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *); -void uwb_rc_put(struct uwb_rc *rc); - -typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg, - struct uwb_rceb *reply, ssize_t reply_size); - -int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - u8 expected_type, u16 expected_event, - uwb_rc_cmd_cb_f cb, void *arg); -ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - struct uwb_rceb *reply, size_t reply_size); -ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - u8 expected_type, u16 expected_event, - struct uwb_rceb **preply); - -size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); - -int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *); -int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *); -int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *); -int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *); -int __uwb_mac_addr_assigned_check(struct device *, void *); -int __uwb_dev_addr_assigned_check(struct device *, void *); - -/* Print in @buf a pretty repr of @addr */ -static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size, - const struct uwb_dev_addr *addr) -{ - return __uwb_addr_print(buf, buf_size, addr->data, 0); -} - -/* Print in @buf a pretty repr of @addr */ -static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size, - const struct uwb_mac_addr *addr) -{ - return __uwb_addr_print(buf, buf_size, addr->data, 1); -} - -/* @returns 0 if device addresses @addr2 and @addr1 are equal */ -static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1, - const struct uwb_dev_addr *addr2) -{ - return memcmp(addr1, addr2, sizeof(*addr1)); -} - -/* @returns 0 if MAC 
addresses @addr2 and @addr1 are equal */ -static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1, - const struct uwb_mac_addr *addr2) -{ - return memcmp(addr1, addr2, sizeof(*addr1)); -} - -/* @returns !0 if a MAC @addr is a broadcast address */ -static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr) -{ - struct uwb_mac_addr bcast = { - .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } - }; - return !uwb_mac_addr_cmp(addr, &bcast); -} - -/* @returns !0 if a MAC @addr is all zeroes*/ -static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr) -{ - struct uwb_mac_addr unset = { - .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } - }; - return !uwb_mac_addr_cmp(addr, &unset); -} - -/* @returns !0 if the address is in use. */ -static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc, - struct uwb_dev_addr *addr) -{ - return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr); -} - -/* - * UWB Radio Controller API - * - * This API is used (in addition to the general API) to implement UWB - * Radio Controllers. - */ -void uwb_rc_init(struct uwb_rc *); -int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv); -void uwb_rc_rm(struct uwb_rc *); -void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); -void uwb_rc_neh_error(struct uwb_rc *, int); -void uwb_rc_reset_all(struct uwb_rc *rc); -void uwb_rc_pre_reset(struct uwb_rc *rc); -int uwb_rc_post_reset(struct uwb_rc *rc); - -/** - * uwb_rsv_is_owner - is the owner of this reservation the RC? - * @rsv: the reservation - */ -static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv) -{ - return rsv->owner == &rsv->rc->uwb_dev; -} - -/** - * enum uwb_notifs - UWB events that can be passed to any listeners - * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group. - * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group. - * - * Higher layers can register callback functions with the radio - * controller using uwb_notifs_register(). The radio controller - * maintains a list of all registered handlers and will notify all - * nodes when an event occurs. - */ -enum uwb_notifs { - UWB_NOTIF_ONAIR, - UWB_NOTIF_OFFAIR, -}; - -/* Callback function registered with UWB */ -struct uwb_notifs_handler { - struct list_head list_node; - void (*cb)(void *, struct uwb_dev *, enum uwb_notifs); - void *data; -}; - -int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *); -int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *); - - -/** - * UWB radio controller Event Size Entry (for creating entry tables) - * - * WUSB and WHCI define events and notifications, and they might have - * fixed or variable size. - * - * Each event/notification has a size which is not necessarily known - * in advance based on the event code. As well, vendor specific - * events/notifications will have a size impossible to determine - * unless we know about the device's specific details. - * - * It was way too smart of the spec writers not to think that it would - * be impossible for a generic driver to skip over vendor specific - * events/notifications if there are no LENGTH fields in the HEADER of - * each message...the transaction size cannot be counted on as the - * spec does not forbid to pack more than one event in a single - * transaction. - * - * Thus, we guess sizes with tables (or for events, when you know the - * size ahead of time you can use uwb_rc_neh_extra_size*()). We - * register tables with the known events and their sizes, and then we - * traverse those tables. 
For those with variable length, we provide a - * way to lookup the size inside the event/notification's - * payload. This allows device-specific event size tables to be - * registered. - * - * @size: Size of the payload - * - * @offset: if != 0, at offset @offset-1 starts a field with a length - * that has to be added to @size. The format of the field is - * given by @type. - * - * @type: Type and length of the offset field. Most common is LE 16 - * bits (that's why that is zero); others are there mostly to - * cover for bugs and weirdos. - */ -struct uwb_est_entry { - size_t size; - unsigned offset; - enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type; -}; - -int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product, - const struct uwb_est_entry *, size_t entries); -int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product, - const struct uwb_est_entry *, size_t entries); -ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, - size_t len); - -/* -- Misc */ - -enum { - EDC_MAX_ERRORS = 10, - EDC_ERROR_TIMEFRAME = HZ, -}; - -/* error density counter */ -struct edc { - unsigned long timestart; - u16 errorcount; -}; - -static inline -void edc_init(struct edc *edc) -{ - edc->timestart = jiffies; -} - -/* Called when an error occurred. - * This is way to determine if the number of acceptable errors per time - * period has been exceeded. It is not accurate as there are cases in which - * this scheme will not work, for example if there are periodic occurrences - * of errors that straddle updates to the start time. This scheme is - * sufficient for our usage. - * - * @returns 1 if maximum acceptable errors per timeframe has been exceeded. - */ -static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe) -{ - unsigned long now; - - now = jiffies; - if (now - err_hist->timestart > timeframe) { - err_hist->errorcount = 1; - err_hist->timestart = now; - } else if (++err_hist->errorcount > max_err) { - err_hist->errorcount = 0; - err_hist->timestart = now; - return 1; - } - return 0; -} - - -/* Information Element handling */ - -struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); -int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size); -int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id); - -/* - * Transmission statistics - * - * UWB uses LQI and RSSI (one byte values) for reporting radio signal - * strength and line quality indication. We do quick and dirty - * averages of those. They are signed values, btw. - * - * For 8 bit quantities, we keep the min, the max, an accumulator - * (@sigma) and a # of samples. When @samples gets to 255, we compute - * the average (@sigma / @samples), place it in @sigma and reset - * @samples to 1 (so we use it as the first sample). - * - * Now, statistically speaking, probably I am kicking the kidneys of - * some books I have in my shelves collecting dust, but I just want to - * get an approx, not the Nobel. - * - * LOCKING: there is no locking per se, but we try to keep a lockless - * schema. Only _add_samples() modifies the values--as long as you - * have other locking on top that makes sure that no two calls of - * _add_sample() happen at the same time, then we are fine. Now, for - * resetting the values we just set @samples to 0 and that makes the - * next _add_sample() to start with defaults. Reading the values in - * _show() currently can race, so you need to make sure the calls are - * under the same lock that protects calls to _add_sample(). 
FIXME: - * currently unlocked (It is not ultraprecise but does the trick. Bite - * me). - */ -struct stats { - s8 min, max; - s16 sigma; - atomic_t samples; -}; - -static inline -void stats_init(struct stats *stats) -{ - atomic_set(&stats->samples, 0); - wmb(); -} - -static inline -void stats_add_sample(struct stats *stats, s8 sample) -{ - s8 min, max; - s16 sigma; - unsigned samples = atomic_read(&stats->samples); - if (samples == 0) { /* it was zero before, so we initialize */ - min = 127; - max = -128; - sigma = 0; - } else { - min = stats->min; - max = stats->max; - sigma = stats->sigma; - } - - if (sample < min) /* compute new values */ - min = sample; - else if (sample > max) - max = sample; - sigma += sample; - - stats->min = min; /* commit */ - stats->max = max; - stats->sigma = sigma; - if (atomic_add_return(1, &stats->samples) > 255) { - /* wrapped around! reset */ - stats->sigma = sigma / 256; - atomic_set(&stats->samples, 1); - } -} - -static inline ssize_t stats_show(struct stats *stats, char *buf) -{ - int min, max, avg; - int samples = atomic_read(&stats->samples); - if (samples == 0) - min = max = avg = 0; - else { - min = stats->min; - max = stats->max; - avg = stats->sigma / samples; - } - return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg); -} - -static inline ssize_t stats_store(struct stats *stats, const char *buf, - size_t size) -{ - stats_init(stats); - return size; -} - -#endif /* #ifndef __LINUX__UWB_H__ */ diff --git a/drivers/staging/uwb/uwbd.c b/drivers/staging/uwb/uwbd.c deleted file mode 100644 index dc5c743d4f5f..000000000000 --- a/drivers/staging/uwb/uwbd.c +++ /dev/null @@ -1,356 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Ultra Wide Band - * Neighborhood Management Daemon - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This daemon takes care of maintaing information that describes the - * UWB neighborhood that the radios in this machine can see. It also - * keeps a tab of which devices are visible, makes sure each HC sits - * on a different channel to avoid interfering, etc. - * - * Different drivers (radio controller, device, any API in general) - * communicate with this daemon through an event queue. Daemon wakes - * up, takes a list of events and handles them one by one; handling - * function is extracted from a table based on the event's type and - * subtype. Events are freed only if the handling function says so. - * - * . Lock protecting the event list has to be an spinlock and locked - * with IRQSAVE because it might be called from an interrupt - * context (ie: when events arrive and the notification drops - * down from the ISR). - * - * . UWB radio controller drivers queue events to the daemon using - * uwbd_event_queue(). They just get the event, chew it to make it - * look like UWBD likes it and pass it in a buffer allocated with - * uwb_event_alloc(). - * - * EVENTS - * - * Events have a type, a subtype, a length, some other stuff and the - * data blob, which depends on the event. The header is 'struct - * uwb_event'; for payloads, see 'struct uwbd_evt_*'. - * - * EVENT HANDLER TABLES - * - * To find a handling function for an event, the type is used to index - * a subtype-table in the type-table. The subtype-table is indexed - * with the subtype to get the function that handles the event. Start - * with the main type-table 'uwbd_evt_type_handler'. 
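The two-level lookup just described (the event type indexes a table of subtype tables, the subtype then selects the handler, and anything out of range or NULL is rejected) is easiest to see in isolation. Below is a standalone C sketch of the same dispatch shape; the evt/dispatch names and table contents are placeholders invented for illustration, and the real tables follow in this file.

#include <stdio.h>
#include <stddef.h>

struct evt { unsigned int type, subtype; };

typedef int (*evt_handler_f)(struct evt *);

static int handle_beacon(struct evt *e) { (void)e; printf("beacon\n"); return 0; }
static int handle_drp(struct evt *e)    { (void)e; printf("drp\n");    return 0; }

/* Subtype table for one event type; gaps stay NULL and are rejected below. */
static const evt_handler_f general_handlers[] = {
        [1] = handle_beacon,
        [4] = handle_drp,
};

struct evt_type_entry {
        const evt_handler_f *handlers;
        size_t n;
};

/* Type table: the event type picks a subtype table, the subtype picks the handler. */
static const struct evt_type_entry type_table[] = {
        [0] = { general_handlers, sizeof(general_handlers) / sizeof(general_handlers[0]) },
};

static int dispatch(struct evt *e)
{
        const struct evt_type_entry *t;

        if (e->type >= sizeof(type_table) / sizeof(type_table[0]))
                return -1;
        t = &type_table[e->type];
        if (!t->handlers || e->subtype >= t->n || !t->handlers[e->subtype])
                return -1;              /* unknown event: the caller logs it and frees it */
        return t->handlers[e->subtype](e);
}

int main(void)
{
        struct evt beacon = { .type = 0, .subtype = 1 };
        struct evt bogus  = { .type = 3, .subtype = 9 };

        dispatch(&beacon);
        printf("bogus -> %d\n", dispatch(&bogus));
        return 0;
}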
- * - * DEVICES - * - * Devices are created when a bunch of beacons have been received and - * it is stablished that the device has stable radio presence. CREATED - * only, not configured. Devices are ONLY configured when an - * Application-Specific IE Probe is receieved, in which the device - * declares which Protocol ID it groks. Then the device is CONFIGURED - * (and the driver->probe() stuff of the device model is invoked). - * - * Devices are considered disconnected when a certain number of - * beacons are not received in an amount of time. - * - * Handler functions are called normally uwbd_evt_handle_*(). - */ -#include <linux/kthread.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/freezer.h> - -#include "uwb-internal.h" - -/* - * UWBD Event handler function signature - * - * Return !0 if the event needs not to be freed (ie the handler - * takes/took care of it). 0 means the daemon code will free the - * event. - * - * @evt->rc is already referenced and guaranteed to exist. See - * uwb_evt_handle(). - */ -typedef int (*uwbd_evt_handler_f)(struct uwb_event *); - -/** - * Properties of a UWBD event - * - * @handler: the function that will handle this event - * @name: text name of event - */ -struct uwbd_event { - uwbd_evt_handler_f handler; - const char *name; -}; - -/* Table of handlers for and properties of the UWBD Radio Control Events */ -static struct uwbd_event uwbd_urc_events[] = { - [UWB_RC_EVT_IE_RCV] = { - .handler = uwbd_evt_handle_rc_ie_rcv, - .name = "IE_RECEIVED" - }, - [UWB_RC_EVT_BEACON] = { - .handler = uwbd_evt_handle_rc_beacon, - .name = "BEACON_RECEIVED" - }, - [UWB_RC_EVT_BEACON_SIZE] = { - .handler = uwbd_evt_handle_rc_beacon_size, - .name = "BEACON_SIZE_CHANGE" - }, - [UWB_RC_EVT_BPOIE_CHANGE] = { - .handler = uwbd_evt_handle_rc_bpoie_change, - .name = "BPOIE_CHANGE" - }, - [UWB_RC_EVT_BP_SLOT_CHANGE] = { - .handler = uwbd_evt_handle_rc_bp_slot_change, - .name = "BP_SLOT_CHANGE" - }, - [UWB_RC_EVT_DRP_AVAIL] = { - .handler = uwbd_evt_handle_rc_drp_avail, - .name = "DRP_AVAILABILITY_CHANGE" - }, - [UWB_RC_EVT_DRP] = { - .handler = uwbd_evt_handle_rc_drp, - .name = "DRP" - }, - [UWB_RC_EVT_DEV_ADDR_CONFLICT] = { - .handler = uwbd_evt_handle_rc_dev_addr_conflict, - .name = "DEV_ADDR_CONFLICT", - }, -}; - - - -struct uwbd_evt_type_handler { - const char *name; - struct uwbd_event *uwbd_events; - size_t size; -}; - -/* Table of handlers for each UWBD Event type. */ -static struct uwbd_evt_type_handler uwbd_urc_evt_type_handlers[] = { - [UWB_RC_CET_GENERAL] = { - .name = "URC", - .uwbd_events = uwbd_urc_events, - .size = ARRAY_SIZE(uwbd_urc_events), - }, -}; - -static const struct uwbd_event uwbd_message_handlers[] = { - [UWB_EVT_MSG_RESET] = { - .handler = uwbd_msg_handle_reset, - .name = "reset", - }, -}; - -/* - * Handle an URC event passed to the UWB Daemon - * - * @evt: the event to handle - * @returns: 0 if the event can be kfreed, !0 on the contrary - * (somebody else took ownership) [coincidentally, returning - * a <0 errno code will free it :)]. - * - * Looks up the two indirection tables (one for the type, one for the - * subtype) to decide which function handles it and then calls the - * handler. - * - * The event structure passed to the event handler has the radio - * controller in @evt->rc referenced. The reference will be dropped - * once the handler returns, so if it needs it for longer (async), - * it'll need to take another one. 
- */ -static -int uwbd_event_handle_urc(struct uwb_event *evt) -{ - int result = -EINVAL; - struct uwbd_evt_type_handler *type_table; - uwbd_evt_handler_f handler; - u8 type, context; - u16 event; - - type = evt->notif.rceb->bEventType; - event = le16_to_cpu(evt->notif.rceb->wEvent); - context = evt->notif.rceb->bEventContext; - - if (type >= ARRAY_SIZE(uwbd_urc_evt_type_handlers)) - goto out; - type_table = &uwbd_urc_evt_type_handlers[type]; - if (type_table->uwbd_events == NULL) - goto out; - if (event >= type_table->size) - goto out; - handler = type_table->uwbd_events[event].handler; - if (handler == NULL) - goto out; - - result = (*handler)(evt); -out: - if (result < 0) - dev_err(&evt->rc->uwb_dev.dev, - "UWBD: event 0x%02x/%04x/%02x, handling failed: %d\n", - type, event, context, result); - return result; -} - -static void uwbd_event_handle_message(struct uwb_event *evt) -{ - struct uwb_rc *rc; - int result; - - rc = evt->rc; - - if (evt->message < 0 || evt->message >= ARRAY_SIZE(uwbd_message_handlers)) { - dev_err(&rc->uwb_dev.dev, "UWBD: invalid message type %d\n", evt->message); - return; - } - - result = uwbd_message_handlers[evt->message].handler(evt); - if (result < 0) - dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n", - uwbd_message_handlers[evt->message].name, result); -} - -static void uwbd_event_handle(struct uwb_event *evt) -{ - struct uwb_rc *rc; - int should_keep; - - rc = evt->rc; - - if (rc->ready) { - switch (evt->type) { - case UWB_EVT_TYPE_NOTIF: - should_keep = uwbd_event_handle_urc(evt); - if (should_keep <= 0) - kfree(evt->notif.rceb); - break; - case UWB_EVT_TYPE_MSG: - uwbd_event_handle_message(evt); - break; - default: - dev_err(&rc->uwb_dev.dev, "UWBD: invalid event type %d\n", evt->type); - break; - } - } - - __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */ -} - -/** - * UWB Daemon - * - * Listens to all UWB notifications and takes care to track the state - * of the UWB neighbourhood for the kernel. When we do a run, we - * spinlock, move the list to a private copy and release the - * lock. Hold it as little as possible. Not a conflict: it is - * guaranteed we own the events in the private list. - * - * FIXME: should change so we don't have a 1HZ timer all the time, but - * only if there are devices. 
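The locking pattern this comment describes (producers append to a spinlock-protected list and wake the daemon; the daemon dequeues under the lock and does the real work with the lock dropped) maps directly onto a userspace mutex/condvar analogue. The sketch below uses pthreads with invented names and the same LIFO push as the list_add() in uwbd_event_queue() further down; it is illustrative only, and unlike the kernel thread it has no periodic timeout, so the 1 Hz beacon-cache purge is not modelled. Build with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct evt {
        struct evt *next;
        int payload;
};

static struct evt *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int stopping;

/* Producer side: append under the lock and wake the worker. */
static void queue_event(int payload)
{
        struct evt *e = malloc(sizeof(*e));

        if (!e)
                return;
        e->payload = payload;
        pthread_mutex_lock(&lock);
        e->next = head;
        head = e;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
}

/* Consumer side: pop one entry under the lock, handle it with the lock dropped. */
static void *daemon_thread(void *arg)
{
        (void)arg;
        for (;;) {
                struct evt *e;

                pthread_mutex_lock(&lock);
                while (!head && !stopping)
                        pthread_cond_wait(&cond, &lock);
                if (!head && stopping) {
                        pthread_mutex_unlock(&lock);
                        break;
                }
                e = head;
                head = e->next;
                pthread_mutex_unlock(&lock);

                printf("handling event %d\n", e->payload);
                free(e);
        }
        return NULL;
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, daemon_thread, NULL);
        for (int i = 0; i < 3; i++)
                queue_event(i);

        pthread_mutex_lock(&lock);
        stopping = 1;                   /* analogue of kthread_stop() */
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
        pthread_join(tid, NULL);
        return 0;
}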
- */ -static int uwbd(void *param) -{ - struct uwb_rc *rc = param; - unsigned long flags; - struct uwb_event *evt; - int should_stop = 0; - - while (1) { - wait_event_interruptible_timeout( - rc->uwbd.wq, - !list_empty(&rc->uwbd.event_list) - || (should_stop = kthread_should_stop()), - HZ); - if (should_stop) - break; - - spin_lock_irqsave(&rc->uwbd.event_list_lock, flags); - if (!list_empty(&rc->uwbd.event_list)) { - evt = list_first_entry(&rc->uwbd.event_list, struct uwb_event, list_node); - list_del(&evt->list_node); - } else - evt = NULL; - spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags); - - if (evt) { - uwbd_event_handle(evt); - kfree(evt); - } - - uwb_beca_purge(rc); /* Purge devices that left */ - } - return 0; -} - - -/** Start the UWB daemon */ -void uwbd_start(struct uwb_rc *rc) -{ - struct task_struct *task = kthread_run(uwbd, rc, "uwbd"); - if (IS_ERR(task)) { - rc->uwbd.task = NULL; - printk(KERN_ERR "UWB: Cannot start management daemon; " - "UWB won't work\n"); - } else { - rc->uwbd.task = task; - rc->uwbd.pid = rc->uwbd.task->pid; - } -} - -/* Stop the UWB daemon and free any unprocessed events */ -void uwbd_stop(struct uwb_rc *rc) -{ - if (rc->uwbd.task) - kthread_stop(rc->uwbd.task); - uwbd_flush(rc); -} - -/* - * Queue an event for the management daemon - * - * When some lower layer receives an event, it uses this function to - * push it forward to the UWB daemon. - * - * Once you pass the event, you don't own it any more, but the daemon - * does. It will uwb_event_free() it when done, so make sure you - * uwb_event_alloc()ed it or bad things will happen. - * - * If the daemon is not running, we just free the event. - */ -void uwbd_event_queue(struct uwb_event *evt) -{ - struct uwb_rc *rc = evt->rc; - unsigned long flags; - - spin_lock_irqsave(&rc->uwbd.event_list_lock, flags); - if (rc->uwbd.pid != 0) { - list_add(&evt->list_node, &rc->uwbd.event_list); - wake_up_all(&rc->uwbd.wq); - } else { - __uwb_rc_put(evt->rc); - if (evt->type == UWB_EVT_TYPE_NOTIF) - kfree(evt->notif.rceb); - kfree(evt); - } - spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags); - return; -} - -void uwbd_flush(struct uwb_rc *rc) -{ - struct uwb_event *evt, *nxt; - - spin_lock_irq(&rc->uwbd.event_list_lock); - list_for_each_entry_safe(evt, nxt, &rc->uwbd.event_list, list_node) { - if (evt->rc == rc) { - __uwb_rc_put(rc); - list_del(&evt->list_node); - if (evt->type == UWB_EVT_TYPE_NOTIF) - kfree(evt->notif.rceb); - kfree(evt); - } - } - spin_unlock_irq(&rc->uwbd.event_list_lock); -} diff --git a/drivers/staging/uwb/whc-rc.c b/drivers/staging/uwb/whc-rc.c deleted file mode 100644 index a5ab255d7d36..000000000000 --- a/drivers/staging/uwb/whc-rc.c +++ /dev/null @@ -1,467 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Wireless Host Controller: Radio Control Interface (WHCI v0.95[2.3]) - * Radio Control command/event transport to the UWB stack - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * Initialize and hook up the Radio Control interface. - * - * For each device probed, creates an 'struct whcrc' which contains - * just the representation of the UWB Radio Controller, and the logic - * for reading notifications and passing them to the UWB Core. - * - * So we initialize all of those, register the UWB Radio Controller - * and setup the notification/event handle to pipe the notifications - * to the UWB management Daemon. 
- * - * Once uwb_rc_add() is called, the UWB stack takes control, resets - * the radio and readies the device to take commands the UWB - * API/user-space. - * - * Note this driver is just a transport driver; the commands are - * formed at the UWB stack and given to this driver who will deliver - * them to the hw and transfer the replies/notifications back to the - * UWB stack through the UWB daemon (UWBD). - */ -#include <linux/init.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/sched.h> -#include <linux/dma-mapping.h> -#include <linux/interrupt.h> -#include <linux/slab.h> -#include <linux/workqueue.h> -#include "uwb.h" -#include "include/whci.h" -#include "include/umc.h" - -#include "uwb-internal.h" - -/** - * Descriptor for an instance of the UWB Radio Control Driver that - * attaches to the URC interface of the WHCI PCI card. - * - * Unless there is a lock specific to the 'data members', all access - * is protected by uwb_rc->mutex. - */ -struct whcrc { - struct umc_dev *umc_dev; - struct uwb_rc *uwb_rc; /* UWB host controller */ - - unsigned long area; - void __iomem *rc_base; - size_t rc_len; - spinlock_t irq_lock; - - void *evt_buf, *cmd_buf; - dma_addr_t evt_dma_buf, cmd_dma_buf; - wait_queue_head_t cmd_wq; - struct work_struct event_work; -}; - -/** - * Execute an UWB RC command on WHCI/RC - * - * @rc: Instance of a Radio Controller that is a whcrc - * @cmd: Buffer containing the RCCB and payload to execute - * @cmd_size: Size of the command buffer. - * - * We copy the command into whcrc->cmd_buf (as it is pretty and - * aligned`and physically contiguous) and then press the right keys in - * the controller's URCCMD register to get it to read it. We might - * have to wait for the cmd_sem to be open to us. - * - * NOTE: rc's mutex has to be locked - */ -static int whcrc_cmd(struct uwb_rc *uwb_rc, - const struct uwb_rccb *cmd, size_t cmd_size) -{ - int result = 0; - struct whcrc *whcrc = uwb_rc->priv; - struct device *dev = &whcrc->umc_dev->dev; - u32 urccmd; - - if (cmd_size >= 4096) - return -EINVAL; - - /* - * If the URC is halted, then the hardware has reset itself. - * Attempt to recover by restarting the device and then return - * an error as it's likely that the current command isn't - * valid for a newly started RC. - */ - if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) { - dev_err(dev, "requesting reset of halted radio controller\n"); - uwb_rc_reset_all(uwb_rc); - return -EIO; - } - - result = wait_event_timeout(whcrc->cmd_wq, - !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2); - if (result == 0) { - dev_err(dev, "device is not ready to execute commands\n"); - return -ETIMEDOUT; - } - - memmove(whcrc->cmd_buf, cmd, cmd_size); - le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR); - - spin_lock(&whcrc->irq_lock); - urccmd = le_readl(whcrc->rc_base + URCCMD); - urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK); - le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size, - whcrc->rc_base + URCCMD); - spin_unlock(&whcrc->irq_lock); - - return 0; -} - -static int whcrc_reset(struct uwb_rc *rc) -{ - struct whcrc *whcrc = rc->priv; - - return umc_controller_reset(whcrc->umc_dev); -} - -/** - * Reset event reception mechanism and tell hw we are ready to get more - * - * We have read all the events in the event buffer, so we are ready to - * reset it to the beginning. - * - * This is only called during initialization or after an event buffer - * has been retired. 
This means we can be sure that event processing - * is disabled and it's safe to update the URCEVTADDR register. - * - * There's no need to wait for the event processing to start as the - * URC will not clear URCCMD_ACTIVE until (internal) event buffer - * space is available. - */ -static -void whcrc_enable_events(struct whcrc *whcrc) -{ - u32 urccmd; - - le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR); - - spin_lock(&whcrc->irq_lock); - urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE; - le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD); - spin_unlock(&whcrc->irq_lock); -} - -static void whcrc_event_work(struct work_struct *work) -{ - struct whcrc *whcrc = container_of(work, struct whcrc, event_work); - size_t size; - u64 urcevtaddr; - - urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR); - size = urcevtaddr & URCEVTADDR_OFFSET_MASK; - - uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size); - whcrc_enable_events(whcrc); -} - -/** - * Catch interrupts? - * - * We ack inmediately (and expect the hw to do the right thing and - * raise another IRQ if things have changed :) - */ -static -irqreturn_t whcrc_irq_cb(int irq, void *_whcrc) -{ - struct whcrc *whcrc = _whcrc; - struct device *dev = &whcrc->umc_dev->dev; - u32 urcsts; - - urcsts = le_readl(whcrc->rc_base + URCSTS); - if (!(urcsts & URCSTS_INT_MASK)) - return IRQ_NONE; - le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS); - - if (urcsts & URCSTS_HSE) { - dev_err(dev, "host system error -- hardware halted\n"); - /* FIXME: do something sensible here */ - goto out; - } - if (urcsts & URCSTS_ER) - schedule_work(&whcrc->event_work); - if (urcsts & URCSTS_RCI) - wake_up_all(&whcrc->cmd_wq); -out: - return IRQ_HANDLED; -} - - -/** - * Initialize a UMC RC interface: map regions, get (shared) IRQ - */ -static -int whcrc_setup_rc_umc(struct whcrc *whcrc) -{ - int result = 0; - struct device *dev = &whcrc->umc_dev->dev; - struct umc_dev *umc_dev = whcrc->umc_dev; - - whcrc->area = umc_dev->resource.start; - whcrc->rc_len = resource_size(&umc_dev->resource); - result = -EBUSY; - if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) { - dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n", - whcrc->rc_len, whcrc->area, result); - goto error_request_region; - } - - whcrc->rc_base = ioremap(whcrc->area, whcrc->rc_len); - if (whcrc->rc_base == NULL) { - dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n", - whcrc->rc_len, whcrc->area, result); - goto error_ioremap; - } - - result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED, - KBUILD_MODNAME, whcrc); - if (result < 0) { - dev_err(dev, "can't allocate IRQ %d: %d\n", - umc_dev->irq, result); - goto error_request_irq; - } - - result = -ENOMEM; - whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE, - &whcrc->cmd_dma_buf, GFP_KERNEL); - if (whcrc->cmd_buf == NULL) { - dev_err(dev, "Can't allocate cmd transfer buffer\n"); - goto error_cmd_buffer; - } - - whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE, - &whcrc->evt_dma_buf, GFP_KERNEL); - if (whcrc->evt_buf == NULL) { - dev_err(dev, "Can't allocate evt transfer buffer\n"); - goto error_evt_buffer; - } - return 0; - -error_evt_buffer: - dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf, - whcrc->cmd_dma_buf); -error_cmd_buffer: - free_irq(umc_dev->irq, whcrc); -error_request_irq: - iounmap(whcrc->rc_base); -error_ioremap: - release_mem_region(whcrc->area, whcrc->rc_len); -error_request_region: - return result; -} - - -/** - * 
Release RC's UMC resources - */ -static -void whcrc_release_rc_umc(struct whcrc *whcrc) -{ - struct umc_dev *umc_dev = whcrc->umc_dev; - - dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf, - whcrc->evt_dma_buf); - dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf, - whcrc->cmd_dma_buf); - free_irq(umc_dev->irq, whcrc); - iounmap(whcrc->rc_base); - release_mem_region(whcrc->area, whcrc->rc_len); -} - - -/** - * whcrc_start_rc - start a WHCI radio controller - * @whcrc: the radio controller to start - * - * Reset the UMC device, start the radio controller, enable events and - * finally enable interrupts. - */ -static int whcrc_start_rc(struct uwb_rc *rc) -{ - struct whcrc *whcrc = rc->priv; - struct device *dev = &whcrc->umc_dev->dev; - - /* Reset the thing */ - le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD); - if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0, - 5000, "hardware reset") < 0) - return -EBUSY; - - /* Set the event buffer, start the controller (enable IRQs later) */ - le_writel(0, whcrc->rc_base + URCINTR); - le_writel(URCCMD_RS, whcrc->rc_base + URCCMD); - if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0, - 5000, "radio controller start") < 0) - return -ETIMEDOUT; - whcrc_enable_events(whcrc); - le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR); - return 0; -} - - -/** - * whcrc_stop_rc - stop a WHCI radio controller - * @whcrc: the radio controller to stop - * - * Disable interrupts and cancel any pending event processing work - * before clearing the Run/Stop bit. - */ -static -void whcrc_stop_rc(struct uwb_rc *rc) -{ - struct whcrc *whcrc = rc->priv; - struct umc_dev *umc_dev = whcrc->umc_dev; - - le_writel(0, whcrc->rc_base + URCINTR); - cancel_work_sync(&whcrc->event_work); - - le_writel(0, whcrc->rc_base + URCCMD); - whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS, - URCSTS_HALTED, URCSTS_HALTED, 100, "radio controller stop"); -} - -static void whcrc_init(struct whcrc *whcrc) -{ - spin_lock_init(&whcrc->irq_lock); - init_waitqueue_head(&whcrc->cmd_wq); - INIT_WORK(&whcrc->event_work, whcrc_event_work); -} - -/** - * Initialize the radio controller. - * - * NOTE: we setup whcrc->uwb_rc before calling uwb_rc_add(); in the - * IRQ handler we use that to determine if the hw is ready to - * handle events. Looks like a race condition, but it really is - * not. 
- */ -static -int whcrc_probe(struct umc_dev *umc_dev) -{ - int result; - struct uwb_rc *uwb_rc; - struct whcrc *whcrc; - struct device *dev = &umc_dev->dev; - - result = -ENOMEM; - uwb_rc = uwb_rc_alloc(); - if (uwb_rc == NULL) { - dev_err(dev, "unable to allocate RC instance\n"); - goto error_rc_alloc; - } - whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL); - if (whcrc == NULL) { - dev_err(dev, "unable to allocate WHC-RC instance\n"); - goto error_alloc; - } - whcrc_init(whcrc); - whcrc->umc_dev = umc_dev; - - result = whcrc_setup_rc_umc(whcrc); - if (result < 0) { - dev_err(dev, "Can't setup RC UMC interface: %d\n", result); - goto error_setup_rc_umc; - } - whcrc->uwb_rc = uwb_rc; - - uwb_rc->owner = THIS_MODULE; - uwb_rc->cmd = whcrc_cmd; - uwb_rc->reset = whcrc_reset; - uwb_rc->start = whcrc_start_rc; - uwb_rc->stop = whcrc_stop_rc; - - result = uwb_rc_add(uwb_rc, dev, whcrc); - if (result < 0) - goto error_rc_add; - umc_set_drvdata(umc_dev, whcrc); - return 0; - -error_rc_add: - whcrc_release_rc_umc(whcrc); -error_setup_rc_umc: - kfree(whcrc); -error_alloc: - uwb_rc_put(uwb_rc); -error_rc_alloc: - return result; -} - -/** - * Clean up the radio control resources - * - * When we up the command semaphore, everybody possibly held trying to - * execute a command should be granted entry and then they'll see the - * host is quiescing and up it (so it will chain to the next waiter). - * This should not happen (in any case), as we can only remove when - * there are no handles open... - */ -static void whcrc_remove(struct umc_dev *umc_dev) -{ - struct whcrc *whcrc = umc_get_drvdata(umc_dev); - struct uwb_rc *uwb_rc = whcrc->uwb_rc; - - umc_set_drvdata(umc_dev, NULL); - uwb_rc_rm(uwb_rc); - whcrc_release_rc_umc(whcrc); - kfree(whcrc); - uwb_rc_put(uwb_rc); -} - -static int whcrc_pre_reset(struct umc_dev *umc) -{ - struct whcrc *whcrc = umc_get_drvdata(umc); - struct uwb_rc *uwb_rc = whcrc->uwb_rc; - - uwb_rc_pre_reset(uwb_rc); - return 0; -} - -static int whcrc_post_reset(struct umc_dev *umc) -{ - struct whcrc *whcrc = umc_get_drvdata(umc); - struct uwb_rc *uwb_rc = whcrc->uwb_rc; - - return uwb_rc_post_reset(uwb_rc); -} - -/* PCI device ID's that we handle [so it gets loaded] */ -static struct pci_device_id __used whcrc_id_table[] = { - { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, - { /* empty last entry */ } -}; -MODULE_DEVICE_TABLE(pci, whcrc_id_table); - -static struct umc_driver whcrc_driver = { - .name = "whc-rc", - .cap_id = UMC_CAP_ID_WHCI_RC, - .probe = whcrc_probe, - .remove = whcrc_remove, - .pre_reset = whcrc_pre_reset, - .post_reset = whcrc_post_reset, -}; - -static int __init whcrc_driver_init(void) -{ - return umc_driver_register(&whcrc_driver); -} -module_init(whcrc_driver_init); - -static void __exit whcrc_driver_exit(void) -{ - umc_driver_unregister(&whcrc_driver); -} -module_exit(whcrc_driver_exit); - -MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); -MODULE_DESCRIPTION("Wireless Host Controller Radio Control Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/uwb/whci.c b/drivers/staging/uwb/whci.c deleted file mode 100644 index a8832f64d708..000000000000 --- a/drivers/staging/uwb/whci.c +++ /dev/null @@ -1,257 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * WHCI UWB Multi-interface Controller enumerator. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. 
- */ -#include <linux/delay.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/slab.h> -#include "include/whci.h" -#include "include/umc.h" - -struct whci_card { - struct pci_dev *pci; - void __iomem *uwbbase; - u8 n_caps; - struct umc_dev *devs[0]; -}; - - -/* Fix faulty HW :( */ -static -u64 whci_capdata_quirks(struct whci_card *card, u64 capdata) -{ - u64 capdata_orig = capdata; - struct pci_dev *pci_dev = card->pci; - if (pci_dev->vendor == PCI_VENDOR_ID_INTEL - && (pci_dev->device == 0x0c3b || pci_dev->device == 0004) - && pci_dev->class == 0x0d1010) { - switch (UWBCAPDATA_TO_CAP_ID(capdata)) { - /* WLP capability has 0x100 bytes of aperture */ - case 0x80: - capdata |= 0x40 << 8; break; - /* WUSB capability has 0x80 bytes of aperture - * and ID is 1 */ - case 0x02: - capdata &= ~0xffff; - capdata |= 0x2001; - break; - } - } - if (capdata_orig != capdata) - dev_warn(&pci_dev->dev, - "PCI v%04x d%04x c%06x#%02x: " - "corrected capdata from %016Lx to %016Lx\n", - pci_dev->vendor, pci_dev->device, pci_dev->class, - (unsigned)UWBCAPDATA_TO_CAP_ID(capdata), - (unsigned long long)capdata_orig, - (unsigned long long)capdata); - return capdata; -} - - -/** - * whci_wait_for - wait for a WHCI register to be set - * - * Polls (for at most @max_ms ms) until '*@reg & @mask == @result'. - */ -int whci_wait_for(struct device *dev, u32 __iomem *reg, u32 mask, u32 result, - unsigned long max_ms, const char *tag) -{ - unsigned t = 0; - u32 val; - for (;;) { - val = le_readl(reg); - if ((val & mask) == result) - break; - if (t >= max_ms) { - dev_err(dev, "%s timed out\n", tag); - return -ETIMEDOUT; - } - msleep(10); - t += 10; - } - return 0; -} -EXPORT_SYMBOL_GPL(whci_wait_for); - - -/* - * NOTE: the capinfo and capdata registers are slightly different - * (size and cap-id fields). So for cap #0, we need to fill - * in. Size comes from the size of the register block - * (statically calculated); cap_id comes from nowhere, we use - * zero, that is reserved, for the radio controller, because - * none was defined at the spec level. - */ -static int whci_add_cap(struct whci_card *card, int n) -{ - struct umc_dev *umc; - u64 capdata; - int bar, err; - - umc = umc_device_create(&card->pci->dev, n); - if (umc == NULL) - return -ENOMEM; - - capdata = le_readq(card->uwbbase + UWBCAPDATA(n)); - - bar = UWBCAPDATA_TO_BAR(capdata) << 1; - - capdata = whci_capdata_quirks(card, capdata); - /* Capability 0 is the radio controller. It's size is 32 - * bytes (WHCI0.95[2.3, T2-9]). */ - umc->version = UWBCAPDATA_TO_VERSION(capdata); - umc->cap_id = n == 0 ? 0 : UWBCAPDATA_TO_CAP_ID(capdata); - umc->bar = bar; - umc->resource.start = pci_resource_start(card->pci, bar) - + UWBCAPDATA_TO_OFFSET(capdata); - umc->resource.end = umc->resource.start - + (n == 0 ? 
0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1; - umc->resource.name = dev_name(&umc->dev); - umc->resource.flags = card->pci->resource[bar].flags; - umc->resource.parent = &card->pci->resource[bar]; - umc->irq = card->pci->irq; - - err = umc_device_register(umc); - if (err < 0) - goto error; - card->devs[n] = umc; - return 0; - -error: - kfree(umc); - return err; -} - -static void whci_del_cap(struct whci_card *card, int n) -{ - struct umc_dev *umc = card->devs[n]; - - umc_device_unregister(umc); -} - -static int whci_n_caps(struct pci_dev *pci) -{ - void __iomem *uwbbase; - u64 capinfo; - - uwbbase = pci_iomap(pci, 0, 8); - if (!uwbbase) - return -ENOMEM; - capinfo = le_readq(uwbbase + UWBCAPINFO); - pci_iounmap(pci, uwbbase); - - return UWBCAPINFO_TO_N_CAPS(capinfo); -} - -static int whci_probe(struct pci_dev *pci, const struct pci_device_id *id) -{ - struct whci_card *card; - int err, n_caps, n; - - err = pci_enable_device(pci); - if (err < 0) - goto error; - pci_enable_msi(pci); - pci_set_master(pci); - err = -ENXIO; - if (!pci_set_dma_mask(pci, DMA_BIT_MASK(64))) - pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64)); - else if (!pci_set_dma_mask(pci, DMA_BIT_MASK(32))) - pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)); - else - goto error_dma; - - err = n_caps = whci_n_caps(pci); - if (n_caps < 0) - goto error_ncaps; - - err = -ENOMEM; - card = kzalloc(sizeof(struct whci_card) - + sizeof(struct umc_dev *) * (n_caps + 1), - GFP_KERNEL); - if (card == NULL) - goto error_kzalloc; - card->pci = pci; - card->n_caps = n_caps; - - err = -EBUSY; - if (!request_mem_region(pci_resource_start(pci, 0), - UWBCAPDATA_SIZE(card->n_caps), - "whci (capability data)")) - goto error_request_memregion; - err = -ENOMEM; - card->uwbbase = pci_iomap(pci, 0, UWBCAPDATA_SIZE(card->n_caps)); - if (!card->uwbbase) - goto error_iomap; - - /* Add each capability. */ - for (n = 0; n <= card->n_caps; n++) { - err = whci_add_cap(card, n); - if (err < 0 && n == 0) { - dev_err(&pci->dev, "cannot bind UWB radio controller:" - " %d\n", err); - goto error_bind; - } - if (err < 0) - dev_warn(&pci->dev, "warning: cannot bind capability " - "#%u: %d\n", n, err); - } - pci_set_drvdata(pci, card); - return 0; - -error_bind: - pci_iounmap(pci, card->uwbbase); -error_iomap: - release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps)); -error_request_memregion: - kfree(card); -error_kzalloc: -error_ncaps: -error_dma: - pci_disable_msi(pci); - pci_disable_device(pci); -error: - return err; -} - -static void whci_remove(struct pci_dev *pci) -{ - struct whci_card *card = pci_get_drvdata(pci); - int n; - - pci_set_drvdata(pci, NULL); - /* Unregister each capability in reverse (so the master device - * is unregistered last). 
*/ - for (n = card->n_caps; n >= 0 ; n--) - whci_del_cap(card, n); - pci_iounmap(pci, card->uwbbase); - release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps)); - kfree(card); - pci_disable_msi(pci); - pci_disable_device(pci); -} - -static struct pci_device_id whci_id_table[] = { - { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, - { 0 }, -}; -MODULE_DEVICE_TABLE(pci, whci_id_table); - - -static struct pci_driver whci_driver = { - .name = "whci", - .id_table = whci_id_table, - .probe = whci_probe, - .remove = whci_remove, -}; - -module_pci_driver(whci_driver); -MODULE_DESCRIPTION("WHCI UWB Multi-interface Controller enumerator"); -MODULE_AUTHOR("Cambridge Silicon Radio Ltd."); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c index 1ef31a984741..597acef35d0b 100644 --- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c +++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c @@ -1776,7 +1776,7 @@ static int bm2835_mmal_init_device(struct bm2835_mmal_dev *dev, video_set_drvdata(vfd, dev); ret = video_register_device(vfd, - VFL_TYPE_GRABBER, + VFL_TYPE_VIDEO, video_nr[dev->camera_num]); if (ret < 0) return ret; diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c index 89786c264867..5137fcf203d6 100644 --- a/drivers/staging/vc04_services/bcm2835-camera/controls.c +++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c @@ -85,7 +85,6 @@ struct bm2835_mmal_v4l2_ctrl { const s64 *imenu; /* integer menu array */ u32 mmal_id; /* mmal parameter id */ bm2835_mmal_v4l2_ctrl_cb *setter; - bool ignore_errors; }; struct v4l2_to_mmal_effects_setting { @@ -912,8 +911,6 @@ static int bm2835_mmal_s_ctrl(struct v4l2_ctrl *ctrl) if (ret) pr_warn("ctrl id:%d/MMAL param %08X- returned ret %d\n", ctrl->id, mmal_ctrl->mmal_id, ret); - if (mmal_ctrl->ignore_errors) - ret = 0; return ret; } @@ -923,239 +920,340 @@ static const struct v4l2_ctrl_ops bm2835_mmal_ctrl_ops = { static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = { { - V4L2_CID_SATURATION, MMAL_CONTROL_TYPE_STD, - -100, 100, 0, 1, NULL, - MMAL_PARAMETER_SATURATION, - ctrl_set_rational, - false + .id = V4L2_CID_SATURATION, + .type = MMAL_CONTROL_TYPE_STD, + .min = -100, + .max = 100, + .def = 0, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_SATURATION, + .setter = ctrl_set_rational, }, { - V4L2_CID_SHARPNESS, MMAL_CONTROL_TYPE_STD, - -100, 100, 0, 1, NULL, - MMAL_PARAMETER_SHARPNESS, - ctrl_set_rational, - false + .id = V4L2_CID_SHARPNESS, + .type = MMAL_CONTROL_TYPE_STD, + .min = -100, + .max = 100, + .def = 0, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_SHARPNESS, + .setter = ctrl_set_rational, }, { - V4L2_CID_CONTRAST, MMAL_CONTROL_TYPE_STD, - -100, 100, 0, 1, NULL, - MMAL_PARAMETER_CONTRAST, - ctrl_set_rational, - false + .id = V4L2_CID_CONTRAST, + .type = MMAL_CONTROL_TYPE_STD, + .min = -100, + .max = 100, + .def = 0, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_CONTRAST, + .setter = ctrl_set_rational, }, { - V4L2_CID_BRIGHTNESS, MMAL_CONTROL_TYPE_STD, - 0, 100, 50, 1, NULL, - MMAL_PARAMETER_BRIGHTNESS, - ctrl_set_rational, - false + .id = V4L2_CID_BRIGHTNESS, + .type = MMAL_CONTROL_TYPE_STD, + .min = 0, + .max = 100, + .def = 50, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_BRIGHTNESS, + .setter = ctrl_set_rational, }, { - 
V4L2_CID_ISO_SENSITIVITY, MMAL_CONTROL_TYPE_INT_MENU, - 0, ARRAY_SIZE(iso_qmenu) - 1, 0, 1, iso_qmenu, - MMAL_PARAMETER_ISO, - ctrl_set_iso, - false + .id = V4L2_CID_ISO_SENSITIVITY, + .type = MMAL_CONTROL_TYPE_INT_MENU, + .min = 0, + .max = ARRAY_SIZE(iso_qmenu) - 1, + .def = 0, + .step = 1, + .imenu = iso_qmenu, + .mmal_id = MMAL_PARAMETER_ISO, + .setter = ctrl_set_iso, }, { - V4L2_CID_ISO_SENSITIVITY_AUTO, MMAL_CONTROL_TYPE_STD_MENU, - 0, V4L2_ISO_SENSITIVITY_AUTO, V4L2_ISO_SENSITIVITY_AUTO, 1, - NULL, MMAL_PARAMETER_ISO, - ctrl_set_iso, - false + .id = V4L2_CID_ISO_SENSITIVITY_AUTO, + .type = MMAL_CONTROL_TYPE_STD_MENU, + .min = 0, + .max = V4L2_ISO_SENSITIVITY_AUTO, + .def = V4L2_ISO_SENSITIVITY_AUTO, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_ISO, + .setter = ctrl_set_iso, }, { - V4L2_CID_IMAGE_STABILIZATION, MMAL_CONTROL_TYPE_STD, - 0, 1, 0, 1, NULL, - MMAL_PARAMETER_VIDEO_STABILISATION, - ctrl_set_value, - false + .id = V4L2_CID_IMAGE_STABILIZATION, + .type = MMAL_CONTROL_TYPE_STD, + .min = 0, + .max = 1, + .def = 0, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_VIDEO_STABILISATION, + .setter = ctrl_set_value, }, { - V4L2_CID_EXPOSURE_AUTO, MMAL_CONTROL_TYPE_STD_MENU, - ~0x03, V4L2_EXPOSURE_APERTURE_PRIORITY, V4L2_EXPOSURE_AUTO, 0, - NULL, MMAL_PARAMETER_EXPOSURE_MODE, - ctrl_set_exposure, - false + .id = V4L2_CID_EXPOSURE_AUTO, + .type = MMAL_CONTROL_TYPE_STD_MENU, + .min = ~0x03, + .max = V4L2_EXPOSURE_APERTURE_PRIORITY, + .def = V4L2_EXPOSURE_AUTO, + .step = 0, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_EXPOSURE_MODE, + .setter = ctrl_set_exposure, }, { - V4L2_CID_EXPOSURE_ABSOLUTE, MMAL_CONTROL_TYPE_STD, + .id = V4L2_CID_EXPOSURE_ABSOLUTE, + .type = MMAL_CONTROL_TYPE_STD, /* Units of 100usecs */ - 1, 1 * 1000 * 10, 100 * 10, 1, NULL, - MMAL_PARAMETER_SHUTTER_SPEED, - ctrl_set_exposure, - false + .min = 1, + .max = 1 * 1000 * 10, + .def = 100 * 10, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_SHUTTER_SPEED, + .setter = ctrl_set_exposure, }, { - V4L2_CID_AUTO_EXPOSURE_BIAS, MMAL_CONTROL_TYPE_INT_MENU, - 0, ARRAY_SIZE(ev_bias_qmenu) - 1, - (ARRAY_SIZE(ev_bias_qmenu) + 1) / 2 - 1, 0, ev_bias_qmenu, - MMAL_PARAMETER_EXPOSURE_COMP, - ctrl_set_value_ev, - false + .id = V4L2_CID_AUTO_EXPOSURE_BIAS, + .type = MMAL_CONTROL_TYPE_INT_MENU, + .min = 0, + .max = ARRAY_SIZE(ev_bias_qmenu) - 1, + .def = (ARRAY_SIZE(ev_bias_qmenu) + 1) / 2 - 1, + .step = 0, + .imenu = ev_bias_qmenu, + .mmal_id = MMAL_PARAMETER_EXPOSURE_COMP, + .setter = ctrl_set_value_ev, }, { - V4L2_CID_EXPOSURE_AUTO_PRIORITY, MMAL_CONTROL_TYPE_STD, - 0, 1, - 0, 1, NULL, - 0, /* Dummy MMAL ID as it gets mapped into FPS range*/ - ctrl_set_exposure, - false + .id = V4L2_CID_EXPOSURE_AUTO_PRIORITY, + .type = MMAL_CONTROL_TYPE_STD, + .min = 0, + .max = 1, + .def = 0, + .step = 1, + .imenu = NULL, + /* Dummy MMAL ID as it gets mapped into FPS range */ + .mmal_id = 0, + .setter = ctrl_set_exposure, }, { - V4L2_CID_EXPOSURE_METERING, - MMAL_CONTROL_TYPE_STD_MENU, - ~0x7, V4L2_EXPOSURE_METERING_SPOT, - V4L2_EXPOSURE_METERING_AVERAGE, 0, NULL, - MMAL_PARAMETER_EXP_METERING_MODE, - ctrl_set_metering_mode, - false + .id = V4L2_CID_EXPOSURE_METERING, + .type = MMAL_CONTROL_TYPE_STD_MENU, + .min = ~0x7, + .max = V4L2_EXPOSURE_METERING_SPOT, + .def = V4L2_EXPOSURE_METERING_AVERAGE, + .step = 0, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_EXP_METERING_MODE, + .setter = ctrl_set_metering_mode, }, { - V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE, - MMAL_CONTROL_TYPE_STD_MENU, - ~0x3ff, 
V4L2_WHITE_BALANCE_SHADE, V4L2_WHITE_BALANCE_AUTO, 0, - NULL, - MMAL_PARAMETER_AWB_MODE, - ctrl_set_awb_mode, - false + .id = V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE, + .type = MMAL_CONTROL_TYPE_STD_MENU, + .min = ~0x3ff, + .max = V4L2_WHITE_BALANCE_SHADE, + .def = V4L2_WHITE_BALANCE_AUTO, + .step = 0, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_AWB_MODE, + .setter = ctrl_set_awb_mode, }, { - V4L2_CID_RED_BALANCE, MMAL_CONTROL_TYPE_STD, - 1, 7999, 1000, 1, NULL, - MMAL_PARAMETER_CUSTOM_AWB_GAINS, - ctrl_set_awb_gains, - false + .id = V4L2_CID_RED_BALANCE, + .type = MMAL_CONTROL_TYPE_STD, + .min = 1, + .max = 7999, + .def = 1000, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_CUSTOM_AWB_GAINS, + .setter = ctrl_set_awb_gains, }, { - V4L2_CID_BLUE_BALANCE, MMAL_CONTROL_TYPE_STD, - 1, 7999, 1000, 1, NULL, - MMAL_PARAMETER_CUSTOM_AWB_GAINS, - ctrl_set_awb_gains, - false + .id = V4L2_CID_BLUE_BALANCE, + .type = MMAL_CONTROL_TYPE_STD, + .min = 1, + .max = 7999, + .def = 1000, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_CUSTOM_AWB_GAINS, + .setter = ctrl_set_awb_gains, }, { - V4L2_CID_COLORFX, MMAL_CONTROL_TYPE_STD_MENU, - 0, V4L2_COLORFX_SET_CBCR, V4L2_COLORFX_NONE, 0, NULL, - MMAL_PARAMETER_IMAGE_EFFECT, - ctrl_set_image_effect, - false + .id = V4L2_CID_COLORFX, + .type = MMAL_CONTROL_TYPE_STD_MENU, + .min = 0, + .max = V4L2_COLORFX_SET_CBCR, + .def = V4L2_COLORFX_NONE, + .step = 0, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_IMAGE_EFFECT, + .setter = ctrl_set_image_effect, }, { - V4L2_CID_COLORFX_CBCR, MMAL_CONTROL_TYPE_STD, - 0, 0xffff, 0x8080, 1, NULL, - MMAL_PARAMETER_COLOUR_EFFECT, - ctrl_set_colfx, - false + .id = V4L2_CID_COLORFX_CBCR, + .type = MMAL_CONTROL_TYPE_STD, + .min = 0, + .max = 0xffff, + .def = 0x8080, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_COLOUR_EFFECT, + .setter = ctrl_set_colfx, }, { - V4L2_CID_ROTATE, MMAL_CONTROL_TYPE_STD, - 0, 360, 0, 90, NULL, - MMAL_PARAMETER_ROTATION, - ctrl_set_rotate, - false + .id = V4L2_CID_ROTATE, + .type = MMAL_CONTROL_TYPE_STD, + .min = 0, + .max = 360, + .def = 0, + .step = 90, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_ROTATION, + .setter = ctrl_set_rotate, }, { - V4L2_CID_HFLIP, MMAL_CONTROL_TYPE_STD, - 0, 1, 0, 1, NULL, - MMAL_PARAMETER_MIRROR, - ctrl_set_flip, - false + .id = V4L2_CID_HFLIP, + .type = MMAL_CONTROL_TYPE_STD, + .min = 0, + .max = 1, + .def = 0, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_MIRROR, + .setter = ctrl_set_flip, }, { - V4L2_CID_VFLIP, MMAL_CONTROL_TYPE_STD, - 0, 1, 0, 1, NULL, - MMAL_PARAMETER_MIRROR, - ctrl_set_flip, - false + .id = V4L2_CID_VFLIP, + .type = MMAL_CONTROL_TYPE_STD, + .min = 0, + .max = 1, + .def = 0, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_MIRROR, + .setter = ctrl_set_flip, }, { - V4L2_CID_MPEG_VIDEO_BITRATE_MODE, MMAL_CONTROL_TYPE_STD_MENU, - 0, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, - 0, 0, NULL, - MMAL_PARAMETER_RATECONTROL, - ctrl_set_bitrate_mode, - false + .id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE, + .type = MMAL_CONTROL_TYPE_STD_MENU, + .min = 0, + .max = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, + .def = 0, + .step = 0, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_RATECONTROL, + .setter = ctrl_set_bitrate_mode, }, { - V4L2_CID_MPEG_VIDEO_BITRATE, MMAL_CONTROL_TYPE_STD, - 25 * 1000, 25 * 1000 * 1000, 10 * 1000 * 1000, 25 * 1000, NULL, - MMAL_PARAMETER_VIDEO_BIT_RATE, - ctrl_set_bitrate, - false + .id = V4L2_CID_MPEG_VIDEO_BITRATE, + .type = MMAL_CONTROL_TYPE_STD, + .min = 25 * 1000, + .max = 25 * 1000 * 1000, + .def = 10 * 1000 * 
1000, + .step = 25 * 1000, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_VIDEO_BIT_RATE, + .setter = ctrl_set_bitrate, }, { - V4L2_CID_JPEG_COMPRESSION_QUALITY, MMAL_CONTROL_TYPE_STD, - 1, 100, - 30, 1, NULL, - MMAL_PARAMETER_JPEG_Q_FACTOR, - ctrl_set_image_encode_output, - false + .id = V4L2_CID_JPEG_COMPRESSION_QUALITY, + .type = MMAL_CONTROL_TYPE_STD, + .min = 1, + .max = 100, + .def = 30, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_JPEG_Q_FACTOR, + .setter = ctrl_set_image_encode_output, }, { - V4L2_CID_POWER_LINE_FREQUENCY, MMAL_CONTROL_TYPE_STD_MENU, - 0, V4L2_CID_POWER_LINE_FREQUENCY_AUTO, - 1, 1, NULL, - MMAL_PARAMETER_FLICKER_AVOID, - ctrl_set_flicker_avoidance, - false + .id = V4L2_CID_POWER_LINE_FREQUENCY, + .type = MMAL_CONTROL_TYPE_STD_MENU, + .min = 0, + .max = V4L2_CID_POWER_LINE_FREQUENCY_AUTO, + .def = 1, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_FLICKER_AVOID, + .setter = ctrl_set_flicker_avoidance, }, { - V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER, MMAL_CONTROL_TYPE_STD, - 0, 1, - 0, 1, NULL, - MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER, - ctrl_set_video_encode_param_output, - false + .id = V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER, + .type = MMAL_CONTROL_TYPE_STD, + .min = 0, + .max = 1, + .def = 0, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER, + .setter = ctrl_set_video_encode_param_output, }, { - V4L2_CID_MPEG_VIDEO_H264_PROFILE, - MMAL_CONTROL_TYPE_STD_MENU, - ~(BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) | - BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) | - BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) | - BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)), - V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, - V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, 1, NULL, - MMAL_PARAMETER_PROFILE, - ctrl_set_video_encode_profile_level, - false + .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE, + .type = MMAL_CONTROL_TYPE_STD_MENU, + .min = ~(BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) | + BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) | + BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) | + BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)), + .max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, + .def = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_PROFILE, + .setter = ctrl_set_video_encode_profile_level, }, { - V4L2_CID_MPEG_VIDEO_H264_LEVEL, MMAL_CONTROL_TYPE_STD_MENU, - ~(BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) | - BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) | - BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) | - BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) | - BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) | - BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) | - BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) | - BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) | - BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) | - BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) | - BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) | - BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0)), - V4L2_MPEG_VIDEO_H264_LEVEL_4_0, - V4L2_MPEG_VIDEO_H264_LEVEL_4_0, 1, NULL, - MMAL_PARAMETER_PROFILE, - ctrl_set_video_encode_profile_level, - false + .id = V4L2_CID_MPEG_VIDEO_H264_LEVEL, + .type = MMAL_CONTROL_TYPE_STD_MENU, + .min = ~(BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) | + BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) | + BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) | + BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) | + BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) | + BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) | + BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) | + BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) | + BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) | + BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) | + BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) | + 
BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0)), + .max = V4L2_MPEG_VIDEO_H264_LEVEL_4_0, + .def = V4L2_MPEG_VIDEO_H264_LEVEL_4_0, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_PROFILE, + .setter = ctrl_set_video_encode_profile_level, }, { - V4L2_CID_SCENE_MODE, MMAL_CONTROL_TYPE_STD_MENU, - -1, /* Min (mask) is computed at runtime */ - V4L2_SCENE_MODE_TEXT, - V4L2_SCENE_MODE_NONE, 1, NULL, - MMAL_PARAMETER_PROFILE, - ctrl_set_scene_mode, - false + .id = V4L2_CID_SCENE_MODE, + .type = MMAL_CONTROL_TYPE_STD_MENU, + /* mask is computed at runtime */ + .min = -1, + .max = V4L2_SCENE_MODE_TEXT, + .def = V4L2_SCENE_MODE_NONE, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_PROFILE, + .setter = ctrl_set_scene_mode, }, { - V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, MMAL_CONTROL_TYPE_STD, - 0, 0x7FFFFFFF, 60, 1, NULL, - MMAL_PARAMETER_INTRAPERIOD, - ctrl_set_video_encode_param_output, - false + .id = V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, + .type = MMAL_CONTROL_TYPE_STD, + .min = 0, + .max = 0x7FFFFFFF, + .def = 60, + .step = 1, + .imenu = NULL, + .mmal_id = MMAL_PARAMETER_INTRAPERIOD, + .setter = ctrl_set_video_encode_param_output, }, }; @@ -1168,7 +1266,7 @@ int bm2835_mmal_set_all_camera_controls(struct bm2835_mmal_dev *dev) if ((dev->ctrls[c]) && (v4l2_ctrls[c].setter)) { ret = v4l2_ctrls[c].setter(dev, dev->ctrls[c], &v4l2_ctrls[c]); - if (!v4l2_ctrls[c].ignore_errors && ret) { + if (ret) { v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "Failed when setting default values for ctrl %d\n", c); diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_common.h b/drivers/staging/vc04_services/interface/vchi/vchi_common.h index 141af16ce031..7fc04e38936d 100644 --- a/drivers/staging/vc04_services/interface/vchi/vchi_common.h +++ b/drivers/staging/vc04_services/interface/vchi/vchi_common.h @@ -33,10 +33,14 @@ enum vchi_crc_control { enum vchi_callback_reason { VCHI_CALLBACK_REASON_MIN, - //This indicates that there is data available - //handle is the msg id that was transmitted with the data - // When a message is received and there was no FULL message available previously, send callback - // Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails + /* + * This indicates that there is data available handle is the msg id that + * was transmitted with the data + * When a message is received and there was no FULL message available + * previously, send callback + * Tasks get kicked by the callback, reset their event and try and read + * from the fifo until it fails + */ VCHI_CALLBACK_MSG_AVAILABLE, VCHI_CALLBACK_MSG_SENT, VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented @@ -51,9 +55,11 @@ enum vchi_callback_reason { VCHI_CALLBACK_SERVICE_CLOSED, - // this side has sent XOFF to peer due to lack of data consumption by service - // (suggests the service may need to take some recovery action if it has - // been deliberately holding off consuming data) + /* + * this side has sent XOFF to peer due to lack of data consumption by + * service (suggests the service may need to take some recovery action + * if it has been deliberately holding off consuming data) + */ VCHI_CALLBACK_SENT_XOFF, VCHI_CALLBACK_SENT_XON, @@ -112,12 +118,16 @@ struct vchi_msg_vector { int32_t vec_len; }; -// Iterator structure for reading ahead through received message queue. Allocated by client, -// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only. 
-// Iterates over messages in queue at the instant of the call to vchi_msg_lookahead - -// will not proceed to messages received since. Behaviour is undefined if an iterator -// is used again after messages for that service are removed/dequeued by any -// means other than vchi_msg_iter_... calls on the iterator itself. +/* + * Iterator structure for reading ahead through received message queue. + * Allocated by client, initialised by vchi_msg_look_ahead. Fields are for + * internal VCHI use only. + * Iterates over messages in queue at the instant of the call to + * vchi_msg_lookahead - will not proceed to messages received since. + * Behaviour is undefined if an iterator is used again after messages for that + * service are removed/dequeued by any means other than vchi_msg_iter_... + * calls on the iterator itself. + */ struct vchi_msg_iter { struct opaque_vchi_service_t *service; void *last; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index ca30bfd52919..c18c6ca0b6c0 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c @@ -257,49 +257,6 @@ int vchiq_dump_platform_state(void *dump_context) return vchiq_dump(dump_context, buf, len + 1); } -enum vchiq_status -vchiq_platform_suspend(struct vchiq_state *state) -{ - return VCHIQ_ERROR; -} - -enum vchiq_status -vchiq_platform_resume(struct vchiq_state *state) -{ - return VCHIQ_SUCCESS; -} - -void -vchiq_platform_paused(struct vchiq_state *state) -{ -} - -void -vchiq_platform_resumed(struct vchiq_state *state) -{ -} - -int -vchiq_platform_videocore_wanted(struct vchiq_state *state) -{ - return 1; // autosuspend not supported - videocore always wanted -} - -int -vchiq_platform_use_suspend_timer(void) -{ - return 0; -} -void -vchiq_dump_platform_use_state(struct vchiq_state *state) -{ - vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use"); -} -void -vchiq_platform_handle_timeout(struct vchiq_state *state) -{ - (void)state; -} /* * Local functions */ diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 4458c1e60fa3..a1ea9777a444 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -22,6 +22,7 @@ #include <linux/platform_device.h> #include <linux/compat.h> #include <linux/dma-mapping.h> +#include <linux/rcupdate.h> #include <soc/bcm2835/raspberrypi-firmware.h> #include "vchiq_core.h" @@ -48,39 +49,6 @@ int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT; int vchiq_susp_log_level = VCHIQ_LOG_ERROR; -#define SUSPEND_TIMER_TIMEOUT_MS 100 -#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000 - -#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */ -static const char *const suspend_state_names[] = { - "VC_SUSPEND_FORCE_CANCELED", - "VC_SUSPEND_REJECTED", - "VC_SUSPEND_FAILED", - "VC_SUSPEND_IDLE", - "VC_SUSPEND_REQUESTED", - "VC_SUSPEND_IN_PROGRESS", - "VC_SUSPEND_SUSPENDED" -}; -#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */ -static const char *const resume_state_names[] = { - "VC_RESUME_FAILED", - "VC_RESUME_IDLE", - "VC_RESUME_REQUESTED", - "VC_RESUME_IN_PROGRESS", - "VC_RESUME_RESUMED" -}; -/* The number of times we allow force suspend to timeout before actually -** _forcing_ suspend. 
This is to cater for SW which fails to release vchiq -** correctly - we don't want to prevent ARM suspend indefinitely in this case. -*/ -#define FORCE_SUSPEND_FAIL_MAX 8 - -/* The time in ms allowed for videocore to go idle when force suspend has been - * requested */ -#define FORCE_SUSPEND_TIMEOUT_MS 200 - -static void suspend_timer_callback(struct timer_list *t); - struct user_service { struct vchiq_service *service; void *userdata; @@ -2129,10 +2097,12 @@ int vchiq_dump_platform_instances(void *dump_context) /* There is no list of instances, so instead scan all services, marking those that have been dumped. */ + rcu_read_lock(); for (i = 0; i < state->unused_service; i++) { - struct vchiq_service *service = state->services[i]; + struct vchiq_service *service; struct vchiq_instance *instance; + service = rcu_dereference(state->services[i]); if (!service || service->base.callback != service_callback) continue; @@ -2140,18 +2110,26 @@ int vchiq_dump_platform_instances(void *dump_context) if (instance) instance->mark = 0; } + rcu_read_unlock(); for (i = 0; i < state->unused_service; i++) { - struct vchiq_service *service = state->services[i]; + struct vchiq_service *service; struct vchiq_instance *instance; int err; - if (!service || service->base.callback != service_callback) + rcu_read_lock(); + service = rcu_dereference(state->services[i]); + if (!service || service->base.callback != service_callback) { + rcu_read_unlock(); continue; + } instance = service->instance; - if (!instance || instance->mark) + if (!instance || instance->mark) { + rcu_read_unlock(); continue; + } + rcu_read_unlock(); len = snprintf(buf, sizeof(buf), "Instance %pK: pid %d,%s completions %d/%d", @@ -2161,7 +2139,6 @@ int vchiq_dump_platform_instances(void *dump_context) instance->completion_insert - instance->completion_remove, MAX_COMPLETIONS); - err = vchiq_dump(dump_context, buf, len + 1); if (err) return err; @@ -2184,17 +2161,17 @@ int vchiq_dump_platform_service_state(void *dump_context, char buf[80]; int len; - len = snprintf(buf, sizeof(buf), " instance %pK", service->instance); + len = scnprintf(buf, sizeof(buf), " instance %pK", service->instance); if ((service->base.callback == service_callback) && user_service->is_vchi) { - len += snprintf(buf + len, sizeof(buf) - len, + len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages", user_service->msg_insert - user_service->msg_remove, MSG_QUEUE_SIZE); if (user_service->dequeue_pending) - len += snprintf(buf + len, sizeof(buf) - len, + len += scnprintf(buf + len, sizeof(buf) - len, " (dequeue pending)"); } @@ -2258,27 +2235,6 @@ vchiq_fops = { * Autosuspend related functionality */ -int -vchiq_videocore_wanted(struct vchiq_state *state) -{ - struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); - - if (!arm_state) - /* autosuspend not supported - always return wanted */ - return 1; - else if (arm_state->blocked_count) - return 1; - else if (!arm_state->videocore_use_count) - /* usage count zero - check for override unless we're forcing */ - if (arm_state->resume_blocked) - return 0; - else - return vchiq_platform_videocore_wanted(state); - else - /* non-zero usage count - videocore still required */ - return 1; -} - static enum vchiq_status vchiq_keepalive_vchiq_callback(enum vchiq_reason reason, struct vchiq_header *header, @@ -2382,317 +2338,13 @@ vchiq_arm_init_state(struct vchiq_state *state, atomic_set(&arm_state->ka_use_ack_count, 0); atomic_set(&arm_state->ka_release_count, 0); - 
init_completion(&arm_state->vc_suspend_complete); - - init_completion(&arm_state->vc_resume_complete); - /* Initialise to 'done' state. We only want to block on resume - * completion while videocore is suspended. */ - set_resume_state(arm_state, VC_RESUME_RESUMED); - - init_completion(&arm_state->resume_blocker); - /* Initialise to 'done' state. We only want to block on this - * completion while resume is blocked */ - complete_all(&arm_state->resume_blocker); - - init_completion(&arm_state->blocked_blocker); - /* Initialise to 'done' state. We only want to block on this - * completion while things are waiting on the resume blocker */ - complete_all(&arm_state->blocked_blocker); - - arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS; - arm_state->suspend_timer_running = 0; arm_state->state = state; - timer_setup(&arm_state->suspend_timer, suspend_timer_callback, - 0); - arm_state->first_connect = 0; } return VCHIQ_SUCCESS; } -/* -** Functions to modify the state variables; -** set_suspend_state -** set_resume_state -** -** There are more state variables than we might like, so ensure they remain in -** step. Suspend and resume state are maintained separately, since most of -** these state machines can operate independently. However, there are a few -** states where state transitions in one state machine cause a reset to the -** other state machine. In addition, there are some completion events which -** need to occur on state machine reset and end-state(s), so these are also -** dealt with in these functions. -** -** In all states we set the state variable according to the input, but in some -** cases we perform additional steps outlined below; -** -** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time. -** The suspend completion is completed after any suspend -** attempt. When we reset the state machine we also reset -** the completion. This reset occurs when videocore is -** resumed, and also if we initiate suspend after a suspend -** failure. -** -** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for -** suspend - ie from this point on we must try to suspend -** before resuming can occur. We therefore also reset the -** resume state machine to VC_RESUME_IDLE in this state. -** -** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call -** complete_all on the suspend completion to notify -** anything waiting for suspend to happen. -** -** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also -** initiate resume, so no need to alter resume state. -** We call complete_all on the suspend completion to notify -** of suspend rejection. -** -** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the -** suspend completion and reset the resume state machine. -** -** VC_RESUME_IDLE - Initialise the resume completion at the same time. The -** resume completion is in it's 'done' state whenever -** videcore is running. Therefore, the VC_RESUME_IDLE -** state implies that videocore is suspended. -** Hence, any thread which needs to wait until videocore is -** running can wait on this completion - it will only block -** if videocore is suspended. -** -** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running. -** Call complete_all on the resume completion to unblock -** any threads waiting for resume. Also reset the suspend -** state machine to it's idle state. -** -** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists. 
-*/ - -void -set_suspend_state(struct vchiq_arm_state *arm_state, - enum vc_suspend_status new_state) -{ - /* set the state in all cases */ - arm_state->vc_suspend_state = new_state; - - /* state specific additional actions */ - switch (new_state) { - case VC_SUSPEND_FORCE_CANCELED: - complete_all(&arm_state->vc_suspend_complete); - break; - case VC_SUSPEND_REJECTED: - complete_all(&arm_state->vc_suspend_complete); - break; - case VC_SUSPEND_FAILED: - complete_all(&arm_state->vc_suspend_complete); - arm_state->vc_resume_state = VC_RESUME_RESUMED; - complete_all(&arm_state->vc_resume_complete); - break; - case VC_SUSPEND_IDLE: - reinit_completion(&arm_state->vc_suspend_complete); - break; - case VC_SUSPEND_REQUESTED: - break; - case VC_SUSPEND_IN_PROGRESS: - set_resume_state(arm_state, VC_RESUME_IDLE); - break; - case VC_SUSPEND_SUSPENDED: - complete_all(&arm_state->vc_suspend_complete); - break; - default: - BUG(); - break; - } -} - -void -set_resume_state(struct vchiq_arm_state *arm_state, - enum vc_resume_status new_state) -{ - /* set the state in all cases */ - arm_state->vc_resume_state = new_state; - - /* state specific additional actions */ - switch (new_state) { - case VC_RESUME_FAILED: - break; - case VC_RESUME_IDLE: - reinit_completion(&arm_state->vc_resume_complete); - break; - case VC_RESUME_REQUESTED: - break; - case VC_RESUME_IN_PROGRESS: - break; - case VC_RESUME_RESUMED: - complete_all(&arm_state->vc_resume_complete); - set_suspend_state(arm_state, VC_SUSPEND_IDLE); - break; - default: - BUG(); - break; - } -} - -/* should be called with the write lock held */ -inline void -start_suspend_timer(struct vchiq_arm_state *arm_state) -{ - del_timer(&arm_state->suspend_timer); - arm_state->suspend_timer.expires = jiffies + - msecs_to_jiffies(arm_state->suspend_timer_timeout); - add_timer(&arm_state->suspend_timer); - arm_state->suspend_timer_running = 1; -} - -/* should be called with the write lock held */ -static inline void -stop_suspend_timer(struct vchiq_arm_state *arm_state) -{ - if (arm_state->suspend_timer_running) { - del_timer(&arm_state->suspend_timer); - arm_state->suspend_timer_running = 0; - } -} - -static inline int -need_resume(struct vchiq_state *state) -{ - struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); - - return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) && - (arm_state->vc_resume_state < VC_RESUME_REQUESTED) && - vchiq_videocore_wanted(state); -} - -static inline void -unblock_resume(struct vchiq_arm_state *arm_state) -{ - complete_all(&arm_state->resume_blocker); - arm_state->resume_blocked = 0; -} - -/* Initiate suspend via slot handler. 
Should be called with the write lock - * held */ -enum vchiq_status -vchiq_arm_vcsuspend(struct vchiq_state *state) -{ - enum vchiq_status status = VCHIQ_ERROR; - struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); - - if (!arm_state) - goto out; - - vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); - status = VCHIQ_SUCCESS; - - switch (arm_state->vc_suspend_state) { - case VC_SUSPEND_REQUESTED: - vchiq_log_info(vchiq_susp_log_level, "%s: suspend already " - "requested", __func__); - break; - case VC_SUSPEND_IN_PROGRESS: - vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in " - "progress", __func__); - break; - - default: - /* We don't expect to be in other states, so log but continue - * anyway */ - vchiq_log_error(vchiq_susp_log_level, - "%s unexpected suspend state %s", __func__, - suspend_state_names[arm_state->vc_suspend_state + - VC_SUSPEND_NUM_OFFSET]); - /* fall through */ - case VC_SUSPEND_REJECTED: - case VC_SUSPEND_FAILED: - /* Ensure any idle state actions have been run */ - set_suspend_state(arm_state, VC_SUSPEND_IDLE); - /* fall through */ - case VC_SUSPEND_IDLE: - vchiq_log_info(vchiq_susp_log_level, - "%s: suspending", __func__); - set_suspend_state(arm_state, VC_SUSPEND_REQUESTED); - /* kick the slot handler thread to initiate suspend */ - request_poll(state, NULL, 0); - break; - } - -out: - vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status); - return status; -} - -void -vchiq_platform_check_suspend(struct vchiq_state *state) -{ - struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); - int susp = 0; - - if (!arm_state) - goto out; - - vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); - - write_lock_bh(&arm_state->susp_res_lock); - if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED && - arm_state->vc_resume_state == VC_RESUME_RESUMED) { - set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS); - susp = 1; - } - write_unlock_bh(&arm_state->susp_res_lock); - - if (susp) - vchiq_platform_suspend(state); - -out: - vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__); - return; -} - -void -vchiq_check_suspend(struct vchiq_state *state) -{ - struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); - - if (!arm_state) - goto out; - - vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); - - write_lock_bh(&arm_state->susp_res_lock); - if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED && - arm_state->first_connect && - !vchiq_videocore_wanted(state)) { - vchiq_arm_vcsuspend(state); - } - write_unlock_bh(&arm_state->susp_res_lock); - -out: - vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__); -} - -/* This function should be called with the write lock held */ -int -vchiq_check_resume(struct vchiq_state *state) -{ - struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); - int resume = 0; - - if (!arm_state) - goto out; - - vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); - - if (need_resume(state)) { - set_resume_state(arm_state, VC_RESUME_REQUESTED); - request_poll(state, NULL, 0); - resume = 1; - } - -out: - vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__); - return resume; -} - enum vchiq_status vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service, enum USE_TYPE_E use_type) @@ -2724,88 +2376,15 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service, } write_lock_bh(&arm_state->susp_res_lock); - while (arm_state->resume_blocked) { - /* If we call 'use' while force suspend is waiting for 
suspend, - * then we're about to block the thread which the force is - * waiting to complete, so we're bound to just time out. In this - * case, set the suspend state such that the wait will be - * canceled, so we can complete as quickly as possible. */ - if (arm_state->resume_blocked && arm_state->vc_suspend_state == - VC_SUSPEND_IDLE) { - set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED); - break; - } - /* If suspend is already in progress then we need to block */ - if (!try_wait_for_completion(&arm_state->resume_blocker)) { - /* Indicate that there are threads waiting on the resume - * blocker. These need to be allowed to complete before - * a _second_ call to force suspend can complete, - * otherwise low priority threads might never actually - * continue */ - arm_state->blocked_count++; - write_unlock_bh(&arm_state->susp_res_lock); - vchiq_log_info(vchiq_susp_log_level, "%s %s resume " - "blocked - waiting...", __func__, entity); - if (wait_for_completion_killable( - &arm_state->resume_blocker)) { - vchiq_log_error(vchiq_susp_log_level, "%s %s " - "wait for resume blocker interrupted", - __func__, entity); - ret = VCHIQ_ERROR; - write_lock_bh(&arm_state->susp_res_lock); - arm_state->blocked_count--; - write_unlock_bh(&arm_state->susp_res_lock); - goto out; - } - vchiq_log_info(vchiq_susp_log_level, "%s %s resume " - "unblocked", __func__, entity); - write_lock_bh(&arm_state->susp_res_lock); - if (--arm_state->blocked_count == 0) - complete_all(&arm_state->blocked_blocker); - } - } - - stop_suspend_timer(arm_state); - local_uc = ++arm_state->videocore_use_count; local_entity_uc = ++(*entity_uc); - /* If there's a pending request which hasn't yet been serviced then - * just clear it. If we're past VC_SUSPEND_REQUESTED state then - * vc_resume_complete will block until we either resume or fail to - * suspend */ - if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED) - set_suspend_state(arm_state, VC_SUSPEND_IDLE); - - if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) { - set_resume_state(arm_state, VC_RESUME_REQUESTED); - vchiq_log_info(vchiq_susp_log_level, - "%s %s count %d, state count %d", - __func__, entity, local_entity_uc, local_uc); - request_poll(state, NULL, 0); - } else - vchiq_log_trace(vchiq_susp_log_level, - "%s %s count %d, state count %d", - __func__, entity, *entity_uc, local_uc); + vchiq_log_trace(vchiq_susp_log_level, + "%s %s count %d, state count %d", + __func__, entity, *entity_uc, local_uc); write_unlock_bh(&arm_state->susp_res_lock); - /* Completion is in a done state when we're not suspended, so this won't - * block for the non-suspended case. 
*/ - if (!try_wait_for_completion(&arm_state->vc_resume_complete)) { - vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume", - __func__, entity); - if (wait_for_completion_killable( - &arm_state->vc_resume_complete)) { - vchiq_log_error(vchiq_susp_log_level, "%s %s wait for " - "resume interrupted", __func__, entity); - ret = VCHIQ_ERROR; - goto out; - } - vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__, - entity); - } - if (ret == VCHIQ_SUCCESS) { enum vchiq_status status = VCHIQ_SUCCESS; long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0); @@ -2860,24 +2439,10 @@ vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service) --arm_state->videocore_use_count; --(*entity_uc); - if (!vchiq_videocore_wanted(state)) { - if (vchiq_platform_use_suspend_timer() && - !arm_state->resume_blocked) { - /* Only use the timer if we're not trying to force - * suspend (=> resume_blocked) */ - start_suspend_timer(arm_state); - } else { - vchiq_log_info(vchiq_susp_log_level, - "%s %s count %d, state count %d - suspending", - __func__, entity, *entity_uc, - arm_state->videocore_use_count); - vchiq_arm_vcsuspend(state); - } - } else - vchiq_log_trace(vchiq_susp_log_level, - "%s %s count %d, state count %d", - __func__, entity, *entity_uc, - arm_state->videocore_use_count); + vchiq_log_trace(vchiq_susp_log_level, + "%s %s count %d, state count %d", + __func__, entity, *entity_uc, + arm_state->videocore_use_count); unlock: write_unlock_bh(&arm_state->susp_res_lock); @@ -2932,11 +2497,11 @@ vchiq_instance_get_use_count(struct vchiq_instance *instance) int use_count = 0, i; i = 0; - while ((service = next_service_by_instance(instance->state, - instance, &i))) { + rcu_read_lock(); + while ((service = __next_service_by_instance(instance->state, + instance, &i))) use_count += service->service_use_count; - unlock_service(service); - } + rcu_read_unlock(); return use_count; } @@ -2959,25 +2524,14 @@ vchiq_instance_set_trace(struct vchiq_instance *instance, int trace) int i; i = 0; - while ((service = next_service_by_instance(instance->state, - instance, &i))) { + rcu_read_lock(); + while ((service = __next_service_by_instance(instance->state, + instance, &i))) service->trace = trace; - unlock_service(service); - } + rcu_read_unlock(); instance->trace = (trace != 0); } -static void suspend_timer_callback(struct timer_list *t) -{ - struct vchiq_arm_state *arm_state = - from_timer(arm_state, t, suspend_timer); - struct vchiq_state *state = arm_state->state; - - vchiq_log_info(vchiq_susp_log_level, - "%s - suspend timer expired - check suspend", __func__); - vchiq_check_suspend(state); -} - enum vchiq_status vchiq_use_service(unsigned int handle) { @@ -3022,8 +2576,6 @@ vchiq_dump_service_use_state(struct vchiq_state *state) int only_nonzero = 0; static const char *nz = "<-- preventing suspend"; - enum vc_suspend_status vc_suspend_state; - enum vc_resume_status vc_resume_state; int peer_count; int vc_use_count; int active_services; @@ -3037,16 +2589,16 @@ vchiq_dump_service_use_state(struct vchiq_state *state) return; read_lock_bh(&arm_state->susp_res_lock); - vc_suspend_state = arm_state->vc_suspend_state; - vc_resume_state = arm_state->vc_resume_state; peer_count = arm_state->peer_use_count; vc_use_count = arm_state->videocore_use_count; active_services = state->unused_service; if (active_services > MAX_SERVICES) only_nonzero = 1; + rcu_read_lock(); for (i = 0; i < active_services; i++) { - struct vchiq_service *service_ptr = state->services[i]; + struct vchiq_service 
*service_ptr = + rcu_dereference(state->services[i]); if (!service_ptr) continue; @@ -3064,16 +2616,10 @@ vchiq_dump_service_use_state(struct vchiq_state *state) if (found >= MAX_SERVICES) break; } + rcu_read_unlock(); read_unlock_bh(&arm_state->susp_res_lock); - vchiq_log_warning(vchiq_susp_log_level, - "-- Videcore suspend state: %s --", - suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]); - vchiq_log_warning(vchiq_susp_log_level, - "-- Videcore resume state: %s --", - resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]); - if (only_nonzero) vchiq_log_warning(vchiq_susp_log_level, "Too many active " "services (%d). Only dumping up to first %d services " @@ -3093,8 +2639,6 @@ vchiq_dump_service_use_state(struct vchiq_state *state) "--- Overall vchiq instance use count %d", vc_use_count); kfree(service_data); - - vchiq_dump_platform_use_state(state); } enum vchiq_status @@ -3118,24 +2662,16 @@ vchiq_check_service(struct vchiq_service *service) if (ret == VCHIQ_ERROR) { vchiq_log_error(vchiq_susp_log_level, "%s ERROR - %c%c%c%c:%d service count %d, " - "state count %d, videocore suspend state %s", __func__, + "state count %d", __func__, VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id, service->service_use_count, - arm_state->videocore_use_count, - suspend_state_names[arm_state->vc_suspend_state + - VC_SUSPEND_NUM_OFFSET]); + arm_state->videocore_use_count); vchiq_dump_service_use_state(service->state); } out: return ret; } -/* stub functions */ -void vchiq_on_remote_use_active(struct vchiq_state *state) -{ - (void)state; -} - void vchiq_platform_conn_state_changed(struct vchiq_state *state, enum vchiq_connstate oldstate, enum vchiq_connstate newstate) diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h index 19d2a2eefb6a..0784c5002417 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h @@ -14,27 +14,8 @@ #include "vchiq_core.h" #include "vchiq_debugfs.h" -enum vc_suspend_status { - VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */ - VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */ - VC_SUSPEND_FAILED = -1, /* Videocore suspend failed */ - VC_SUSPEND_IDLE = 0, /* VC active, no suspend actions */ - VC_SUSPEND_REQUESTED, /* User has requested suspend */ - VC_SUSPEND_IN_PROGRESS, /* Slot handler has recvd suspend request */ - VC_SUSPEND_SUSPENDED /* Videocore suspend succeeded */ -}; - -enum vc_resume_status { - VC_RESUME_FAILED = -1, /* Videocore resume failed */ - VC_RESUME_IDLE = 0, /* VC suspended, no resume actions */ - VC_RESUME_REQUESTED, /* User has requested resume */ - VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */ - VC_RESUME_RESUMED /* Videocore resumed successfully (active) */ -}; - enum USE_TYPE_E { USE_TYPE_SERVICE, - USE_TYPE_SERVICE_NO_RESUME, USE_TYPE_VCHIQ }; @@ -46,19 +27,9 @@ struct vchiq_arm_state { atomic_t ka_use_ack_count; atomic_t ka_release_count; - struct completion vc_suspend_complete; - struct completion vc_resume_complete; - rwlock_t susp_res_lock; - enum vc_suspend_status vc_suspend_state; - enum vc_resume_status vc_resume_state; - - unsigned int wake_address; struct vchiq_state *state; - struct timer_list suspend_timer; - int suspend_timer_timeout; - int suspend_timer_running; /* Global use count for videocore. ** This is equal to the sum of the use counts for all services. 
When @@ -72,27 +43,11 @@ struct vchiq_arm_state { */ int peer_use_count; - /* Flag to indicate whether resume is blocked. This happens when the - ** ARM is suspending - */ - struct completion resume_blocker; - int resume_blocked; - struct completion blocked_blocker; - int blocked_count; - - int autosuspend_override; - /* Flag to indicate that the first vchiq connect has made it through. ** This means that both sides should be fully ready, and we should ** be able to suspend after this point. */ int first_connect; - - unsigned long long suspend_start_time; - unsigned long long sleep_start_time; - unsigned long long resume_start_time; - unsigned long long last_wake_time; - }; struct vchiq_drvdata { @@ -110,18 +65,9 @@ extern struct vchiq_state * vchiq_get_state(void); extern enum vchiq_status -vchiq_arm_vcsuspend(struct vchiq_state *state); - -extern enum vchiq_status -vchiq_arm_vcresume(struct vchiq_state *state); - -extern enum vchiq_status vchiq_arm_init_state(struct vchiq_state *state, struct vchiq_arm_state *arm_state); -extern int -vchiq_check_resume(struct vchiq_state *state); - extern void vchiq_check_suspend(struct vchiq_state *state); enum vchiq_status @@ -133,15 +79,6 @@ vchiq_release_service(unsigned int handle); extern enum vchiq_status vchiq_check_service(struct vchiq_service *service); -extern enum vchiq_status -vchiq_platform_suspend(struct vchiq_state *state); - -extern int -vchiq_platform_videocore_wanted(struct vchiq_state *state); - -extern int -vchiq_platform_use_suspend_timer(void); - extern void vchiq_dump_platform_use_state(struct vchiq_state *state); @@ -151,8 +88,6 @@ vchiq_dump_service_use_state(struct vchiq_state *state); extern struct vchiq_arm_state* vchiq_platform_get_arm_state(struct vchiq_state *state); -extern int -vchiq_videocore_wanted(struct vchiq_state *state); extern enum vchiq_status vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service, @@ -176,15 +111,4 @@ vchiq_instance_get_trace(struct vchiq_instance *instance); extern void vchiq_instance_set_trace(struct vchiq_instance *instance, int trace); -extern void -set_suspend_state(struct vchiq_arm_state *arm_state, - enum vc_suspend_status new_state); - -extern void -set_resume_state(struct vchiq_arm_state *arm_state, - enum vc_resume_status new_state); - -extern void -start_suspend_timer(struct vchiq_arm_state *arm_state); - #endif /* VCHIQ_ARM_H */ diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index 76351078affb..edcd97373809 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -1,6 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright (c) 2010-2012 Broadcom. All rights reserved. 
*/ +#include <linux/kref.h> +#include <linux/rcupdate.h> + #include "vchiq_core.h" #define VCHIQ_SLOT_HANDLER_STACK 8192 @@ -54,7 +57,6 @@ int vchiq_core_log_level = VCHIQ_LOG_DEFAULT; int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT; int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT; -static DEFINE_SPINLOCK(service_spinlock); DEFINE_SPINLOCK(bulk_waiter_spinlock); static DEFINE_SPINLOCK(quota_spinlock); @@ -136,44 +138,41 @@ find_service_by_handle(unsigned int handle) { struct vchiq_service *service; - spin_lock(&service_spinlock); + rcu_read_lock(); service = handle_to_service(handle); - if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) && - (service->handle == handle)) { - WARN_ON(service->ref_count == 0); - service->ref_count++; - } else - service = NULL; - spin_unlock(&service_spinlock); - - if (!service) - vchiq_log_info(vchiq_core_log_level, - "Invalid service handle 0x%x", handle); - - return service; + if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && + service->handle == handle && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + rcu_read_unlock(); + vchiq_log_info(vchiq_core_log_level, + "Invalid service handle 0x%x", handle); + return NULL; } struct vchiq_service * find_service_by_port(struct vchiq_state *state, int localport) { - struct vchiq_service *service = NULL; if ((unsigned int)localport <= VCHIQ_PORT_MAX) { - spin_lock(&service_spinlock); - service = state->services[localport]; - if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) { - WARN_ON(service->ref_count == 0); - service->ref_count++; - } else - service = NULL; - spin_unlock(&service_spinlock); - } - - if (!service) - vchiq_log_info(vchiq_core_log_level, - "Invalid port %d", localport); + struct vchiq_service *service; - return service; + rcu_read_lock(); + service = rcu_dereference(state->services[localport]); + if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + rcu_read_unlock(); + } + vchiq_log_info(vchiq_core_log_level, + "Invalid port %d", localport); + return NULL; } struct vchiq_service * @@ -182,22 +181,20 @@ find_service_for_instance(struct vchiq_instance *instance, { struct vchiq_service *service; - spin_lock(&service_spinlock); + rcu_read_lock(); service = handle_to_service(handle); - if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) && - (service->handle == handle) && - (service->instance == instance)) { - WARN_ON(service->ref_count == 0); - service->ref_count++; - } else - service = NULL; - spin_unlock(&service_spinlock); - - if (!service) - vchiq_log_info(vchiq_core_log_level, - "Invalid service handle 0x%x", handle); - - return service; + if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && + service->handle == handle && + service->instance == instance && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + rcu_read_unlock(); + vchiq_log_info(vchiq_core_log_level, + "Invalid service handle 0x%x", handle); + return NULL; } struct vchiq_service * @@ -206,121 +203,125 @@ find_closed_service_for_instance(struct vchiq_instance *instance, { struct vchiq_service *service; - spin_lock(&service_spinlock); + rcu_read_lock(); service = handle_to_service(handle); if (service && - ((service->srvstate == VCHIQ_SRVSTATE_FREE) || - (service->srvstate == VCHIQ_SRVSTATE_CLOSED)) && 
- (service->handle == handle) && - (service->instance == instance)) { - WARN_ON(service->ref_count == 0); - service->ref_count++; - } else - service = NULL; - spin_unlock(&service_spinlock); - - if (!service) - vchiq_log_info(vchiq_core_log_level, - "Invalid service handle 0x%x", handle); - + (service->srvstate == VCHIQ_SRVSTATE_FREE || + service->srvstate == VCHIQ_SRVSTATE_CLOSED) && + service->handle == handle && + service->instance == instance && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + rcu_read_unlock(); + vchiq_log_info(vchiq_core_log_level, + "Invalid service handle 0x%x", handle); return service; } struct vchiq_service * -next_service_by_instance(struct vchiq_state *state, struct vchiq_instance *instance, - int *pidx) +__next_service_by_instance(struct vchiq_state *state, + struct vchiq_instance *instance, + int *pidx) { struct vchiq_service *service = NULL; int idx = *pidx; - spin_lock(&service_spinlock); while (idx < state->unused_service) { - struct vchiq_service *srv = state->services[idx++]; + struct vchiq_service *srv; - if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) && - (srv->instance == instance)) { + srv = rcu_dereference(state->services[idx++]); + if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE && + srv->instance == instance) { service = srv; - WARN_ON(service->ref_count == 0); - service->ref_count++; break; } } - spin_unlock(&service_spinlock); *pidx = idx; + return service; +} + +struct vchiq_service * +next_service_by_instance(struct vchiq_state *state, + struct vchiq_instance *instance, + int *pidx) +{ + struct vchiq_service *service; + rcu_read_lock(); + while (1) { + service = __next_service_by_instance(state, instance, pidx); + if (!service) + break; + if (kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + break; + } + } + rcu_read_unlock(); return service; } void lock_service(struct vchiq_service *service) { - spin_lock(&service_spinlock); - WARN_ON(!service); - if (service) { - WARN_ON(service->ref_count == 0); - service->ref_count++; + if (!service) { + WARN(1, "%s service is NULL\n", __func__); + return; } - spin_unlock(&service_spinlock); + kref_get(&service->ref_count); +} + +static void service_release(struct kref *kref) +{ + struct vchiq_service *service = + container_of(kref, struct vchiq_service, ref_count); + struct vchiq_state *state = service->state; + + WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE); + rcu_assign_pointer(state->services[service->localport], NULL); + if (service->userdata_term) + service->userdata_term(service->base.userdata); + kfree_rcu(service, rcu); } void unlock_service(struct vchiq_service *service) { - spin_lock(&service_spinlock); if (!service) { WARN(1, "%s: service is NULL\n", __func__); - goto unlock; - } - if (!service->ref_count) { - WARN(1, "%s: ref_count is zero\n", __func__); - goto unlock; - } - service->ref_count--; - if (!service->ref_count) { - struct vchiq_state *state = service->state; - - WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE); - state->services[service->localport] = NULL; - } else { - service = NULL; + return; } -unlock: - spin_unlock(&service_spinlock); - - if (service && service->userdata_term) - service->userdata_term(service->base.userdata); - - kfree(service); + kref_put(&service->ref_count, service_release); } int vchiq_get_client_id(unsigned int handle) { - struct vchiq_service *service = find_service_by_handle(handle); + struct vchiq_service *service; int id; 
+ rcu_read_lock(); + service = handle_to_service(handle); id = service ? service->client_id : 0; - if (service) - unlock_service(service); - + rcu_read_unlock(); return id; } void * vchiq_get_service_userdata(unsigned int handle) { - struct vchiq_service *service = handle_to_service(handle); - - return service ? service->base.userdata : NULL; -} - -int -vchiq_get_service_fourcc(unsigned int handle) -{ - struct vchiq_service *service = handle_to_service(handle); + void *userdata; + struct vchiq_service *service; - return service ? service->base.fourcc : 0; + rcu_read_lock(); + service = handle_to_service(handle); + userdata = service ? service->base.userdata : NULL; + rcu_read_unlock(); + return userdata; } static void @@ -468,19 +469,23 @@ get_listening_service(struct vchiq_state *state, int fourcc) WARN_ON(fourcc == VCHIQ_FOURCC_INVALID); + rcu_read_lock(); for (i = 0; i < state->unused_service; i++) { - struct vchiq_service *service = state->services[i]; + struct vchiq_service *service; + service = rcu_dereference(state->services[i]); if (service && - (service->public_fourcc == fourcc) && - ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) || - ((service->srvstate == VCHIQ_SRVSTATE_OPEN) && - (service->remoteport == VCHIQ_PORT_FREE)))) { - lock_service(service); + service->public_fourcc == fourcc && + (service->srvstate == VCHIQ_SRVSTATE_LISTENING || + (service->srvstate == VCHIQ_SRVSTATE_OPEN && + service->remoteport == VCHIQ_PORT_FREE)) && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); return service; } } - + rcu_read_unlock(); return NULL; } @@ -490,15 +495,20 @@ get_connected_service(struct vchiq_state *state, unsigned int port) { int i; + rcu_read_lock(); for (i = 0; i < state->unused_service; i++) { - struct vchiq_service *service = state->services[i]; - - if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN) - && (service->remoteport == port)) { - lock_service(service); + struct vchiq_service *service = + rcu_dereference(state->services[i]); + + if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN && + service->remoteport == port && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); return service; } } + rcu_read_unlock(); return NULL; } @@ -1798,7 +1808,6 @@ parse_rx_slots(struct vchiq_state *state) } /* At this point slot_mutex is held */ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED); - vchiq_platform_paused(state); break; case VCHIQ_MSG_RESUME: vchiq_log_trace(vchiq_core_log_level, @@ -1807,7 +1816,6 @@ parse_rx_slots(struct vchiq_state *state) /* Release the slot mutex */ mutex_unlock(&state->slot_mutex); vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED); - vchiq_platform_resumed(state); break; case VCHIQ_MSG_REMOTE_USE: @@ -1817,7 +1825,6 @@ parse_rx_slots(struct vchiq_state *state) vchiq_on_remote_release(state); break; case VCHIQ_MSG_REMOTE_USE_ACTIVE: - vchiq_on_remote_use_active(state); break; default: @@ -1869,9 +1876,6 @@ slot_handler_func(void *v) DEBUG_TRACE(SLOT_HANDLER_LINE); if (state->poll_needed) { - /* Check if we need to suspend - may change our - * conn_state */ - vchiq_platform_check_suspend(state); state->poll_needed = 0; @@ -1897,10 +1901,6 @@ slot_handler_func(void *v) } break; - case VCHIQ_CONNSTATE_PAUSED: - vchiq_platform_resume(state); - break; - case VCHIQ_CONNSTATE_RESUMING: if (queue_message(state, NULL, VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0), @@ -1908,7 +1908,6 @@ slot_handler_func(void *v) != VCHIQ_RETRY) { 
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED); - vchiq_platform_resumed(state); } else { /* This should really be impossible, ** since the PAUSE should have flushed @@ -1918,11 +1917,6 @@ slot_handler_func(void *v) "message"); } break; - - case VCHIQ_CONNSTATE_PAUSE_TIMEOUT: - case VCHIQ_CONNSTATE_RESUME_TIMEOUT: - vchiq_platform_handle_timeout(state); - break; default: break; } @@ -2284,7 +2278,7 @@ vchiq_add_service_internal(struct vchiq_state *state, vchiq_userdata_term userdata_term) { struct vchiq_service *service; - struct vchiq_service **pservice = NULL; + struct vchiq_service __rcu **pservice = NULL; struct vchiq_service_quota *service_quota; int i; @@ -2296,7 +2290,7 @@ vchiq_add_service_internal(struct vchiq_state *state, service->base.callback = params->callback; service->base.userdata = params->userdata; service->handle = VCHIQ_SERVICE_HANDLE_INVALID; - service->ref_count = 1; + kref_init(&service->ref_count); service->srvstate = VCHIQ_SRVSTATE_FREE; service->userdata_term = userdata_term; service->localport = VCHIQ_PORT_FREE; @@ -2322,7 +2316,7 @@ vchiq_add_service_internal(struct vchiq_state *state, mutex_init(&service->bulk_mutex); memset(&service->stats, 0, sizeof(service->stats)); - /* Although it is perfectly possible to use service_spinlock + /* Although it is perfectly possible to use a spinlock ** to protect the creation of services, it is overkill as it ** disables interrupts while the array is searched. ** The only danger is of another thread trying to create a @@ -2340,17 +2334,17 @@ vchiq_add_service_internal(struct vchiq_state *state, if (srvstate == VCHIQ_SRVSTATE_OPENING) { for (i = 0; i < state->unused_service; i++) { - struct vchiq_service *srv = state->services[i]; - - if (!srv) { + if (!rcu_access_pointer(state->services[i])) { pservice = &state->services[i]; break; } } } else { + rcu_read_lock(); for (i = (state->unused_service - 1); i >= 0; i--) { - struct vchiq_service *srv = state->services[i]; + struct vchiq_service *srv; + srv = rcu_dereference(state->services[i]); if (!srv) pservice = &state->services[i]; else if ((srv->public_fourcc == params->fourcc) @@ -2363,6 +2357,7 @@ vchiq_add_service_internal(struct vchiq_state *state, break; } } + rcu_read_unlock(); } if (pservice) { @@ -2374,7 +2369,7 @@ vchiq_add_service_internal(struct vchiq_state *state, (state->id * VCHIQ_MAX_SERVICES) | service->localport; handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES; - *pservice = service; + rcu_assign_pointer(*pservice, service); if (pservice == &state->services[state->unused_service]) state->unused_service++; } @@ -2437,13 +2432,13 @@ vchiq_open_service_internal(struct vchiq_service *service, int client_id) status = VCHIQ_RETRY; vchiq_release_service_internal(service); } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) && - (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) { + (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) { if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) vchiq_log_error(vchiq_core_log_level, - "%d: osi - srvstate = %s (ref %d)", - service->state->id, - srvstate_names[service->srvstate], - service->ref_count); + "%d: osi - srvstate = %s (ref %u)", + service->state->id, + srvstate_names[service->srvstate], + kref_read(&service->ref_count)); status = VCHIQ_ERROR; VCHIQ_SERVICE_STATS_INC(service, error_count); vchiq_release_service_internal(service); @@ -3449,10 +3444,13 @@ int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service) char buf[80]; int len; int err; + unsigned int ref_count; + /*Don't include the 
lock just taken*/ + ref_count = kref_read(&service->ref_count) - 1; len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)", - service->localport, srvstate_names[service->srvstate], - service->ref_count - 1); /*Don't include the lock just taken*/ + service->localport, srvstate_names[service->srvstate], + ref_count); if (service->srvstate != VCHIQ_SRVSTATE_FREE) { char remoteport[30]; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h index c31f953a9986..cedd8e721aae 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h @@ -7,6 +7,8 @@ #include <linux/mutex.h> #include <linux/completion.h> #include <linux/kthread.h> +#include <linux/kref.h> +#include <linux/rcupdate.h> #include <linux/wait.h> #include "vchiq_cfg.h" @@ -251,7 +253,8 @@ struct vchiq_slot_info { struct vchiq_service { struct vchiq_service_base base; unsigned int handle; - unsigned int ref_count; + struct kref ref_count; + struct rcu_head rcu; int srvstate; vchiq_userdata_term userdata_term; unsigned int localport; @@ -464,7 +467,7 @@ struct vchiq_state { int error_count; } stats; - struct vchiq_service *services[VCHIQ_MAX_SERVICES]; + struct vchiq_service __rcu *services[VCHIQ_MAX_SERVICES]; struct vchiq_service_quota service_quotas[VCHIQ_MAX_SERVICES]; struct vchiq_slot_info slot_info[VCHIQ_MAX_SLOTS]; @@ -545,12 +548,13 @@ request_poll(struct vchiq_state *state, struct vchiq_service *service, static inline struct vchiq_service * handle_to_service(unsigned int handle) { + int idx = handle & (VCHIQ_MAX_SERVICES - 1); struct vchiq_state *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) & (VCHIQ_MAX_STATES - 1)]; + if (!state) return NULL; - - return state->services[handle & (VCHIQ_MAX_SERVICES - 1)]; + return rcu_dereference(state->services[idx]); } extern struct vchiq_service * @@ -568,7 +572,13 @@ find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle); extern struct vchiq_service * -next_service_by_instance(struct vchiq_state *state, struct vchiq_instance *instance, +__next_service_by_instance(struct vchiq_state *state, + struct vchiq_instance *instance, + int *pidx); + +extern struct vchiq_service * +next_service_by_instance(struct vchiq_state *state, + struct vchiq_instance *instance, int *pidx); extern void @@ -590,18 +600,6 @@ vchiq_complete_bulk(struct vchiq_bulk *bulk); extern void remote_event_signal(struct remote_event *event); -void -vchiq_platform_check_suspend(struct vchiq_state *state); - -extern void -vchiq_platform_paused(struct vchiq_state *state); - -extern enum vchiq_status -vchiq_platform_resume(struct vchiq_state *state); - -extern void -vchiq_platform_resumed(struct vchiq_state *state); - extern int vchiq_dump(void *dump_context, const char *str, int len); @@ -648,9 +646,6 @@ vchiq_platform_conn_state_changed(struct vchiq_state *state, enum vchiq_connstate newstate); extern void -vchiq_platform_handle_timeout(struct vchiq_state *state); - -extern void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate); extern void diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h index 07c6a3db5ab6..39b77ea19210 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h @@ -13,7 +13,6 @@ #define 
VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \ (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3)) #define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service) -#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service) enum vchiq_reason { VCHIQ_SERVICE_OPENED, /* service, -, - */ @@ -128,7 +127,6 @@ extern enum vchiq_status vchiq_bulk_receive_handle(unsigned int service, enum vchiq_bulk_mode mode); extern int vchiq_get_client_id(unsigned int service); extern void *vchiq_get_service_userdata(unsigned int service); -extern int vchiq_get_service_fourcc(unsigned int service); extern void vchiq_get_config(struct vchiq_config *config); extern enum vchiq_status vchiq_set_service_option(unsigned int service, enum vchiq_service_option option, int value); diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h index 25867cb9d13d..337266add6b2 100644 --- a/drivers/staging/vt6655/card.h +++ b/drivers/staging/vt6655/card.h @@ -63,6 +63,6 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type); bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate, u64 qwBSSTimestamp); bool CARDbSetBeaconPeriod(struct vnt_private *priv, - unsigned short wBeaconInterval); + unsigned short wBeaconInterval); #endif /* __CARD_H__ */ diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index f69fc687d4c3..5c86cc60eb5c 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -133,7 +133,8 @@ static int device_init_td1_ring(struct vnt_private *priv); static int device_rx_srv(struct vnt_private *priv, unsigned int idx); static int device_tx_srv(struct vnt_private *priv, unsigned int idx); static bool device_alloc_rx_buf(struct vnt_private *, struct vnt_rx_desc *); -static void device_free_rx_buf(struct vnt_private *priv, struct vnt_rx_desc *rd); +static void device_free_rx_buf(struct vnt_private *priv, + struct vnt_rx_desc *rd); static void device_init_registers(struct vnt_private *priv); static void device_free_tx_buf(struct vnt_private *, struct vnt_tx_desc *); static void device_free_td0_ring(struct vnt_private *priv); @@ -442,7 +443,10 @@ static bool device_init_rings(struct vnt_private *priv) /*allocate all RD/TD rings a single pool*/ vir_pool = dma_alloc_coherent(&priv->pcid->dev, - priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc), + priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + + priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc), &priv->pool_dma, GFP_ATOMIC); if (!vir_pool) { dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n"); diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c index bfd598a93b04..6b0407694e54 100644 --- a/drivers/staging/vt6655/power.c +++ b/drivers/staging/vt6655/power.c @@ -58,14 +58,11 @@ void PSvEnablePowerSaving(struct vnt_private *priv, if (priv->op_mode != NL80211_IFTYPE_ADHOC) { /* set AID */ VNSvOutPortW(priv->PortOffset + MAC_REG_AIDATIM, wAID); - } else { - /* set ATIM Window */ -#if 0 /* TODO atim window */ - MACvWriteATIMW(priv->PortOffset, pMgmt->wCurrATIMWindow); -#endif } + /* Set AutoSleep */ MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP); + /* Set HWUTSF */ MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, 
TFTCTL_HWUTSF); @@ -101,10 +98,13 @@ void PSvDisablePowerSaving(struct vnt_private *priv) { /* disable power saving hw function */ MACbPSWakeup(priv); + /* clear AutoSleep */ MACvRegBitsOff(priv->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP); + /* clear HWUTSF */ MACvRegBitsOff(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF); + /* set always listen beacon */ MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN); diff --git a/drivers/staging/vt6656/Makefile b/drivers/staging/vt6656/Makefile index b64c0d87f612..375f54e9f58b 100644 --- a/drivers/staging/vt6656/Makefile +++ b/drivers/staging/vt6656/Makefile @@ -9,13 +9,11 @@ vt6656_stage-y += main_usb.o \ baseband.o \ wcmd.o\ rxtx.o \ - dpc.o \ power.o \ key.o \ rf.o \ usbpipe.o \ channel.o \ - firmware.o \ - int.o + firmware.o obj-$(CONFIG_VT6656) += vt6656_stage.o diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c index f18e059ce66b..a19a563d8bcc 100644 --- a/drivers/staging/vt6656/baseband.c +++ b/drivers/staging/vt6656/baseband.c @@ -22,6 +22,8 @@ * */ +#include <linux/bits.h> +#include <linux/kernel.h> #include "mac.h" #include "baseband.h" #include "rf.h" @@ -132,7 +134,6 @@ unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type, { unsigned int frame_time; unsigned int preamble; - unsigned int tmp; unsigned int rate = 0; if (tx_rate > RATE_54M) @@ -146,20 +147,11 @@ unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type, else preamble = 192; - frame_time = (frame_length * 80) / rate; - tmp = (frame_time * rate) / 80; - - if (frame_length != tmp) - frame_time++; - + frame_time = DIV_ROUND_UP(frame_length * 80, rate); return preamble + frame_time; } - frame_time = (frame_length * 8 + 22) / rate; - tmp = ((frame_time * rate) - 22) / 8; - - if (frame_length != tmp) - frame_time++; + frame_time = DIV_ROUND_UP(frame_length * 8 + 22, rate); frame_time = frame_time * 4; if (pkt_type != PK_TYPE_11A) @@ -213,11 +205,7 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length, break; case RATE_5M: - count = (bit_count * 10) / 55; - tmp = (count * 55) / 10; - - if (tmp != bit_count) - count++; + count = DIV_ROUND_UP(bit_count * 10, 55); if (preamble_type == 1) phy->signal = 0x0a; @@ -367,9 +355,6 @@ int vnt_vt3184_init(struct vnt_private *priv) int ret = 0; u16 length; u8 *addr; - u8 *agc; - u16 length_agc; - u8 array[256]; u8 data; ret = vnt_control_in(priv, MESSAGE_TYPE_READ, 0, MESSAGE_REQUEST_EEPROM, @@ -386,8 +371,6 @@ int vnt_vt3184_init(struct vnt_private *priv) priv->bb_rx_conf = vnt_vt3184_al2230[10]; length = sizeof(vnt_vt3184_al2230); addr = vnt_vt3184_al2230; - agc = vnt_vt3184_agc; - length_agc = sizeof(vnt_vt3184_agc); priv->bb_vga[0] = 0x1C; priv->bb_vga[1] = 0x10; @@ -398,8 +381,6 @@ int vnt_vt3184_init(struct vnt_private *priv) priv->bb_rx_conf = vnt_vt3184_al2230[10]; length = sizeof(vnt_vt3184_al2230); addr = vnt_vt3184_al2230; - agc = vnt_vt3184_agc; - length_agc = sizeof(vnt_vt3184_agc); addr[0xd7] = 0x06; @@ -413,8 +394,6 @@ int vnt_vt3184_init(struct vnt_private *priv) priv->bb_rx_conf = vnt_vt3184_vt3226d0[10]; length = sizeof(vnt_vt3184_vt3226d0); addr = vnt_vt3184_vt3226d0; - agc = vnt_vt3184_agc; - length_agc = sizeof(vnt_vt3184_agc); priv->bb_vga[0] = 0x20; priv->bb_vga[1] = 0x10; @@ -430,8 +409,6 @@ int vnt_vt3184_init(struct vnt_private *priv) priv->bb_rx_conf = vnt_vt3184_vt3226d0[10]; length = sizeof(vnt_vt3184_vt3226d0); addr = vnt_vt3184_vt3226d0; - agc = vnt_vt3184_agc; - length_agc = sizeof(vnt_vt3184_agc); priv->bb_vga[0] = 0x20; priv->bb_vga[1] = 
0x10; @@ -447,17 +424,14 @@ int vnt_vt3184_init(struct vnt_private *priv) goto end; } - memcpy(array, addr, length); - ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE, - MESSAGE_REQUEST_BBREG, length, array); + MESSAGE_REQUEST_BBREG, length, addr); if (ret) goto end; - memcpy(array, agc, length_agc); - ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0, - MESSAGE_REQUEST_BBAGC, length_agc, array); + MESSAGE_REQUEST_BBAGC, + sizeof(vnt_vt3184_agc), vnt_vt3184_agc); if (ret) goto end; @@ -468,7 +442,7 @@ int vnt_vt3184_init(struct vnt_private *priv) if (ret) goto end; - ret = vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01); + ret = vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, BIT(0)); if (ret) goto end; } else if (priv->rf_type == RF_VT3226D0) { @@ -477,7 +451,7 @@ int vnt_vt3184_init(struct vnt_private *priv) if (ret) goto end; - ret = vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01); + ret = vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, BIT(0)); if (ret) goto end; } diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c index 7958fc165462..dc3ab10eb630 100644 --- a/drivers/staging/vt6656/card.c +++ b/drivers/staging/vt6656/card.c @@ -26,6 +26,7 @@ * */ +#include <linux/bits.h> #include "device.h" #include "card.h" #include "baseband.h" @@ -63,7 +64,8 @@ void vnt_set_channel(struct vnt_private *priv, u32 connection_channel) vnt_mac_reg_bits_on(priv, MAC_REG_MACCR, MACCR_CLRNAV); /* Set Channel[7] = 0 to tell H/W channel is changing now. */ - vnt_mac_reg_bits_off(priv, MAC_REG_CHANNEL, 0xb0); + vnt_mac_reg_bits_off(priv, MAC_REG_CHANNEL, + (BIT(7) | BIT(5) | BIT(4))); vnt_control_out(priv, MESSAGE_TYPE_SELECT_CHANNEL, connection_channel, 0, 0, NULL); diff --git a/drivers/staging/vt6656/desc.h b/drivers/staging/vt6656/desc.h index 3a83a9ea9a2a..703597a911f4 100644 --- a/drivers/staging/vt6656/desc.h +++ b/drivers/staging/vt6656/desc.h @@ -18,6 +18,7 @@ #ifndef __DESC_H__ #define __DESC_H__ +#include <linux/bits.h> #include <linux/types.h> #include <linux/mm.h> @@ -36,32 +37,32 @@ /* * bits in the RSR register */ -#define RSR_ADDRBROAD 0x80 -#define RSR_ADDRMULTI 0x40 +#define RSR_ADDRBROAD BIT(7) +#define RSR_ADDRMULTI BIT(6) #define RSR_ADDRUNI 0x00 -#define RSR_IVLDTYP 0x20 /* invalid packet type */ -#define RSR_IVLDLEN 0x10 /* invalid len (> 2312 byte) */ -#define RSR_BSSIDOK 0x08 -#define RSR_CRCOK 0x04 -#define RSR_BCNSSIDOK 0x02 -#define RSR_ADDROK 0x01 +#define RSR_IVLDTYP BIT(5) /* invalid packet type */ +#define RSR_IVLDLEN BIT(4) /* invalid len (> 2312 byte) */ +#define RSR_BSSIDOK BIT(3) +#define RSR_CRCOK BIT(2) +#define RSR_BCNSSIDOK BIT(1) +#define RSR_ADDROK BIT(0) /* * bits in the new RSR register */ -#define NEWRSR_DECRYPTOK 0x10 -#define NEWRSR_CFPIND 0x08 -#define NEWRSR_HWUTSF 0x04 -#define NEWRSR_BCNHITAID 0x02 -#define NEWRSR_BCNHITAID0 0x01 +#define NEWRSR_DECRYPTOK BIT(4) +#define NEWRSR_CFPIND BIT(3) +#define NEWRSR_HWUTSF BIT(2) +#define NEWRSR_BCNHITAID BIT(1) +#define NEWRSR_BCNHITAID0 BIT(0) /* * bits in the TSR register */ -#define TSR_RETRYTMO 0x08 -#define TSR_TMO 0x04 -#define TSR_ACKDATA 0x02 -#define TSR_VALID 0x01 +#define TSR_RETRYTMO BIT(3) +#define TSR_TMO BIT(2) +#define TSR_ACKDATA BIT(1) +#define TSR_VALID BIT(0) #define FIFOCTL_AUTO_FB_1 0x1000 #define FIFOCTL_AUTO_FB_0 0x0800 diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h index fe6c11266123..e6ee9411f080 100644 --- a/drivers/staging/vt6656/device.h +++ b/drivers/staging/vt6656/device.h @@ -16,6 +16,7 @@ #ifndef __DEVICE_H__ #define 
__DEVICE_H__ +#include <linux/bits.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -129,12 +130,12 @@ #define EEP_OFS_OFDMA_PWR_TBL 0x50 /* Bits in EEP_OFS_ANTENNA */ -#define EEP_ANTENNA_MAIN 0x1 -#define EEP_ANTENNA_AUX 0x2 -#define EEP_ANTINV 0x4 +#define EEP_ANTENNA_MAIN BIT(0) +#define EEP_ANTENNA_AUX BIT(1) +#define EEP_ANTINV BIT(2) /* Bits in EEP_OFS_RADIOCTL */ -#define EEP_RADIOCTL_ENABLE 0x80 +#define EEP_RADIOCTL_ENABLE BIT(7) /* control commands */ #define MESSAGE_TYPE_READ 0x1 @@ -227,7 +228,6 @@ struct vnt_rcb { void *priv; struct urb *urb; struct sk_buff *skb; - int in_use; }; /* used to track bulk out irps */ @@ -244,7 +244,6 @@ struct vnt_usb_send_context { u8 pkt_no; u8 pkt_type; u8 need_ack; - u8 fb_option; bool in_use; unsigned char data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS]; }; @@ -254,16 +253,6 @@ struct vnt_usb_send_context { */ struct vnt_interrupt_buffer { u8 *data_buf; - bool in_use; -}; - -/*++ NDIS related */ - -enum { - STATUS_SUCCESS = 0, - STATUS_FAILURE, - STATUS_RESOURCES, - STATUS_PENDING, }; /* flags for options */ diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c deleted file mode 100644 index 821aae8ca402..000000000000 --- a/drivers/staging/vt6656/dpc.c +++ /dev/null @@ -1,124 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. - * All rights reserved. - * - * File: dpc.c - * - * Purpose: handle dpc rx functions - * - * Author: Lyndon Chen - * - * Date: May 20, 2003 - * - * Functions: - * - * Revision History: - * - */ - -#include "dpc.h" -#include "device.h" -#include "mac.h" -#include "baseband.h" -#include "rf.h" - -int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb, - unsigned long bytes_received) -{ - struct ieee80211_hw *hw = priv->hw; - struct ieee80211_supported_band *sband; - struct sk_buff *skb; - struct ieee80211_rx_status *rx_status; - struct vnt_rx_header *head; - struct vnt_rx_tail *tail; - u32 frame_size; - int ii; - u16 rx_bitrate, pay_load_with_padding; - u8 rate_idx = 0; - long rx_dbm; - - skb = ptr_rcb->skb; - rx_status = IEEE80211_SKB_RXCB(skb); - - /* [31:16]RcvByteCount ( not include 4-byte Status ) */ - head = (struct vnt_rx_header *)skb->data; - frame_size = head->wbk_status >> 16; - frame_size += 4; - - if (bytes_received != frame_size) { - dev_dbg(&priv->usb->dev, "------- WRONG Length 1\n"); - return false; - } - - if ((bytes_received > 2372) || (bytes_received <= 40)) { - /* Frame Size error drop this packet.*/ - dev_dbg(&priv->usb->dev, "------ WRONG Length 2\n"); - return false; - } - - /* real Frame Size = USBframe_size -4WbkStatus - 4RxStatus */ - /* -8TSF - 4RSR - 4SQ3 - ?Padding */ - - /* if SQ3 the range is 24~27, if no SQ3 the range is 20~23 */ - - /*Fix hardware bug => PLCP_Length error */ - if (((bytes_received - head->pay_load_len) > 27) || - ((bytes_received - head->pay_load_len) < 24) || - (bytes_received < head->pay_load_len)) { - dev_dbg(&priv->usb->dev, "Wrong PLCP Length %x\n", - head->pay_load_len); - return false; - } - - sband = hw->wiphy->bands[hw->conf.chandef.chan->band]; - rx_bitrate = head->rx_rate * 5; /* rx_rate * 5 */ - - for (ii = 0; ii < sband->n_bitrates; ii++) { - if (sband->bitrates[ii].bitrate == rx_bitrate) { - rate_idx = ii; - break; - } - } - - if (ii == sband->n_bitrates) { - dev_dbg(&priv->usb->dev, "Wrong Rx Bit Rate %d\n", rx_bitrate); - return false; - } - - pay_load_with_padding = ((head->pay_load_len / 4) + - ((head->pay_load_len % 4) ? 
1 : 0)) * 4; - - tail = (struct vnt_rx_tail *)(skb->data + - sizeof(*head) + pay_load_with_padding); - priv->tsf_time = le64_to_cpu(tail->tsf_time); - - if (tail->rsr & (RSR_IVLDTYP | RSR_IVLDLEN)) - return false; - - vnt_rf_rssi_to_dbm(priv, tail->rssi, &rx_dbm); - - priv->bb_pre_ed_rssi = (u8)rx_dbm + 1; - priv->current_rssi = priv->bb_pre_ed_rssi; - - skb_pull(skb, sizeof(*head)); - skb_trim(skb, head->pay_load_len); - - rx_status->mactime = priv->tsf_time; - rx_status->band = hw->conf.chandef.chan->band; - rx_status->signal = rx_dbm; - rx_status->flag = 0; - rx_status->freq = hw->conf.chandef.chan->center_freq; - - if (!(tail->rsr & RSR_CRCOK)) - rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - - rx_status->rate_idx = rate_idx; - - if (tail->new_rsr & NEWRSR_DECRYPTOK) - rx_status->flag |= RX_FLAG_DECRYPTED; - - ieee80211_rx_irqsafe(priv->hw, skb); - - return true; -} diff --git a/drivers/staging/vt6656/dpc.h b/drivers/staging/vt6656/dpc.h deleted file mode 100644 index e080add823cb..000000000000 --- a/drivers/staging/vt6656/dpc.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. - * All rights reserved. - * - * File: dpc.h - * - * Purpose: - * - * Author: Jerry Chen - * - * Date: Jun. 27, 2002 - * - */ - -#ifndef __DPC_H__ -#define __DPC_H__ - -#include "device.h" - -int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb, - unsigned long bytes_received); - -#endif /* __RXTX_H__ */ diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c deleted file mode 100644 index af215860be4c..000000000000 --- a/drivers/staging/vt6656/int.c +++ /dev/null @@ -1,164 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. - * All rights reserved. - * - * File: int.c - * - * Purpose: Handle USB interrupt endpoint - * - * Author: Jerry Chen - * - * Date: Apr. 
2, 2004 - * - * Functions: - * - * Revision History: - * 04-02-2004 Jerry Chen: Initial release - * - */ - -#include "int.h" -#include "mac.h" -#include "power.h" -#include "usbpipe.h" - -static const u8 fallback_rate0[5][5] = { - {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M}, - {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M}, - {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M}, - {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M}, - {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M} -}; - -static const u8 fallback_rate1[5][5] = { - {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M}, - {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M}, - {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M}, - {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M}, - {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M} -}; - -int vnt_int_start_interrupt(struct vnt_private *priv) -{ - int ret = 0; - unsigned long flags; - - dev_dbg(&priv->usb->dev, "---->Interrupt Polling Thread\n"); - - spin_lock_irqsave(&priv->lock, flags); - - ret = vnt_start_interrupt_urb(priv); - - spin_unlock_irqrestore(&priv->lock, flags); - - return ret; -} - -static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr) -{ - struct vnt_usb_send_context *context; - struct ieee80211_tx_info *info; - struct ieee80211_rate *rate; - u8 tx_retry = (tsr & 0xf0) >> 4; - s8 idx; - - if (pkt_no >= priv->num_tx_context) - return -EINVAL; - - context = priv->tx_context[pkt_no]; - - if (!context->skb) - return -EINVAL; - - info = IEEE80211_SKB_CB(context->skb); - idx = info->control.rates[0].idx; - - if (context->fb_option && !(tsr & (TSR_TMO | TSR_RETRYTMO))) { - u8 tx_rate; - u8 retry = tx_retry; - - rate = ieee80211_get_tx_rate(priv->hw, info); - tx_rate = rate->hw_value - RATE_18M; - - if (retry > 4) - retry = 4; - - if (context->fb_option == AUTO_FB_0) - tx_rate = fallback_rate0[tx_rate][retry]; - else if (context->fb_option == AUTO_FB_1) - tx_rate = fallback_rate1[tx_rate][retry]; - - if (info->band == NL80211_BAND_5GHZ) - idx = tx_rate - RATE_6M; - else - idx = tx_rate; - } - - ieee80211_tx_info_clear_status(info); - - info->status.rates[0].count = tx_retry; - - if (!(tsr & TSR_TMO)) { - info->status.rates[0].idx = idx; - - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) - info->flags |= IEEE80211_TX_STAT_ACK; - } - - ieee80211_tx_status_irqsafe(priv->hw, context->skb); - - context->in_use = false; - - return 0; -} - -void vnt_int_process_data(struct vnt_private *priv) -{ - struct vnt_interrupt_data *int_data; - struct ieee80211_low_level_stats *low_stats = &priv->low_stats; - - dev_dbg(&priv->usb->dev, "---->s_nsInterruptProcessData\n"); - - int_data = (struct vnt_interrupt_data *)priv->int_buf.data_buf; - - if (int_data->tsr0 & TSR_VALID) - vnt_int_report_rate(priv, int_data->pkt0, int_data->tsr0); - - if (int_data->tsr1 & TSR_VALID) - vnt_int_report_rate(priv, int_data->pkt1, int_data->tsr1); - - if (int_data->tsr2 & TSR_VALID) - vnt_int_report_rate(priv, int_data->pkt2, int_data->tsr2); - - if (int_data->tsr3 & TSR_VALID) - vnt_int_report_rate(priv, int_data->pkt3, int_data->tsr3); - - if (int_data->isr0 != 0) { - if (int_data->isr0 & ISR_BNTX && - priv->op_mode == NL80211_IFTYPE_AP) - vnt_schedule_command(priv, WLAN_CMD_BECON_SEND); - - if (int_data->isr0 & ISR_TBTT && - priv->hw->conf.flags & IEEE80211_CONF_PS) { - if (!priv->wake_up_count) - priv->wake_up_count = - priv->hw->conf.listen_interval; - - --priv->wake_up_count; - - /* Turn on wake up to listen next beacon */ - if (priv->wake_up_count == 1) - 
vnt_schedule_command(priv, - WLAN_CMD_TBTT_WAKEUP); - } - priv->current_tsf = le64_to_cpu(int_data->tsf); - - low_stats->dot11RTSSuccessCount += int_data->rts_success; - low_stats->dot11RTSFailureCount += int_data->rts_fail; - low_stats->dot11ACKFailureCount += int_data->ack_fail; - low_stats->dot11FCSErrorCount += int_data->fcs_err; - } - - priv->int_buf.in_use = false; -} diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h deleted file mode 100644 index 8a6d60569ceb..000000000000 --- a/drivers/staging/vt6656/int.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. - * All rights reserved. - * - * File: int.h - * - * Purpose: - * - * Author: Jerry Chen - * - * Date: Apr. 2, 2004 - * - */ - -#ifndef __INT_H__ -#define __INT_H__ - -#include "device.h" - -struct vnt_interrupt_data { - u8 tsr0; - u8 pkt0; - u16 time0; - u8 tsr1; - u8 pkt1; - u16 time1; - u8 tsr2; - u8 pkt2; - u16 time2; - u8 tsr3; - u8 pkt3; - u16 time3; - __le64 tsf; - u8 isr0; - u8 isr1; - u8 rts_success; - u8 rts_fail; - u8 ack_fail; - u8 fcs_err; - u8 sw[2]; -} __packed; - -int vnt_int_start_interrupt(struct vnt_private *priv); -void vnt_int_process_data(struct vnt_private *priv); - -#endif /* __INT_H__ */ diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c index dcd933a6b66e..41b73f9670e2 100644 --- a/drivers/staging/vt6656/key.c +++ b/drivers/staging/vt6656/key.c @@ -144,11 +144,14 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta, break; case WLAN_CIPHER_SUITE_CCMP: if (priv->local_id <= MAC_REVISION_A1) - return -EINVAL; + return -EOPNOTSUPP; key_dec_mode = KEY_CTL_CCMP; key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + break; + default: + return -EOPNOTSUPP; } if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h index 0a42308b81e9..c532b27de37f 100644 --- a/drivers/staging/vt6656/mac.h +++ b/drivers/staging/vt6656/mac.h @@ -20,6 +20,7 @@ #ifndef __MAC_H__ #define __MAC_H__ +#include <linux/bits.h> #include "device.h" #define REV_ID_VT3253_A0 0x00 @@ -142,109 +143,109 @@ #define MAC_REG_RSPINF_A_72 0xfc /* Bits in the I2MCFG EEPROM register */ -#define I2MCFG_BOUNDCTL 0x80 -#define I2MCFG_WAITCTL 0x20 -#define I2MCFG_SCLOECTL 0x10 -#define I2MCFG_WBUSYCTL 0x08 -#define I2MCFG_NORETRY 0x04 -#define I2MCFG_I2MLDSEQ 0x02 -#define I2MCFG_I2CMFAST 0x01 +#define I2MCFG_BOUNDCTL BIT(7) +#define I2MCFG_WAITCTL BIT(5) +#define I2MCFG_SCLOECTL BIT(4) +#define I2MCFG_WBUSYCTL BIT(3) +#define I2MCFG_NORETRY BIT(2) +#define I2MCFG_I2MLDSEQ BIT(1) +#define I2MCFG_I2CMFAST BIT(0) /* Bits in the I2MCSR EEPROM register */ -#define I2MCSR_EEMW 0x80 -#define I2MCSR_EEMR 0x40 -#define I2MCSR_AUTOLD 0x08 -#define I2MCSR_NACK 0x02 -#define I2MCSR_DONE 0x01 +#define I2MCSR_EEMW BIT(7) +#define I2MCSR_EEMR BIT(6) +#define I2MCSR_AUTOLD BIT(3) +#define I2MCSR_NACK BIT(1) +#define I2MCSR_DONE BIT(0) /* Bits in the TMCTL register */ -#define TMCTL_TSUSP 0x04 -#define TMCTL_TMD 0x02 -#define TMCTL_TE 0x01 +#define TMCTL_TSUSP BIT(2) +#define TMCTL_TMD BIT(1) +#define TMCTL_TE BIT(0) /* Bits in the TFTCTL register */ -#define TFTCTL_HWUTSF 0x80 -#define TFTCTL_TBTTSYNC 0x40 -#define TFTCTL_HWUTSFEN 0x20 -#define TFTCTL_TSFCNTRRD 0x10 -#define TFTCTL_TBTTSYNCEN 0x08 -#define TFTCTL_TSFSYNCEN 0x04 -#define TFTCTL_TSFCNTRST 0x02 -#define TFTCTL_TSFCNTREN 0x01 +#define TFTCTL_HWUTSF BIT(7) +#define TFTCTL_TBTTSYNC BIT(6) +#define 
TFTCTL_HWUTSFEN BIT(5) +#define TFTCTL_TSFCNTRRD BIT(4) +#define TFTCTL_TBTTSYNCEN BIT(3) +#define TFTCTL_TSFSYNCEN BIT(2) +#define TFTCTL_TSFCNTRST BIT(1) +#define TFTCTL_TSFCNTREN BIT(0) /* Bits in the EnhanceCFG_0 register */ #define EnCFG_BBType_a 0x00 -#define EnCFG_BBType_b 0x01 -#define EnCFG_BBType_g 0x02 -#define EnCFG_BBType_MASK 0x03 -#define EnCFG_ProtectMd 0x20 +#define EnCFG_BBType_b BIT(0) +#define EnCFG_BBType_g BIT(1) +#define EnCFG_BBType_MASK (BIT(0) | BIT(1)) +#define EnCFG_ProtectMd BIT(5) /* Bits in the EnhanceCFG_1 register */ -#define EnCFG_BcnSusInd 0x01 -#define EnCFG_BcnSusClr 0x02 +#define EnCFG_BcnSusInd BIT(0) +#define EnCFG_BcnSusClr BIT(1) /* Bits in the EnhanceCFG_2 register */ -#define EnCFG_NXTBTTCFPSTR 0x01 -#define EnCFG_BarkerPream 0x02 -#define EnCFG_PktBurstMode 0x04 +#define EnCFG_NXTBTTCFPSTR BIT(0) +#define EnCFG_BarkerPream BIT(1) +#define EnCFG_PktBurstMode BIT(2) /* Bits in the CFG register */ -#define CFG_TKIPOPT 0x80 -#define CFG_RXDMAOPT 0x40 -#define CFG_TMOT_SW 0x20 -#define CFG_TMOT_HWLONG 0x10 +#define CFG_TKIPOPT BIT(7) +#define CFG_RXDMAOPT BIT(6) +#define CFG_TMOT_SW BIT(5) +#define CFG_TMOT_HWLONG BIT(4) #define CFG_TMOT_HW 0x00 -#define CFG_CFPENDOPT 0x08 -#define CFG_BCNSUSEN 0x04 -#define CFG_NOTXTIMEOUT 0x02 -#define CFG_NOBUFOPT 0x01 +#define CFG_CFPENDOPT BIT(3) +#define CFG_BCNSUSEN BIT(2) +#define CFG_NOTXTIMEOUT BIT(1) +#define CFG_NOBUFOPT BIT(0) /* Bits in the TEST register */ -#define TEST_LBEXT 0x80 -#define TEST_LBINT 0x40 +#define TEST_LBEXT BIT(7) +#define TEST_LBINT BIT(6) #define TEST_LBNONE 0x00 -#define TEST_SOFTINT 0x20 -#define TEST_CONTTX 0x10 -#define TEST_TXPE 0x08 -#define TEST_NAVDIS 0x04 -#define TEST_NOCTS 0x02 -#define TEST_NOACK 0x01 +#define TEST_SOFTINT BIT(5) +#define TEST_CONTTX BIT(4) +#define TEST_TXPE BIT(3) +#define TEST_NAVDIS BIT(2) +#define TEST_NOCTS BIT(1) +#define TEST_NOACK BIT(0) /* Bits in the HOSTCR register */ -#define HOSTCR_TXONST 0x80 -#define HOSTCR_RXONST 0x40 -#define HOSTCR_ADHOC 0x20 -#define HOSTCR_AP 0x10 -#define HOSTCR_TXON 0x08 -#define HOSTCR_RXON 0x04 -#define HOSTCR_MACEN 0x02 -#define HOSTCR_SOFTRST 0x01 +#define HOSTCR_TXONST BIT(7) +#define HOSTCR_RXONST BIT(6) +#define HOSTCR_ADHOC BIT(5) +#define HOSTCR_AP BIT(4) +#define HOSTCR_TXON BIT(3) +#define HOSTCR_RXON BIT(2) +#define HOSTCR_MACEN BIT(1) +#define HOSTCR_SOFTRST BIT(0) /* Bits in the MACCR register */ -#define MACCR_SYNCFLUSHOK 0x04 -#define MACCR_SYNCFLUSH 0x02 -#define MACCR_CLRNAV 0x01 +#define MACCR_SYNCFLUSHOK BIT(2) +#define MACCR_SYNCFLUSH BIT(1) +#define MACCR_CLRNAV BIT(0) /* Bits in the RCR register */ -#define RCR_SSID 0x80 -#define RCR_RXALLTYPE 0x40 -#define RCR_UNICAST 0x20 -#define RCR_BROADCAST 0x10 -#define RCR_MULTICAST 0x08 -#define RCR_WPAERR 0x04 -#define RCR_ERRCRC 0x02 -#define RCR_BSSID 0x01 +#define RCR_SSID BIT(7) +#define RCR_RXALLTYPE BIT(6) +#define RCR_UNICAST BIT(5) +#define RCR_BROADCAST BIT(4) +#define RCR_MULTICAST BIT(3) +#define RCR_WPAERR BIT(2) +#define RCR_ERRCRC BIT(1) +#define RCR_BSSID BIT(0) /* Bits in the TCR register */ -#define TCR_SYNCDCFOPT 0x02 -#define TCR_AUTOBCNTX 0x01 +#define TCR_SYNCDCFOPT BIT(1) +#define TCR_AUTOBCNTX BIT(0) /* ISR1 */ -#define ISR_GPIO3 0x40 -#define ISR_RXNOBUF 0x08 -#define ISR_MIBNEARFULL 0x04 -#define ISR_SOFTINT 0x02 -#define ISR_FETALERR 0x01 +#define ISR_GPIO3 BIT(6) +#define ISR_RXNOBUF BIT(3) +#define ISR_MIBNEARFULL BIT(2) +#define ISR_SOFTINT BIT(1) +#define ISR_FETALERR BIT(0) #define LEDSTS_STS 0x06 #define LEDSTS_TMLEN 
0x78 @@ -254,85 +255,85 @@ #define LEDSTS_INTER 0x06 /* ISR0 */ -#define ISR_WATCHDOG 0x80 -#define ISR_SOFTTIMER 0x40 -#define ISR_GPIO0 0x20 -#define ISR_TBTT 0x10 -#define ISR_RXDMA0 0x08 -#define ISR_BNTX 0x04 -#define ISR_ACTX 0x01 +#define ISR_WATCHDOG BIT(7) +#define ISR_SOFTTIMER BIT(6) +#define ISR_GPIO0 BIT(5) +#define ISR_TBTT BIT(4) +#define ISR_RXDMA0 BIT(3) +#define ISR_BNTX BIT(2) +#define ISR_ACTX BIT(0) /* Bits in the PSCFG register */ -#define PSCFG_PHILIPMD 0x40 -#define PSCFG_WAKECALEN 0x20 -#define PSCFG_WAKETMREN 0x10 -#define PSCFG_BBPSPROG 0x08 -#define PSCFG_WAKESYN 0x04 -#define PSCFG_SLEEPSYN 0x02 -#define PSCFG_AUTOSLEEP 0x01 +#define PSCFG_PHILIPMD BIT(6) +#define PSCFG_WAKECALEN BIT(5) +#define PSCFG_WAKETMREN BIT(4) +#define PSCFG_BBPSPROG BIT(3) +#define PSCFG_WAKESYN BIT(2) +#define PSCFG_SLEEPSYN BIT(1) +#define PSCFG_AUTOSLEEP BIT(0) /* Bits in the PSCTL register */ -#define PSCTL_WAKEDONE 0x20 -#define PSCTL_PS 0x10 -#define PSCTL_GO2DOZE 0x08 -#define PSCTL_LNBCN 0x04 -#define PSCTL_ALBCN 0x02 -#define PSCTL_PSEN 0x01 +#define PSCTL_WAKEDONE BIT(5) +#define PSCTL_PS BIT(4) +#define PSCTL_GO2DOZE BIT(3) +#define PSCTL_LNBCN BIT(2) +#define PSCTL_ALBCN BIT(1) +#define PSCTL_PSEN BIT(0) /* Bits in the PSPWSIG register */ -#define PSSIG_WPE3 0x80 -#define PSSIG_WPE2 0x40 -#define PSSIG_WPE1 0x20 -#define PSSIG_WRADIOPE 0x10 -#define PSSIG_SPE3 0x08 -#define PSSIG_SPE2 0x04 -#define PSSIG_SPE1 0x02 -#define PSSIG_SRADIOPE 0x01 +#define PSSIG_WPE3 BIT(7) +#define PSSIG_WPE2 BIT(6) +#define PSSIG_WPE1 BIT(5) +#define PSSIG_WRADIOPE BIT(4) +#define PSSIG_SPE3 BIT(3) +#define PSSIG_SPE2 BIT(2) +#define PSSIG_SPE1 BIT(1) +#define PSSIG_SRADIOPE BIT(0) /* Bits in the BBREGCTL register */ -#define BBREGCTL_DONE 0x04 -#define BBREGCTL_REGR 0x02 -#define BBREGCTL_REGW 0x01 +#define BBREGCTL_DONE BIT(2) +#define BBREGCTL_REGR BIT(1) +#define BBREGCTL_REGW BIT(0) /* Bits in the IFREGCTL register */ -#define IFREGCTL_DONE 0x04 -#define IFREGCTL_IFRF 0x02 -#define IFREGCTL_REGW 0x01 +#define IFREGCTL_DONE BIT(2) +#define IFREGCTL_IFRF BIT(1) +#define IFREGCTL_REGW BIT(0) /* Bits in the SOFTPWRCTL register */ -#define SOFTPWRCTL_RFLEOPT 0x08 -#define SOFTPWRCTL_TXPEINV 0x02 -#define SOFTPWRCTL_SWPECTI 0x01 -#define SOFTPWRCTL_SWPAPE 0x20 -#define SOFTPWRCTL_SWCALEN 0x10 -#define SOFTPWRCTL_SWRADIO_PE 0x08 -#define SOFTPWRCTL_SWPE2 0x04 -#define SOFTPWRCTL_SWPE1 0x02 -#define SOFTPWRCTL_SWPE3 0x01 +#define SOFTPWRCTL_RFLEOPT BIT(3) +#define SOFTPWRCTL_TXPEINV BIT(1) +#define SOFTPWRCTL_SWPECTI BIT(0) +#define SOFTPWRCTL_SWPAPE BIT(5) +#define SOFTPWRCTL_SWCALEN BIT(4) +#define SOFTPWRCTL_SWRADIO_PE BIT(3) +#define SOFTPWRCTL_SWPE2 BIT(2) +#define SOFTPWRCTL_SWPE1 BIT(1) +#define SOFTPWRCTL_SWPE3 BIT(0) /* Bits in the GPIOCTL1 register */ -#define GPIO3_MD 0x20 -#define GPIO3_DATA 0x40 -#define GPIO3_INTMD 0x80 +#define GPIO3_MD BIT(5) +#define GPIO3_DATA BIT(6) +#define GPIO3_INTMD BIT(7) /* Bits in the MISCFFCTL register */ -#define MISCFFCTL_WRITE 0x0001 +#define MISCFFCTL_WRITE BIT(0) /* Loopback mode */ -#define MAC_LB_EXT 0x02 -#define MAC_LB_INTERNAL 0x01 +#define MAC_LB_EXT BIT(1) +#define MAC_LB_INTERNAL BIT(0) #define MAC_LB_NONE 0x00 /* Ethernet address filter type */ #define PKT_TYPE_NONE 0x00 /* turn off receiver */ -#define PKT_TYPE_ALL_MULTICAST 0x80 -#define PKT_TYPE_PROMISCUOUS 0x40 -#define PKT_TYPE_DIRECTED 0x20 /* obselete */ -#define PKT_TYPE_BROADCAST 0x10 -#define PKT_TYPE_MULTICAST 0x08 -#define PKT_TYPE_ERROR_WPA 0x04 -#define PKT_TYPE_ERROR_CRC 
0x02 -#define PKT_TYPE_BSSID 0x01 +#define PKT_TYPE_ALL_MULTICAST BIT(7) +#define PKT_TYPE_PROMISCUOUS BIT(6) +#define PKT_TYPE_DIRECTED BIT(5) /* obselete */ +#define PKT_TYPE_BROADCAST BIT(4) +#define PKT_TYPE_MULTICAST BIT(3) +#define PKT_TYPE_ERROR_WPA BIT(2) +#define PKT_TYPE_ERROR_CRC BIT(1) +#define PKT_TYPE_BSSID BIT(0) #define Default_BI 0x200 diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 5e48b3ddb94c..8e7269c87ea9 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -21,8 +21,10 @@ */ #undef __NO_VERSION__ +#include <linux/bits.h> #include <linux/etherdevice.h> #include <linux/file.h> +#include <linux/kernel.h> #include "device.h" #include "card.h" #include "baseband.h" @@ -30,12 +32,10 @@ #include "power.h" #include "wcmd.h" #include "rxtx.h" -#include "dpc.h" #include "rf.h" #include "firmware.h" #include "usbpipe.h" #include "channel.h" -#include "int.h" /* * define module options @@ -99,7 +99,6 @@ static void vnt_set_options(struct vnt_private *priv) priv->op_mode = NL80211_IFTYPE_UNSPECIFIED; priv->bb_type = BBP_TYPE_DEF; priv->packet_type = priv->bb_type; - priv->auto_fb_ctrl = AUTO_FB_0; priv->preamble_type = 0; priv->exist_sw_net_addr = false; } @@ -109,7 +108,7 @@ static void vnt_set_options(struct vnt_private *priv) */ static int vnt_init_registers(struct vnt_private *priv) { - int ret = 0; + int ret; struct vnt_cmd_card_init *init_cmd = &priv->init_command; struct vnt_rsp_card_init *init_rsp = &priv->init_response; u8 antenna; @@ -145,7 +144,7 @@ static int vnt_init_registers(struct vnt_private *priv) init_cmd->init_class = DEVICE_INIT_COLD; init_cmd->exist_sw_net_addr = priv->exist_sw_net_addr; - for (ii = 0; ii < 6; ii++) + for (ii = 0; ii < ARRAY_SIZE(init_cmd->sw_net_addr); ii++) init_cmd->sw_net_addr[ii] = priv->current_net_addr[ii]; init_cmd->short_retry_limit = priv->short_retry_limit; init_cmd->long_retry_limit = priv->long_retry_limit; @@ -184,7 +183,7 @@ static int vnt_init_registers(struct vnt_private *priv) priv->cck_pwr = priv->eeprom[EEP_OFS_PWR_CCK]; priv->ofdm_pwr_g = priv->eeprom[EEP_OFS_PWR_OFDMG]; /* load power table */ - for (ii = 0; ii < 14; ii++) { + for (ii = 0; ii < ARRAY_SIZE(priv->cck_pwr_tbl); ii++) { priv->cck_pwr_tbl[ii] = priv->eeprom[ii + EEP_OFS_CCK_PWR_TBL]; if (priv->cck_pwr_tbl[ii] == 0) @@ -200,7 +199,7 @@ static int vnt_init_registers(struct vnt_private *priv) * original zonetype is USA, but custom zonetype is Europe, * then need to recover 12, 13, 14 channels with 11 channel */ - for (ii = 11; ii < 14; ii++) { + for (ii = 11; ii < ARRAY_SIZE(priv->cck_pwr_tbl); ii++) { priv->cck_pwr_tbl[ii] = priv->cck_pwr_tbl[10]; priv->ofdm_pwr_tbl[ii] = priv->ofdm_pwr_tbl[10]; } @@ -261,9 +260,6 @@ static int vnt_init_registers(struct vnt_private *priv) if (ret) goto end; - /* get Auto Fall Back type */ - priv->auto_fb_ctrl = AUTO_FB_0; - /* default Auto Mode */ priv->bb_type = BB_TYPE_11G; @@ -370,7 +366,7 @@ static int vnt_init_registers(struct vnt_private *priv) if (ret) goto end; - ret = vnt_mac_reg_bits_on(priv, MAC_REG_GPIOCTL0, 0x01); + ret = vnt_mac_reg_bits_on(priv, MAC_REG_GPIOCTL0, BIT(0)); if (ret) goto end; @@ -435,7 +431,7 @@ static void vnt_free_int_bufs(struct vnt_private *priv) static int vnt_alloc_bufs(struct vnt_private *priv) { - int ret = 0; + int ret; struct vnt_usb_send_context *tx_context; struct vnt_rcb *rcb; int ii; @@ -484,9 +480,6 @@ static int vnt_alloc_bufs(struct vnt_private *priv) ret = -ENOMEM; goto free_rx_tx; } - - rcb->in_use = 
false; - /* submit rx urb */ ret = vnt_submit_rx_urb(priv, rcb); if (ret) @@ -528,7 +521,7 @@ static void vnt_tx_80211(struct ieee80211_hw *hw, static int vnt_start(struct ieee80211_hw *hw) { - int ret = 0; + int ret; struct vnt_private *priv = hw->priv; priv->rx_buf_sz = MAX_TOTAL_SIZE_WITH_ALL_HEADERS; @@ -553,7 +546,7 @@ static int vnt_start(struct ieee80211_hw *hw) priv->int_interval = 1; /* bInterval is set to 1 */ - ret = vnt_int_start_interrupt(priv); + ret = vnt_start_interrupt_urb(priv); if (ret) goto free_all; @@ -798,12 +791,11 @@ static u64 vnt_prepare_multicast(struct ieee80211_hw *hw, struct vnt_private *priv = hw->priv; struct netdev_hw_addr *ha; u64 mc_filter = 0; - u32 bit_nr = 0; + u32 bit_nr; netdev_hw_addr_list_for_each(ha, mc_list) { bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; - - mc_filter |= 1ULL << (bit_nr & 0x3f); + mc_filter |= BIT_ULL(bit_nr); } priv->mc_list_count = mc_list->count; @@ -862,9 +854,7 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, switch (cmd) { case SET_KEY: - if (vnt_set_keys(hw, sta, vif, key)) - return -EOPNOTSUPP; - break; + return vnt_set_keys(hw, sta, vif, key); case DISABLE_KEY: if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) clear_bit(key->hw_key_idx, &priv->key_entry_inuse); @@ -973,7 +963,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id) struct vnt_private *priv; struct ieee80211_hw *hw; struct wiphy *wiphy; - int rc = 0; + int rc; udev = usb_get_dev(interface_to_usbdev(intf)); diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c index 29caba728906..9439d190f431 100644 --- a/drivers/staging/vt6656/rxtx.c +++ b/drivers/staging/vt6656/rxtx.c @@ -13,7 +13,6 @@ * * Functions: * vnt_generate_tx_parameter - Generate tx dma required parameter. 
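
The vnt_prepare_multicast() hunk above builds the 64-bit multicast hardware filter from the top 6 bits of the Ethernet CRC-32 of each address, setting one bit per address with BIT_ULL(). The sketch below is commentary only, not part of the patch: it shows that hash computation over a plain array of addresses (the real code iterates a netdev_hw_addr_list), and the function name is hypothetical.

/* Illustrative sketch only -- not part of the patch above. */
#include <linux/bits.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/types.h>

static u64 example_build_mc_filter(const u8 (*addrs)[ETH_ALEN], int count)
{
	u64 filter = 0;
	int i;

	for (i = 0; i < count; i++) {
		/* Bits 31:26 of the CRC select one of 64 filter bits. */
		u32 bit_nr = ether_crc(ETH_ALEN, addrs[i]) >> 26;

		filter |= BIT_ULL(bit_nr);
	}

	return filter;
}
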
- * vnt_get_duration_le - get tx data required duration * vnt_get_rtscts_duration_le- get rtx/cts required duration * vnt_get_rtscts_rsvtime_le- get rts/cts reserved time * vnt_get_rsvtime- get frame reserved time @@ -39,30 +38,12 @@ static const u16 vnt_time_stampoff[2][MAX_RATE] = { {384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23}, }; -static const u16 vnt_fb_opt0[2][5] = { - {RATE_12M, RATE_18M, RATE_24M, RATE_36M, RATE_48M}, /* fallback_rate0 */ - {RATE_12M, RATE_12M, RATE_18M, RATE_24M, RATE_36M}, /* fallback_rate1 */ -}; - -static const u16 vnt_fb_opt1[2][5] = { - {RATE_12M, RATE_18M, RATE_24M, RATE_24M, RATE_36M}, /* fallback_rate0 */ - {RATE_6M, RATE_6M, RATE_12M, RATE_12M, RATE_18M}, /* fallback_rate1 */ -}; - #define RTSDUR_BB 0 #define RTSDUR_BA 1 #define RTSDUR_AA 2 #define CTSDUR_BA 3 -#define RTSDUR_BA_F0 4 -#define RTSDUR_AA_F0 5 -#define RTSDUR_BA_F1 6 -#define RTSDUR_AA_F1 7 -#define CTSDUR_BA_F0 8 -#define CTSDUR_BA_F1 9 #define DATADUR_B 10 #define DATADUR_A 11 -#define DATADUR_A_F0 12 -#define DATADUR_A_F1 13 static struct vnt_usb_send_context *vnt_get_free_context(struct vnt_private *priv) @@ -184,27 +165,6 @@ static __le16 vnt_get_rtscts_rsvtime_le(struct vnt_private *priv, u8 rsv_type, return cpu_to_le16((u16)rrv_time); } -static __le16 vnt_get_duration_le(struct vnt_private *priv, u8 pkt_type, - int need_ack) -{ - u32 ack_time = 0; - - if (need_ack) { - if (pkt_type == PK_TYPE_11B) - ack_time = vnt_get_frame_time(priv->preamble_type, - pkt_type, 14, - priv->top_cck_basic_rate); - else - ack_time = vnt_get_frame_time(priv->preamble_type, - pkt_type, 14, - priv->top_ofdm_basic_rate); - - return cpu_to_le16((u16)(priv->sifs + ack_time)); - } - - return 0; -} - static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context, u8 dur_type, u8 pkt_type, u16 rate) { @@ -216,8 +176,6 @@ static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context, switch (dur_type) { case RTSDUR_BB: case RTSDUR_BA: - case RTSDUR_BA_F0: - case RTSDUR_BA_F1: cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type, 14, priv->top_cck_basic_rate); dur_time = cts_time + 2 * priv->sifs + @@ -226,8 +184,6 @@ static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context, break; case RTSDUR_AA: - case RTSDUR_AA_F0: - case RTSDUR_AA_F1: cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type, 14, priv->top_ofdm_basic_rate); dur_time = cts_time + 2 * priv->sifs + @@ -236,8 +192,6 @@ static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context, break; case CTSDUR_BA: - case CTSDUR_BA_F0: - case CTSDUR_BA_F1: dur_time = priv->sifs + vnt_get_rsvtime(priv, pkt_type, frame_length, rate, need_ack); break; @@ -270,7 +224,6 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context, (struct ieee80211_hdr *)tx_context->skb->data; u32 frame_len = tx_context->frame_len; u16 rate = tx_context->tx_rate; - u8 need_ack = tx_context->need_ack; /* Get SignalField,ServiceField,Length */ vnt_get_phy_field(priv, frame_len, rate, tx_context->pkt_type, &buf->a); @@ -278,16 +231,8 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context, PK_TYPE_11B, &buf->b); /* Get Duration and TimeStamp */ - if (ieee80211_is_nullfunc(hdr->frame_control)) { - buf->duration_a = hdr->duration_id; - buf->duration_b = hdr->duration_id; - } else { - buf->duration_a = vnt_get_duration_le(priv, - tx_context->pkt_type, need_ack); - buf->duration_b = vnt_get_duration_le(priv, - PK_TYPE_11B, need_ack); - } - + buf->duration_a = 
hdr->duration_id; + buf->duration_b = hdr->duration_id; buf->time_stamp_off_a = vnt_time_stamp_off(priv, rate); buf->time_stamp_off_b = vnt_time_stamp_off(priv, priv->top_cck_basic_rate); @@ -297,63 +242,6 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context, return le16_to_cpu(buf->duration_a); } -static u16 vnt_rxtx_datahead_g_fb(struct vnt_usb_send_context *tx_context, - struct vnt_tx_datahead_g_fb *buf) -{ - struct vnt_private *priv = tx_context->priv; - u32 frame_len = tx_context->frame_len; - u16 rate = tx_context->tx_rate; - u8 need_ack = tx_context->need_ack; - - /* Get SignalField,ServiceField,Length */ - vnt_get_phy_field(priv, frame_len, rate, tx_context->pkt_type, &buf->a); - - vnt_get_phy_field(priv, frame_len, priv->top_cck_basic_rate, - PK_TYPE_11B, &buf->b); - - /* Get Duration and TimeStamp */ - buf->duration_a = vnt_get_duration_le(priv, tx_context->pkt_type, - need_ack); - buf->duration_b = vnt_get_duration_le(priv, PK_TYPE_11B, need_ack); - - buf->duration_a_f0 = vnt_get_duration_le(priv, tx_context->pkt_type, - need_ack); - buf->duration_a_f1 = vnt_get_duration_le(priv, tx_context->pkt_type, - need_ack); - - buf->time_stamp_off_a = vnt_time_stamp_off(priv, rate); - buf->time_stamp_off_b = vnt_time_stamp_off(priv, - priv->top_cck_basic_rate); - - tx_context->tx_hdr_size = vnt_mac_hdr_pos(tx_context, &buf->hdr); - - return le16_to_cpu(buf->duration_a); -} - -static u16 vnt_rxtx_datahead_a_fb(struct vnt_usb_send_context *tx_context, - struct vnt_tx_datahead_a_fb *buf) -{ - struct vnt_private *priv = tx_context->priv; - u16 rate = tx_context->tx_rate; - u8 pkt_type = tx_context->pkt_type; - u8 need_ack = tx_context->need_ack; - u32 frame_len = tx_context->frame_len; - - /* Get SignalField,ServiceField,Length */ - vnt_get_phy_field(priv, frame_len, rate, pkt_type, &buf->a); - /* Get Duration and TimeStampOff */ - buf->duration = vnt_get_duration_le(priv, pkt_type, need_ack); - - buf->duration_f0 = vnt_get_duration_le(priv, pkt_type, need_ack); - buf->duration_f1 = vnt_get_duration_le(priv, pkt_type, need_ack); - - buf->time_stamp_off = vnt_time_stamp_off(priv, rate); - - tx_context->tx_hdr_size = vnt_mac_hdr_pos(tx_context, &buf->hdr); - - return le16_to_cpu(buf->duration); -} - static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context, struct vnt_tx_datahead_ab *buf) { @@ -362,20 +250,13 @@ static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context, (struct ieee80211_hdr *)tx_context->skb->data; u32 frame_len = tx_context->frame_len; u16 rate = tx_context->tx_rate; - u8 need_ack = tx_context->need_ack; /* Get SignalField,ServiceField,Length */ vnt_get_phy_field(priv, frame_len, rate, tx_context->pkt_type, &buf->ab); /* Get Duration and TimeStampOff */ - if (ieee80211_is_nullfunc(hdr->frame_control)) { - buf->duration = hdr->duration_id; - } else { - buf->duration = vnt_get_duration_le(priv, tx_context->pkt_type, - need_ack); - } - + buf->duration = hdr->duration_id; buf->time_stamp_off = vnt_time_stamp_off(priv, rate); tx_context->tx_hdr_size = vnt_mac_hdr_pos(tx_context, &buf->hdr); @@ -426,50 +307,6 @@ static u16 vnt_rxtx_rts_g_head(struct vnt_usb_send_context *tx_context, return vnt_rxtx_datahead_g(tx_context, &buf->data_head); } -static u16 vnt_rxtx_rts_g_fb_head(struct vnt_usb_send_context *tx_context, - struct vnt_rts_g_fb *buf) -{ - struct vnt_private *priv = tx_context->priv; - u16 current_rate = tx_context->tx_rate; - u16 rts_frame_len = 20; - - vnt_get_phy_field(priv, rts_frame_len, priv->top_cck_basic_rate, - 
PK_TYPE_11B, &buf->b); - vnt_get_phy_field(priv, rts_frame_len, priv->top_ofdm_basic_rate, - tx_context->pkt_type, &buf->a); - - buf->duration_bb = vnt_get_rtscts_duration_le(tx_context, RTSDUR_BB, - PK_TYPE_11B, - priv->top_cck_basic_rate); - buf->duration_aa = vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA, - tx_context->pkt_type, - current_rate); - buf->duration_ba = vnt_get_rtscts_duration_le(tx_context, RTSDUR_BA, - tx_context->pkt_type, - current_rate); - - buf->rts_duration_ba_f0 = - vnt_get_rtscts_duration_le(tx_context, RTSDUR_BA_F0, - tx_context->pkt_type, - priv->tx_rate_fb0); - buf->rts_duration_aa_f0 = - vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA_F0, - tx_context->pkt_type, - priv->tx_rate_fb0); - buf->rts_duration_ba_f1 = - vnt_get_rtscts_duration_le(tx_context, RTSDUR_BA_F1, - tx_context->pkt_type, - priv->tx_rate_fb1); - buf->rts_duration_aa_f1 = - vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA_F1, - tx_context->pkt_type, - priv->tx_rate_fb1); - - vnt_fill_ieee80211_rts(tx_context, &buf->data, buf->duration_aa); - - return vnt_rxtx_datahead_g_fb(tx_context, &buf->data_head); -} - static u16 vnt_rxtx_rts_ab_head(struct vnt_usb_send_context *tx_context, struct vnt_rts_ab *buf) { @@ -489,71 +326,6 @@ static u16 vnt_rxtx_rts_ab_head(struct vnt_usb_send_context *tx_context, return vnt_rxtx_datahead_ab(tx_context, &buf->data_head); } -static u16 vnt_rxtx_rts_a_fb_head(struct vnt_usb_send_context *tx_context, - struct vnt_rts_a_fb *buf) -{ - struct vnt_private *priv = tx_context->priv; - u16 current_rate = tx_context->tx_rate; - u16 rts_frame_len = 20; - - vnt_get_phy_field(priv, rts_frame_len, priv->top_ofdm_basic_rate, - tx_context->pkt_type, &buf->a); - - buf->duration = vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA, - tx_context->pkt_type, - current_rate); - - buf->rts_duration_f0 = - vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA_F0, - tx_context->pkt_type, - priv->tx_rate_fb0); - - buf->rts_duration_f1 = - vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA_F1, - tx_context->pkt_type, - priv->tx_rate_fb1); - - vnt_fill_ieee80211_rts(tx_context, &buf->data, buf->duration); - - return vnt_rxtx_datahead_a_fb(tx_context, &buf->data_head); -} - -static u16 vnt_fill_cts_fb_head(struct vnt_usb_send_context *tx_context, - union vnt_tx_data_head *head) -{ - struct vnt_private *priv = tx_context->priv; - struct vnt_cts_fb *buf = &head->cts_g_fb; - u32 cts_frame_len = 14; - u16 current_rate = tx_context->tx_rate; - - /* Get SignalField,ServiceField,Length */ - vnt_get_phy_field(priv, cts_frame_len, priv->top_cck_basic_rate, - PK_TYPE_11B, &buf->b); - - buf->duration_ba = - vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA, - tx_context->pkt_type, - current_rate); - /* Get CTSDuration_ba_f0 */ - buf->cts_duration_ba_f0 = - vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA_F0, - tx_context->pkt_type, - priv->tx_rate_fb0); - /* Get CTSDuration_ba_f1 */ - buf->cts_duration_ba_f1 = - vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA_F1, - tx_context->pkt_type, - priv->tx_rate_fb1); - /* Get CTS Frame body */ - buf->data.duration = buf->duration_ba; - buf->data.frame_control = - cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); - - ether_addr_copy(buf->data.ra, priv->current_net_addr); - - return vnt_rxtx_datahead_g_fb(tx_context, &buf->data_head); -} - static u16 vnt_fill_cts_head(struct vnt_usb_send_context *tx_context, union vnt_tx_data_head *head) { @@ -606,9 +378,6 @@ static u16 vnt_rxtx_rts(struct vnt_usb_send_context *tx_context, if (need_mic) head = 
&tx_head->tx_rts.tx.mic.head; - if (tx_context->fb_option) - return vnt_rxtx_rts_g_fb_head(tx_context, &head->rts_g_fb); - return vnt_rxtx_rts_g_head(tx_context, &head->rts_g); } @@ -633,10 +402,6 @@ static u16 vnt_rxtx_cts(struct vnt_usb_send_context *tx_context, if (need_mic) head = &tx_head->tx_cts.tx.mic.head; - /* Fill CTS */ - if (tx_context->fb_option) - return vnt_fill_cts_fb_head(tx_context, head); - return vnt_fill_cts_head(tx_context, head); } @@ -664,18 +429,9 @@ static u16 vnt_rxtx_ab(struct vnt_usb_send_context *tx_context, buf->rts_rrv_time = vnt_get_rtscts_rsvtime_le(priv, 2, tx_context->pkt_type, frame_len, current_rate); - if (tx_context->fb_option && - tx_context->pkt_type == PK_TYPE_11A) - return vnt_rxtx_rts_a_fb_head(tx_context, - &head->rts_a_fb); - return vnt_rxtx_rts_ab_head(tx_context, &head->rts_ab); } - if (tx_context->pkt_type == PK_TYPE_11A) - return vnt_rxtx_datahead_a_fb(tx_context, - &head->data_head_a_fb); - return vnt_rxtx_datahead_ab(tx_context, &head->data_head_ab); } @@ -792,7 +548,7 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) struct vnt_usb_send_context *tx_context; unsigned long flags; u16 tx_bytes, tx_header_size, tx_body_size, current_rate, duration_id; - u8 pkt_type, fb_option = AUTO_FB_NONE; + u8 pkt_type; bool need_rts = false; bool need_mic = false; @@ -912,33 +668,6 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) tx_buffer_head->current_rate = cpu_to_le16(current_rate); - /* legacy rates TODO use ieee80211_tx_rate */ - if (current_rate >= RATE_18M && ieee80211_is_data(hdr->frame_control)) { - if (priv->auto_fb_ctrl == AUTO_FB_0) { - tx_buffer_head->fifo_ctl |= - cpu_to_le16(FIFOCTL_AUTO_FB_0); - - priv->tx_rate_fb0 = - vnt_fb_opt0[FB_RATE0][current_rate - RATE_18M]; - priv->tx_rate_fb1 = - vnt_fb_opt0[FB_RATE1][current_rate - RATE_18M]; - - fb_option = AUTO_FB_0; - } else if (priv->auto_fb_ctrl == AUTO_FB_1) { - tx_buffer_head->fifo_ctl |= - cpu_to_le16(FIFOCTL_AUTO_FB_1); - - priv->tx_rate_fb0 = - vnt_fb_opt1[FB_RATE0][current_rate - RATE_18M]; - priv->tx_rate_fb1 = - vnt_fb_opt1[FB_RATE1][current_rate - RATE_18M]; - - fb_option = AUTO_FB_1; - } - } - - tx_context->fb_option = fb_option; - duration_id = vnt_generate_tx_parameter(tx_context, tx_buffer, &mic_hdr, need_mic, need_rts); @@ -954,8 +683,6 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) memcpy(tx_context->hdr, skb->data, tx_body_size); - hdr->duration_id = cpu_to_le16(duration_id); - if (info->control.hw_key) { tx_key = info->control.hw_key; if (tx_key->keylen > 0) @@ -977,7 +704,7 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) spin_lock_irqsave(&priv->lock, flags); - if (vnt_tx_context(priv, tx_context) != STATUS_PENDING) { + if (vnt_tx_context(priv, tx_context)) { spin_unlock_irqrestore(&priv->lock, flags); return -EIO; } @@ -1021,9 +748,7 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb) vnt_get_phy_field(priv, frame_size, current_rate, PK_TYPE_11A, &short_head->ab); - /* Get Duration and TimeStampOff */ - short_head->duration = vnt_get_duration_le(priv, - PK_TYPE_11A, false); + /* Get TimeStampOff */ short_head->time_stamp_off = vnt_time_stamp_off(priv, current_rate); } else { @@ -1034,9 +759,7 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb) vnt_get_phy_field(priv, frame_size, current_rate, PK_TYPE_11B, &short_head->ab); - /* Get Duration and TimeStampOff */ - short_head->duration = vnt_get_duration_le(priv, - PK_TYPE_11B, false); + /* Get 
TimeStampOff */ short_head->time_stamp_off = vnt_time_stamp_off(priv, current_rate); } @@ -1045,6 +768,9 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb) mgmt_hdr = &beacon_buffer->mgmt_hdr; memcpy(mgmt_hdr, skb->data, skb->len); + /* Get Duration */ + short_head->duration = mgmt_hdr->duration; + /* time stamp always 0 */ mgmt_hdr->u.beacon.timestamp = 0; @@ -1071,7 +797,7 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb) spin_lock_irqsave(&priv->lock, flags); - if (vnt_tx_context(priv, context) != STATUS_PENDING) + if (vnt_tx_context(priv, context)) ieee80211_free_txskb(priv->hw, context->skb); spin_unlock_irqrestore(&priv->lock, flags); diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h index d528607e02bf..0e6226af7d41 100644 --- a/drivers/staging/vt6656/rxtx.h +++ b/drivers/staging/vt6656/rxtx.h @@ -73,18 +73,6 @@ struct vnt_tx_datahead_g { struct ieee80211_hdr hdr; } __packed; -struct vnt_tx_datahead_g_fb { - struct vnt_phy_field b; - struct vnt_phy_field a; - __le16 duration_b; - __le16 duration_a; - __le16 duration_a_f0; - __le16 duration_a_f1; - __le16 time_stamp_off_b; - __le16 time_stamp_off_a; - struct ieee80211_hdr hdr; -} __packed; - struct vnt_tx_datahead_ab { struct vnt_phy_field ab; __le16 duration; @@ -92,15 +80,6 @@ struct vnt_tx_datahead_ab { struct ieee80211_hdr hdr; } __packed; -struct vnt_tx_datahead_a_fb { - struct vnt_phy_field a; - __le16 duration; - __le16 time_stamp_off; - __le16 duration_f0; - __le16 duration_f1; - struct ieee80211_hdr hdr; -} __packed; - /* RTS buffer header */ struct vnt_rts_g { struct vnt_phy_field b; @@ -113,21 +92,6 @@ struct vnt_rts_g { struct vnt_tx_datahead_g data_head; } __packed; -struct vnt_rts_g_fb { - struct vnt_phy_field b; - struct vnt_phy_field a; - __le16 duration_ba; - __le16 duration_aa; - __le16 duration_bb; - u16 wReserved; - __le16 rts_duration_ba_f0; - __le16 rts_duration_aa_f0; - __le16 rts_duration_ba_f1; - __le16 rts_duration_aa_f1; - struct ieee80211_rts data; - struct vnt_tx_datahead_g_fb data_head; -} __packed; - struct vnt_rts_ab { struct vnt_phy_field ab; __le16 duration; @@ -136,16 +100,6 @@ struct vnt_rts_ab { struct vnt_tx_datahead_ab data_head; } __packed; -struct vnt_rts_a_fb { - struct vnt_phy_field a; - __le16 duration; - u16 wReserved; - __le16 rts_duration_f0; - __le16 rts_duration_f1; - struct ieee80211_rts data; - struct vnt_tx_datahead_a_fb data_head; -} __packed; - /* CTS buffer header */ struct vnt_cts { struct vnt_phy_field b; @@ -156,29 +110,14 @@ struct vnt_cts { struct vnt_tx_datahead_g data_head; } __packed; -struct vnt_cts_fb { - struct vnt_phy_field b; - __le16 duration_ba; - u16 wReserved; - __le16 cts_duration_ba_f0; - __le16 cts_duration_ba_f1; - struct ieee80211_cts data; - u16 reserved2; - struct vnt_tx_datahead_g_fb data_head; -} __packed; - union vnt_tx_data_head { /* rts g */ struct vnt_rts_g rts_g; - struct vnt_rts_g_fb rts_g_fb; /* rts a/b */ struct vnt_rts_ab rts_ab; - struct vnt_rts_a_fb rts_a_fb; /* cts g */ struct vnt_cts cts_g; - struct vnt_cts_fb cts_g_fb; /* no rts/cts */ - struct vnt_tx_datahead_a_fb data_head_a_fb; struct vnt_tx_datahead_ab data_head_ab; }; diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c index 7bfccc48a366..eae211e5860f 100644 --- a/drivers/staging/vt6656/usbpipe.c +++ b/drivers/staging/vt6656/usbpipe.c @@ -24,12 +24,12 @@ * */ -#include "int.h" #include "rxtx.h" -#include "dpc.h" #include "desc.h" #include "device.h" #include "usbpipe.h" +#include 
"mac.h" +#include "rf.h" #define USB_CTL_WAIT 500 /* ms */ @@ -139,6 +139,90 @@ int vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data) reg_off, reg, sizeof(u8), data); } +static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr) +{ + struct vnt_usb_send_context *context; + struct ieee80211_tx_info *info; + u8 tx_retry = (tsr & 0xf0) >> 4; + s8 idx; + + if (pkt_no >= priv->num_tx_context) + return -EINVAL; + + context = priv->tx_context[pkt_no]; + + if (!context->skb) + return -EINVAL; + + info = IEEE80211_SKB_CB(context->skb); + idx = info->control.rates[0].idx; + + ieee80211_tx_info_clear_status(info); + + info->status.rates[0].count = tx_retry; + + if (!(tsr & TSR_TMO)) { + info->status.rates[0].idx = idx; + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + info->flags |= IEEE80211_TX_STAT_ACK; + } + + ieee80211_tx_status_irqsafe(priv->hw, context->skb); + + context->in_use = false; + + return 0; +} + +static void vnt_int_process_data(struct vnt_private *priv) +{ + struct vnt_interrupt_data *int_data; + struct ieee80211_low_level_stats *low_stats = &priv->low_stats; + + dev_dbg(&priv->usb->dev, "---->s_nsInterruptProcessData\n"); + + int_data = (struct vnt_interrupt_data *)priv->int_buf.data_buf; + + if (int_data->tsr0 & TSR_VALID) + vnt_int_report_rate(priv, int_data->pkt0, int_data->tsr0); + + if (int_data->tsr1 & TSR_VALID) + vnt_int_report_rate(priv, int_data->pkt1, int_data->tsr1); + + if (int_data->tsr2 & TSR_VALID) + vnt_int_report_rate(priv, int_data->pkt2, int_data->tsr2); + + if (int_data->tsr3 & TSR_VALID) + vnt_int_report_rate(priv, int_data->pkt3, int_data->tsr3); + + if (int_data->isr0 != 0) { + if (int_data->isr0 & ISR_BNTX && + priv->op_mode == NL80211_IFTYPE_AP) + vnt_schedule_command(priv, WLAN_CMD_BECON_SEND); + + if (int_data->isr0 & ISR_TBTT && + priv->hw->conf.flags & IEEE80211_CONF_PS) { + if (!priv->wake_up_count) + priv->wake_up_count = + priv->hw->conf.listen_interval; + + --priv->wake_up_count; + + /* Turn on wake up to listen next beacon */ + if (priv->wake_up_count == 1) + vnt_schedule_command(priv, + WLAN_CMD_TBTT_WAKEUP); + } + priv->current_tsf = le64_to_cpu(int_data->tsf); + + low_stats->dot11RTSSuccessCount += int_data->rts_success; + low_stats->dot11RTSFailureCount += int_data->rts_fail; + low_stats->dot11ACKFailureCount += int_data->ack_fail; + low_stats->dot11FCSErrorCount += int_data->fcs_err; + } +} + static void vnt_start_interrupt_urb_complete(struct urb *urb) { struct vnt_private *priv = urb->context; @@ -151,37 +235,26 @@ static void vnt_start_interrupt_urb_complete(struct urb *urb) case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: - priv->int_buf.in_use = false; return; default: break; } - if (status) { - priv->int_buf.in_use = false; - + if (status) dev_dbg(&priv->usb->dev, "%s status = %d\n", __func__, status); - } else { + else vnt_int_process_data(priv); - } status = usb_submit_urb(priv->interrupt_urb, GFP_ATOMIC); if (status) dev_dbg(&priv->usb->dev, "Submit int URB failed %d\n", status); - else - priv->int_buf.in_use = true; } int vnt_start_interrupt_urb(struct vnt_private *priv) { int ret = 0; - if (priv->int_buf.in_use) { - ret = -EBUSY; - goto err; - } - - priv->int_buf.in_use = true; + dev_dbg(&priv->usb->dev, "---->Interrupt Polling Thread\n"); usb_fill_int_urb(priv->interrupt_urb, priv->usb, @@ -193,17 +266,110 @@ int vnt_start_interrupt_urb(struct vnt_private *priv) priv->int_interval); ret = usb_submit_urb(priv->interrupt_urb, GFP_ATOMIC); - if (ret) { + if (ret) dev_dbg(&priv->usb->dev, 
"Submit int URB failed %d\n", ret); - goto err_submit; + + return ret; +} + +static int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb, + unsigned long bytes_received) +{ + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_supported_band *sband; + struct sk_buff *skb; + struct ieee80211_rx_status *rx_status; + struct vnt_rx_header *head; + struct vnt_rx_tail *tail; + u32 frame_size; + int ii; + u16 rx_bitrate, pay_load_with_padding; + u8 rate_idx = 0; + long rx_dbm; + + skb = ptr_rcb->skb; + rx_status = IEEE80211_SKB_RXCB(skb); + + /* [31:16]RcvByteCount ( not include 4-byte Status ) */ + head = (struct vnt_rx_header *)skb->data; + frame_size = head->wbk_status >> 16; + frame_size += 4; + + if (bytes_received != frame_size) { + dev_dbg(&priv->usb->dev, "------- WRONG Length 1\n"); + return false; } - return 0; + if ((bytes_received > 2372) || (bytes_received <= 40)) { + /* Frame Size error drop this packet.*/ + dev_dbg(&priv->usb->dev, "------ WRONG Length 2\n"); + return false; + } -err_submit: - priv->int_buf.in_use = false; -err: - return ret; + /* real Frame Size = USBframe_size -4WbkStatus - 4RxStatus */ + /* -8TSF - 4RSR - 4SQ3 - ?Padding */ + + /* if SQ3 the range is 24~27, if no SQ3 the range is 20~23 */ + + /*Fix hardware bug => PLCP_Length error */ + if (((bytes_received - head->pay_load_len) > 27) || + ((bytes_received - head->pay_load_len) < 24) || + (bytes_received < head->pay_load_len)) { + dev_dbg(&priv->usb->dev, "Wrong PLCP Length %x\n", + head->pay_load_len); + return false; + } + + sband = hw->wiphy->bands[hw->conf.chandef.chan->band]; + rx_bitrate = head->rx_rate * 5; /* rx_rate * 5 */ + + for (ii = 0; ii < sband->n_bitrates; ii++) { + if (sband->bitrates[ii].bitrate == rx_bitrate) { + rate_idx = ii; + break; + } + } + + if (ii == sband->n_bitrates) { + dev_dbg(&priv->usb->dev, "Wrong Rx Bit Rate %d\n", rx_bitrate); + return false; + } + + pay_load_with_padding = ((head->pay_load_len / 4) + + ((head->pay_load_len % 4) ? 
1 : 0)) * 4; + + tail = (struct vnt_rx_tail *)(skb->data + + sizeof(*head) + pay_load_with_padding); + priv->tsf_time = le64_to_cpu(tail->tsf_time); + + if (tail->rsr & (RSR_IVLDTYP | RSR_IVLDLEN)) + return false; + + vnt_rf_rssi_to_dbm(priv, tail->rssi, &rx_dbm); + + priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1; + priv->current_rssi = priv->bb_pre_ed_rssi; + + skb_pull(skb, sizeof(*head)); + skb_trim(skb, head->pay_load_len); + + rx_status->mactime = priv->tsf_time; + rx_status->band = hw->conf.chandef.chan->band; + rx_status->signal = rx_dbm; + rx_status->flag = 0; + rx_status->freq = hw->conf.chandef.chan->center_freq; + + if (!(tail->rsr & RSR_CRCOK)) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + + rx_status->rate_idx = rate_idx; + + if (tail->new_rsr & NEWRSR_DECRYPTOK) + rx_status->flag |= RX_FLAG_DECRYPTED; + + ieee80211_rx_irqsafe(priv->hw, skb); + + return true; } static void vnt_submit_rx_urb_complete(struct urb *urb) @@ -227,10 +393,8 @@ static void vnt_submit_rx_urb_complete(struct urb *urb) if (urb->actual_length) { if (vnt_rx_data(priv, rcb, urb->actual_length)) { rcb->skb = dev_alloc_skb(priv->rx_buf_sz); - if (!rcb->skb) { - rcb->in_use = false; + if (!rcb->skb) return; - } } else { skb_push(rcb->skb, skb_headroom(rcb->skb)); skb_trim(rcb->skb, 0); @@ -240,11 +404,8 @@ static void vnt_submit_rx_urb_complete(struct urb *urb) skb_tailroom(rcb->skb)); } - if (usb_submit_urb(urb, GFP_ATOMIC)) { + if (usb_submit_urb(urb, GFP_ATOMIC)) dev_dbg(&priv->usb->dev, "Failed to re submit rx skb\n"); - - rcb->in_use = false; - } } int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb) @@ -267,13 +428,8 @@ int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb) rcb); ret = usb_submit_urb(urb, GFP_ATOMIC); - if (ret) { + if (ret) dev_dbg(&priv->usb->dev, "Submit Rx URB failed %d\n", ret); - goto end; - } - - rcb->in_use = true; - end: return ret; } @@ -317,7 +473,7 @@ int vnt_tx_context(struct vnt_private *priv, if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) { context->in_use = false; - return STATUS_RESOURCES; + return -ENODEV; } usb_fill_bulk_urb(urb, @@ -333,8 +489,7 @@ int vnt_tx_context(struct vnt_private *priv, dev_dbg(&priv->usb->dev, "Submit Tx URB failed %d\n", status); context->in_use = false; - return STATUS_FAILURE; } - return STATUS_PENDING; + return status; } diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h index 4e3341bc3221..35697b58d748 100644 --- a/drivers/staging/vt6656/usbpipe.h +++ b/drivers/staging/vt6656/usbpipe.h @@ -18,6 +18,29 @@ #include "device.h" +struct vnt_interrupt_data { + u8 tsr0; + u8 pkt0; + u16 time0; + u8 tsr1; + u8 pkt1; + u16 time1; + u8 tsr2; + u8 pkt2; + u16 time2; + u8 tsr3; + u8 pkt3; + u16 time3; + __le64 tsf; + u8 isr0; + u8 isr1; + u8 rts_success; + u8 rts_fail; + u8 ack_fail; + u8 fcs_err; + u8 sw[2]; +} __packed; + #define VNT_REG_BLOCK_SIZE 64 int vnt_control_out(struct vnt_private *priv, u8 request, u16 value, diff --git a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt index 26de6762b942..17db67559f5e 100644 --- a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt +++ b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt @@ -6,7 +6,7 @@ SPI You have to declare the WFxxx chip in your device tree. 
Required properties: - - compatible: Should be "silabs,wfx-spi" + - compatible: Should be "silabs,wf200" - reg: Chip select address of device - spi-max-frequency: Maximum SPI clocking speed of device in Hz - interrupts-extended: Should contain interrupt line (interrupt-parent + @@ -15,6 +15,7 @@ Required properties: Optional properties: - reset-gpios: phandle of gpio that will be used to reset chip during probe. Without this property, you may encounter issues with warm boot. + (Legacy: when compatible == "silabs,wfx-spi", the gpio is inverted.) Please consult Documentation/devicetree/bindings/spi/spi-bus.txt for optional SPI connection related properties, @@ -23,12 +24,12 @@ Example: &spi1 { wfx { - compatible = "silabs,wfx-spi"; + compatible = "silabs,wf200"; pinctrl-names = "default"; pinctrl-0 = <&wfx_irq &wfx_gpios>; interrupts-extended = <&gpio 16 IRQ_TYPE_EDGE_RISING>; wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>; - reset-gpios = <&gpio 13 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>; reg = <0>; spi-max-frequency = <42000000>; }; @@ -44,7 +45,7 @@ case. Thus declaring WFxxx chip in device tree is strongly recommended (and may become mandatory in the future). Required properties: - - compatible: Should be "silabs,wfx-sdio" + - compatible: Should be "silabs,wf200" - reg: Should be 1 In addition, it is recommended to declare a mmc-pwrseq on SDIO host above WFx. @@ -70,7 +71,7 @@ Example: #size = <0>; mmc@1 { - compatible = "silabs,wfx-sdio"; + compatible = "silabs,wf200"; reg = <1>; pinctrl-names = "default"; pinctrl-0 = <&wfx_wakeup>; @@ -93,5 +94,5 @@ Some properties are recognized either by SPI and SDIO versions: Must contains 64 hexadecimal digits. Not supported in current version. WFx driver also supports `mac-address` and `local-mac-address` as described in -Documentation/devicetree/binding/net/ethernet.txt +Documentation/devicetree/bindings/net/ethernet.txt diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c index 983c41d1fe7c..9fcab00a3733 100644 --- a/drivers/staging/wfx/bh.c +++ b/drivers/staging/wfx/bh.c @@ -20,13 +20,13 @@ static void device_wakeup(struct wfx_dev *wdev) { if (!wdev->pdata.gpio_wakeup) return; - if (gpiod_get_value(wdev->pdata.gpio_wakeup)) + if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup)) return; - gpiod_set_value(wdev->pdata.gpio_wakeup, 1); + gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1); if (wfx_api_older_than(wdev, 1, 4)) { if (!completion_done(&wdev->hif.ctrl_ready)) - udelay(2000); + usleep_range(2000, 2500); } else { // completion.h does not provide any function to wait // completion without consume it (a kind of @@ -45,7 +45,7 @@ static void device_release(struct wfx_dev *wdev) if (!wdev->pdata.gpio_wakeup) return; - gpiod_set_value(wdev->pdata.gpio_wakeup, 0); + gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0); } static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf) diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c index f8901164c206..dedc3ff58d3e 100644 --- a/drivers/staging/wfx/bus_sdio.c +++ b/drivers/staging/wfx/bus_sdio.c @@ -200,25 +200,23 @@ static int wfx_sdio_probe(struct sdio_func *func, if (ret) goto err0; - ret = wfx_sdio_irq_subscribe(bus); - if (ret) - goto err1; - bus->core = wfx_init_common(&func->dev, &wfx_sdio_pdata, &wfx_sdio_hwbus_ops, bus); if (!bus->core) { ret = -EIO; - goto err2; + goto err1; } + ret = wfx_sdio_irq_subscribe(bus); + if (ret) + goto err1; + ret = wfx_probe(bus->core); if (ret) - goto err3; + goto err2; return 0; -err3: 
- wfx_free_common(bus->core); err2: wfx_sdio_irq_unsubscribe(bus); err1: @@ -234,7 +232,6 @@ static void wfx_sdio_remove(struct sdio_func *func) struct wfx_sdio_priv *bus = sdio_get_drvdata(func); wfx_release(bus->core); - wfx_free_common(bus->core); wfx_sdio_irq_unsubscribe(bus); sdio_claim_host(func); sdio_disable_func(func); @@ -254,6 +251,7 @@ MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids); #ifdef CONFIG_OF static const struct of_device_id wfx_sdio_of_match[] = { { .compatible = "silabs,wfx-sdio" }, + { .compatible = "silabs,wf200" }, { }, }; MODULE_DEVICE_TABLE(of, wfx_sdio_of_match); diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c index 40bc33035de2..61e99b09decb 100644 --- a/drivers/staging/wfx/bus_spi.c +++ b/drivers/staging/wfx/bus_spi.c @@ -27,6 +27,8 @@ MODULE_PARM_DESC(gpio_reset, "gpio number for reset. -1 for none."); #define SET_WRITE 0x7FFF /* usage: and operation */ #define SET_READ 0x8000 /* usage: or operation */ +#define WFX_RESET_INVERTED 1 + static const struct wfx_platform_data wfx_spi_pdata = { .file_fw = "wfm_wf200", .file_pds = "wf200.pds", @@ -154,6 +156,11 @@ static void wfx_spi_request_rx(struct work_struct *work) wfx_bh_request_rx(bus->core); } +static void wfx_flush_irq_work(void *w) +{ + flush_work(w); +} + static size_t wfx_spi_align_size(void *priv, size_t size) { // Most of SPI controllers avoid DMA if buffer size is not 32bit aligned @@ -201,28 +208,31 @@ static int wfx_spi_probe(struct spi_device *func) if (!bus->gpio_reset) { dev_warn(&func->dev, "try to load firmware anyway\n"); } else { - gpiod_set_value(bus->gpio_reset, 0); - udelay(100); - gpiod_set_value(bus->gpio_reset, 1); - udelay(2000); + if (spi_get_device_id(func)->driver_data & WFX_RESET_INVERTED) + gpiod_toggle_active_low(bus->gpio_reset); + gpiod_set_value_cansleep(bus->gpio_reset, 1); + usleep_range(100, 150); + gpiod_set_value_cansleep(bus->gpio_reset, 0); + usleep_range(2000, 2500); } - ret = devm_request_irq(&func->dev, func->irq, wfx_spi_irq_handler, - IRQF_TRIGGER_RISING, "wfx", bus); - if (ret) - return ret; - INIT_WORK(&bus->request_rx, wfx_spi_request_rx); bus->core = wfx_init_common(&func->dev, &wfx_spi_pdata, &wfx_spi_hwbus_ops, bus); if (!bus->core) return -EIO; - ret = wfx_probe(bus->core); + ret = devm_add_action_or_reset(&func->dev, wfx_flush_irq_work, + &bus->request_rx); if (ret) - wfx_free_common(bus->core); + return ret; - return ret; + ret = devm_request_irq(&func->dev, func->irq, wfx_spi_irq_handler, + IRQF_TRIGGER_RISING, "wfx", bus); + if (ret) + return ret; + + return wfx_probe(bus->core); } static int wfx_spi_remove(struct spi_device *func) @@ -230,11 +240,6 @@ static int wfx_spi_remove(struct spi_device *func) struct wfx_spi_priv *bus = spi_get_drvdata(func); wfx_release(bus->core); - wfx_free_common(bus->core); - // A few IRQ will be sent during device release. Hopefully, no IRQ - // should happen after wdev/wvif are released. - devm_free_irq(&func->dev, func->irq, bus); - flush_work(&bus->request_rx); return 0; } @@ -244,14 +249,16 @@ static int wfx_spi_remove(struct spi_device *func) * stripped. 
*/ static const struct spi_device_id wfx_spi_id[] = { - { "wfx-spi", 0 }, + { "wfx-spi", WFX_RESET_INVERTED }, + { "wf200", 0 }, { }, }; MODULE_DEVICE_TABLE(spi, wfx_spi_id); #ifdef CONFIG_OF static const struct of_device_id wfx_spi_of_match[] = { - { .compatible = "silabs,wfx-spi" }, + { .compatible = "silabs,wfx-spi", .data = (void *)WFX_RESET_INVERTED }, + { .compatible = "silabs,wf200" }, { }, }; MODULE_DEVICE_TABLE(of, wfx_spi_of_match); diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c index 5d198457c6ce..c5b83fedeb55 100644 --- a/drivers/staging/wfx/data_rx.c +++ b/drivers/staging/wfx/data_rx.c @@ -17,7 +17,7 @@ static int wfx_drop_encrypt_data(struct wfx_dev *wdev, const struct hif_ind_rx *arg, struct sk_buff *skb) { - struct ieee80211_hdr *frame = (struct ieee80211_hdr *) skb->data; + struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data; size_t hdrlen = ieee80211_hdrlen(frame->frame_control); size_t iv_len, icv_len; @@ -62,7 +62,6 @@ static int wfx_drop_encrypt_data(struct wfx_dev *wdev, memmove(skb->data + iv_len, skb->data, hdrlen); skb_pull(skb, iv_len); return 0; - } void wfx_rx_cb(struct wfx_vif *wvif, diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c index 20f4740734f2..42183c70d4df 100644 --- a/drivers/staging/wfx/data_tx.c +++ b/drivers/staging/wfx/data_tx.c @@ -227,7 +227,7 @@ static int wfx_tx_policy_upload(struct wfx_vif *wvif) memzcmp(policies[i].rates, sizeof(policies[i].rates))) break; if (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES) { - policies[i].uploaded = 1; + policies[i].uploaded = true; memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates)); spin_unlock_bh(&wvif->tx_policy_cache.lock); hif_set_tx_rate_retry_policy(wvif, i, tmp_rates); @@ -300,11 +300,11 @@ static void wfx_tx_manage_pm(struct wfx_vif *wvif, struct ieee80211_hdr *hdr, } static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif, - struct ieee80211_sta *sta, - struct ieee80211_hdr *hdr) + struct ieee80211_sta *sta, + struct ieee80211_hdr *hdr) { struct wfx_sta_priv *sta_priv = - sta ? (struct wfx_sta_priv *) &sta->drv_priv : NULL; + sta ? 
(struct wfx_sta_priv *)&sta->drv_priv : NULL; const u8 *da = ieee80211_get_DA(hdr); if (sta_priv && sta_priv->link_id) @@ -368,7 +368,7 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates) } static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif, - struct ieee80211_tx_info *tx_info) + struct ieee80211_tx_info *tx_info) { bool tx_policy_renew = false; u8 rate_id; @@ -430,7 +430,7 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; int queue_id = tx_info->hw_queue; - size_t offset = (size_t) skb->data & 3; + size_t offset = (size_t)skb->data & 3; int wmsg_len = sizeof(struct hif_msg) + sizeof(struct hif_req_tx) + offset; diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h index 04b2147101b6..c545dd75449b 100644 --- a/drivers/staging/wfx/data_tx.h +++ b/drivers/staging/wfx/data_tx.h @@ -61,7 +61,7 @@ static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb) static inline struct hif_req_tx *wfx_skb_txreq(struct sk_buff *skb) { struct hif_msg *hif = (struct hif_msg *)skb->data; - struct hif_req_tx *req = (struct hif_req_tx *) hif->body; + struct hif_req_tx *req = (struct hif_req_tx *)hif->body; return req; } diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h index 5554d6eddbf3..071b71e2a107 100644 --- a/drivers/staging/wfx/hif_api_cmd.h +++ b/drivers/staging/wfx/hif_api_cmd.h @@ -95,10 +95,6 @@ struct hif_req_reset { struct hif_reset_flags reset_flags; } __packed; -struct hif_cnf_reset { - u32 status; -} __packed; - struct hif_req_read_mib { u16 mib_id; u16 reserved; diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c index 2428363371fa..77bca43aca42 100644 --- a/drivers/staging/wfx/hif_tx.c +++ b/drivers/staging/wfx/hif_tx.c @@ -140,6 +140,7 @@ int hif_shutdown(struct wfx_dev *wdev) else control_reg_write(wdev, 0); mutex_unlock(&wdev->hif_cmd.lock); + mutex_unlock(&wdev->hif_cmd.key_renew_lock); kfree(hif); return ret; } @@ -289,7 +290,7 @@ int hif_stop_scan(struct wfx_vif *wvif) } int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, - const struct ieee80211_channel *channel, const u8 *ssidie) + struct ieee80211_channel *channel, const u8 *ssid, int ssidlen) { int ret; struct hif_msg *hif; @@ -307,9 +308,9 @@ int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates)); memcpy(body->bssid, conf->bssid, sizeof(body->bssid)); - if (!conf->ibss_joined && ssidie) { - body->ssid_length = cpu_to_le32(ssidie[1]); - memcpy(body->ssid, &ssidie[2], ssidie[1]); + if (!conf->ibss_joined && ssid) { + body->ssid_length = cpu_to_le32(ssidlen); + memcpy(body->ssid, ssid, ssidlen); } wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body)); ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); @@ -427,9 +428,9 @@ int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, struct hif_msg *hif; struct hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif); - body->dtim_period = conf->dtim_period, - body->short_preamble = conf->use_short_preamble, - body->channel_number = cpu_to_le16(channel->hw_value), + body->dtim_period = conf->dtim_period; + body->short_preamble = conf->use_short_preamble; + body->channel_number = cpu_to_le16(channel->hw_value); body->beacon_interval = cpu_to_le32(conf->beacon_int); body->basic_rate_set = 
cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates)); diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h index 20977e461718..f8520a14c14c 100644 --- a/drivers/staging/wfx/hif_tx.h +++ b/drivers/staging/wfx/hif_tx.h @@ -46,7 +46,7 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211, int chan_start, int chan_num); int hif_stop_scan(struct wfx_vif *wvif); int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, - const struct ieee80211_channel *channel, const u8 *ssidie); + struct ieee80211_channel *channel, const u8 *ssid, int ssidlen); int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout); int hif_set_bss_params(struct wfx_vif *wvif, const struct hif_req_set_bss_params *arg); diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h index bf3769c2a9b6..26b1406f9f6c 100644 --- a/drivers/staging/wfx/hif_tx_mib.h +++ b/drivers/staging/wfx/hif_tx_mib.h @@ -191,10 +191,10 @@ static inline int hif_set_block_ack_policy(struct wfx_vif *wvif, } static inline int hif_set_association_mode(struct wfx_vif *wvif, - struct ieee80211_bss_conf *info, - struct ieee80211_sta_ht_cap *ht_cap) + struct ieee80211_bss_conf *info) { int basic_rates = wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates); + struct ieee80211_sta *sta = NULL; struct hif_mib_set_association_mode val = { .preambtype_use = 1, .mode = 1, @@ -204,12 +204,17 @@ static inline int hif_set_association_mode(struct wfx_vif *wvif, .basic_rate_set = cpu_to_le32(basic_rates) }; + rcu_read_lock(); // protect sta + if (info->bssid && !info->ibss_joined) + sta = ieee80211_find_sta(wvif->vif, info->bssid); + // FIXME: it is strange to not retrieve all information from bss_info - if (ht_cap && ht_cap->ht_supported) { - val.mpdu_start_spacing = ht_cap->ampdu_density; + if (sta && sta->ht_cap.ht_supported) { + val.mpdu_start_spacing = sta->ht_cap.ampdu_density; if (!(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)) - val.greenfield = !!(ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD); + val.greenfield = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD); } + rcu_read_unlock(); return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_ASSOCIATION_MODE, &val, sizeof(val)); diff --git a/drivers/staging/wfx/hwio.c b/drivers/staging/wfx/hwio.c index 47e04c59ed93..d3a141d95a0e 100644 --- a/drivers/staging/wfx/hwio.c +++ b/drivers/staging/wfx/hwio.c @@ -142,7 +142,7 @@ static int indirect_read(struct wfx_dev *wdev, int reg, u32 addr, void *buf, goto err; if (!(cfg & prefetch)) break; - udelay(200); + usleep_range(200, 250); } if (i == 20) { ret = -ETIMEDOUT; diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c index 84adad64fc30..3c4c240229ad 100644 --- a/drivers/staging/wfx/main.c +++ b/drivers/staging/wfx/main.c @@ -262,6 +262,16 @@ static int wfx_send_pdata_pds(struct wfx_dev *wdev) return ret; } +static void wfx_free_common(void *data) +{ + struct wfx_dev *wdev = data; + + mutex_destroy(&wdev->rx_stats_lock); + mutex_destroy(&wdev->conf_mutex); + wfx_tx_queues_deinit(wdev); + ieee80211_free_hw(wdev->hw); +} + struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_data *pdata, const struct hwbus_ops *hwbus_ops, @@ -332,15 +342,10 @@ struct wfx_dev *wfx_init_common(struct device *dev, wfx_init_hif_cmd(&wdev->hif_cmd); wfx_tx_queues_init(wdev); - return wdev; -} + if (devm_add_action_or_reset(dev, wfx_free_common, wdev)) + return NULL; -void wfx_free_common(struct wfx_dev *wdev) -{ - 
mutex_destroy(&wdev->rx_stats_lock); - mutex_destroy(&wdev->conf_mutex); - wfx_tx_queues_deinit(wdev); - ieee80211_free_hw(wdev->hw); + return wdev; } int wfx_probe(struct wfx_dev *wdev) @@ -420,7 +425,7 @@ int wfx_probe(struct wfx_dev *wdev) "enable 'quiescent' power mode with gpio %d and PDS file %s\n", desc_to_gpio(wdev->pdata.gpio_wakeup), wdev->pdata.file_pds); - gpiod_set_value(wdev->pdata.gpio_wakeup, 1); + gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1); control_reg_write(wdev, 0); hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_QUIESCENT); } else { diff --git a/drivers/staging/wfx/main.h b/drivers/staging/wfx/main.h index 875f8c227803..9c9410072def 100644 --- a/drivers/staging/wfx/main.h +++ b/drivers/staging/wfx/main.h @@ -34,7 +34,6 @@ struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_data *pdata, const struct hwbus_ops *hwbus_ops, void *hwbus_priv); -void wfx_free_common(struct wfx_dev *wdev); int wfx_probe(struct wfx_dev *wdev); void wfx_release(struct wfx_dev *wdev); diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c index 0bcc61feee1d..39d9127ce4b9 100644 --- a/drivers/staging/wfx/queue.c +++ b/drivers/staging/wfx/queue.c @@ -130,12 +130,12 @@ static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue, spin_lock_bh(&queue->queue.lock); while ((item = __skb_dequeue(&queue->queue)) != NULL) skb_queue_head(gc_list, item); - spin_lock_bh(&stats->pending.lock); + spin_lock_nested(&stats->pending.lock, 1); for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) { stats->link_map_cache[i] -= queue->link_map_cache[i]; queue->link_map_cache[i] = 0; } - spin_unlock_bh(&stats->pending.lock); + spin_unlock(&stats->pending.lock); spin_unlock_bh(&queue->queue.lock); } @@ -207,9 +207,9 @@ void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue, ++queue->link_map_cache[tx_priv->link_id]; - spin_lock_bh(&stats->pending.lock); + spin_lock_nested(&stats->pending.lock, 1); ++stats->link_map_cache[tx_priv->link_id]; - spin_unlock_bh(&stats->pending.lock); + spin_unlock(&stats->pending.lock); spin_unlock_bh(&queue->queue.lock); } @@ -237,11 +237,11 @@ static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev, __skb_unlink(skb, &queue->queue); --queue->link_map_cache[tx_priv->link_id]; - spin_lock_bh(&stats->pending.lock); + spin_lock_nested(&stats->pending.lock, 1); __skb_queue_tail(&stats->pending, skb); if (!--stats->link_map_cache[tx_priv->link_id]) wakeup_stats = true; - spin_unlock_bh(&stats->pending.lock); + spin_unlock(&stats->pending.lock); } spin_unlock_bh(&queue->queue.lock); if (wakeup_stats) @@ -259,10 +259,10 @@ int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb) spin_lock_bh(&queue->queue.lock); ++queue->link_map_cache[tx_priv->link_id]; - spin_lock_bh(&stats->pending.lock); + spin_lock_nested(&stats->pending.lock, 1); ++stats->link_map_cache[tx_priv->link_id]; __skb_unlink(skb, &stats->pending); - spin_unlock_bh(&stats->pending.lock); + spin_unlock(&stats->pending.lock); __skb_queue_tail(&queue->queue, skb); spin_unlock_bh(&queue->queue.lock); return 0; @@ -481,7 +481,6 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev) struct wfx_queue *vif_queue = NULL; u32 tx_allowed_mask = 0; u32 vif_tx_allowed_mask = 0; - const struct wfx_tx_priv *tx_priv = NULL; struct wfx_vif *wvif; int not_found; int burst; @@ -541,8 +540,7 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev) skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask); if (!skb) continue; - tx_priv = 
wfx_skb_tx_priv(skb); - hif = (struct hif_msg *) skb->data; + hif = (struct hif_msg *)skb->data; wvif = wdev_to_wvif(wdev, hif->interface); WARN_ON(!wvif); diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c index 03d0f224ffdb..9d430346a58b 100644 --- a/drivers/staging/wfx/sta.c +++ b/drivers/staging/wfx/sta.c @@ -293,7 +293,6 @@ int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct wfx_dev *wdev = hw->priv; struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv; int old_uapsd = wvif->uapsd_mask; - int ret = 0; WARN_ON(queue >= hw->queues); @@ -307,7 +306,7 @@ int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, wfx_update_pm(wvif); } mutex_unlock(&wdev->conf_mutex); - return ret; + return 0; } int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value) @@ -491,9 +490,11 @@ static void wfx_set_mfp(struct wfx_vif *wvif, static void wfx_do_join(struct wfx_vif *wvif) { int ret; - const u8 *ssidie; struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf; struct cfg80211_bss *bss = NULL; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + const u8 *ssidie = NULL; + int ssidlen = 0; wfx_tx_lock_flush(wvif->wdev); @@ -514,11 +515,14 @@ static void wfx_do_join(struct wfx_vif *wvif) if (!wvif->beacon_int) wvif->beacon_int = 1; - rcu_read_lock(); + rcu_read_lock(); // protect ssidie if (!conf->ibss_joined) ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); - else - ssidie = NULL; + if (ssidie) { + ssidlen = ssidie[1]; + memcpy(ssid, &ssidie[2], ssidie[1]); + } + rcu_read_unlock(); wfx_tx_flush(wvif->wdev); @@ -527,10 +531,8 @@ static void wfx_do_join(struct wfx_vif *wvif) wfx_set_mfp(wvif, bss); - /* Perform actual join */ wvif->wdev->tx_burst_idx = -1; - ret = hif_join(wvif, conf, wvif->channel, ssidie); - rcu_read_unlock(); + ret = hif_join(wvif, conf, wvif->channel, ssid, ssidlen); if (ret) { ieee80211_connection_loss(wvif->vif); wvif->join_complete_status = -1; @@ -605,7 +607,9 @@ int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int i; for (i = 0; i < ARRAY_SIZE(sta_priv->buffered); i++) - WARN(sta_priv->buffered[i], "release station while Tx is in progress"); + if (sta_priv->buffered[i]) + dev_warn(wvif->wdev->dev, "release station while %d pending frame on queue %d", + sta_priv->buffered[i], i); // FIXME: see note in wfx_sta_add() if (vif->type == NL80211_IFTYPE_STATION) return 0; @@ -689,6 +693,7 @@ static void wfx_join_finalize(struct wfx_vif *wvif, wfx_rate_mask_to_hw(wvif->wdev, sta->supp_rates[wvif->channel->band]); else wvif->bss_params.operational_rate_set = -1; + rcu_read_unlock(); if (sta && info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) hif_dual_cts_protection(wvif, true); @@ -701,8 +706,7 @@ static void wfx_join_finalize(struct wfx_vif *wvif, wvif->bss_params.beacon_lost_count = 20; wvif->bss_params.aid = info->aid; - hif_set_association_mode(wvif, info, sta ? 
&sta->ht_cap : NULL); - rcu_read_unlock(); + hif_set_association_mode(wvif, info); if (!info->ibss_joined) { hif_keep_alive_period(wvif, 30 /* sec */); @@ -904,7 +908,7 @@ static void wfx_update_tim_work(struct work_struct *work) int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) { struct wfx_dev *wdev = hw->priv; - struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *) &sta->drv_priv; + struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *)&sta->drv_priv; struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id); schedule_work(&wvif->update_tim_work); diff --git a/drivers/staging/wilc1000/Kconfig b/drivers/staging/wilc1000/Kconfig index 59e58550d139..80c92e8bf8a5 100644 --- a/drivers/staging/wilc1000/Kconfig +++ b/drivers/staging/wilc1000/Kconfig @@ -2,6 +2,10 @@ config WILC1000 tristate help + Add support for the Atmel WILC1000 802.11 b/g/n SoC. + This provides Wi-FI over an SDIO or SPI interface, and + is usually found in IoT devices. + This module only support IEEE 802.11n WiFi. config WILC1000_SDIO @@ -22,6 +26,7 @@ config WILC1000_SPI tristate "Atmel WILC1000 SPI (WiFi only)" depends on CFG80211 && INET && SPI select WILC1000 + select CRC7 help This module adds support for the SPI interface of adapters using WILC1000 chipset. The Atmel WILC1000 has a Serial Peripheral diff --git a/drivers/staging/wilc1000/cfg80211.c b/drivers/staging/wilc1000/cfg80211.c index 4863e516ff13..4bdcbc5fd2fd 100644 --- a/drivers/staging/wilc1000/cfg80211.c +++ b/drivers/staging/wilc1000/cfg80211.c @@ -6,29 +6,17 @@ #include "cfg80211.h" -#define FRAME_TYPE_ID 0 -#define ACTION_CAT_ID 24 -#define ACTION_SUBTYPE_ID 25 -#define P2P_PUB_ACTION_SUBTYPE 30 - -#define ACTION_FRAME 0xd0 -#define GO_INTENT_ATTR_ID 0x04 -#define CHANLIST_ATTR_ID 0x0b -#define OPERCHAN_ATTR_ID 0x11 -#define PUB_ACTION_ATTR_ID 0x04 -#define P2PELEM_ATTR_ID 0xdd - #define GO_NEG_REQ 0x00 #define GO_NEG_RSP 0x01 #define GO_NEG_CONF 0x02 #define P2P_INV_REQ 0x03 #define P2P_INV_RSP 0x04 -#define PUBLIC_ACT_VENDORSPEC 0x09 -#define GAS_INITIAL_REQ 0x0a -#define GAS_INITIAL_RSP 0x0b #define WILC_INVALID_CHANNEL 0 +/* Operation at 2.4 GHz with channels 1-13 */ +#define WILC_WLAN_OPERATING_CLASS_2_4GHZ 0x51 + static const struct ieee80211_txrx_stypes wilc_wfi_cfg80211_mgmt_types[NUM_NL80211_IFTYPES] = { [NL80211_IFTYPE_STATION] = { @@ -67,8 +55,50 @@ struct wilc_p2p_mgmt_data { u8 *buff; }; -static const u8 p2p_oui[] = {0x50, 0x6f, 0x9A, 0x09}; -static const u8 p2p_vendor_spec[] = {0xdd, 0x05, 0x00, 0x08, 0x40, 0x03}; +struct wilc_p2p_pub_act_frame { + u8 category; + u8 action; + u8 oui[3]; + u8 oui_type; + u8 oui_subtype; + u8 dialog_token; + u8 elem[]; +} __packed; + +struct wilc_vendor_specific_ie { + u8 tag_number; + u8 tag_len; + u8 oui[3]; + u8 oui_type; + u8 attr[]; +} __packed; + +struct wilc_attr_entry { + u8 attr_type; + __le16 attr_len; + u8 val[]; +} __packed; + +struct wilc_attr_oper_ch { + u8 attr_type; + __le16 attr_len; + u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; + u8 op_class; + u8 op_channel; +} __packed; + +struct wilc_attr_ch_list { + u8 attr_type; + __le16 attr_len; + u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; + u8 elem[]; +} __packed; + +struct wilc_ch_list_elem { + u8 op_class; + u8 no_of_channels; + u8 ch_list[]; +} __packed; static void cfg_scan_result(enum scan_event scan_event, struct wilc_rcvd_net_info *info, void *user_void) @@ -172,9 +202,6 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status, } else if (conn_disconn_evt == 
CONN_DISCONN_EVENT_DISCONN_NOTIF) { u16 reason = 0; - priv->p2p.local_random = 0x01; - priv->p2p.recv_random = 0x00; - priv->p2p.is_wilc_ie = false; eth_zero_addr(priv->associated_bss); wilc_wlan_set_bssid(priv->dev, NULL, WILC_STATION_MODE); @@ -446,9 +473,6 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev, wilc->sta_ch = WILC_INVALID_CHANNEL; wilc_wlan_set_bssid(priv->dev, NULL, WILC_STATION_MODE); - priv->p2p.local_random = 0x01; - priv->p2p.recv_random = 0x00; - priv->p2p.is_wilc_ie = false; priv->hif_drv->p2p_timeout = 0; ret = wilc_disconnect(vif); @@ -864,7 +888,6 @@ static int del_pmksa(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa) { u32 i; - int ret = 0; struct wilc_vif *vif = netdev_priv(netdev); struct wilc_priv *priv = &vif->priv; @@ -877,21 +900,20 @@ static int del_pmksa(struct wiphy *wiphy, struct net_device *netdev, } } - if (i < priv->pmkid_list.numpmkid && priv->pmkid_list.numpmkid > 0) { - for (; i < (priv->pmkid_list.numpmkid - 1); i++) { - memcpy(priv->pmkid_list.pmkidlist[i].bssid, - priv->pmkid_list.pmkidlist[i + 1].bssid, - ETH_ALEN); - memcpy(priv->pmkid_list.pmkidlist[i].pmkid, - priv->pmkid_list.pmkidlist[i + 1].pmkid, - WLAN_PMKID_LEN); - } - priv->pmkid_list.numpmkid--; - } else { - ret = -EINVAL; + if (i == priv->pmkid_list.numpmkid) + return -EINVAL; + + for (; i < (priv->pmkid_list.numpmkid - 1); i++) { + memcpy(priv->pmkid_list.pmkidlist[i].bssid, + priv->pmkid_list.pmkidlist[i + 1].bssid, + ETH_ALEN); + memcpy(priv->pmkid_list.pmkidlist[i].pmkid, + priv->pmkid_list.pmkidlist[i + 1].pmkid, + WLAN_PMKID_LEN); } + priv->pmkid_list.numpmkid--; - return ret; + return 0; } static int flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) @@ -903,113 +925,50 @@ static int flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) return 0; } -static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u8 ch_list_attr_idx, - u8 op_ch_attr_idx, u8 sta_ch) -{ - int i = 0; - int j = 0; - - if (ch_list_attr_idx) { - u8 limit = ch_list_attr_idx + 3 + buf[ch_list_attr_idx + 1]; - - for (i = ch_list_attr_idx + 3; i < limit; i++) { - if (buf[i] == 0x51) { - for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) - buf[j] = sta_ch; - break; - } - } - } - - if (op_ch_attr_idx) { - buf[op_ch_attr_idx + 6] = 0x51; - buf[op_ch_attr_idx + 7] = sta_ch; - } -} - -static void wilc_wfi_cfg_parse_rx_action(u8 *buf, u32 len, u8 sta_ch) +static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u32 len, u8 sta_ch) { + struct wilc_attr_entry *e; + struct wilc_attr_ch_list *ch_list; + struct wilc_attr_oper_ch *op_ch; u32 index = 0; - u8 op_channel_attr_index = 0; - u8 channel_list_attr_index = 0; - - while (index < len) { - if (buf[index] == GO_INTENT_ATTR_ID) - buf[index + 3] = (buf[index + 3] & 0x01) | (0x00 << 1); - - if (buf[index] == CHANLIST_ATTR_ID) - channel_list_attr_index = index; - else if (buf[index] == OPERCHAN_ATTR_ID) - op_channel_attr_index = index; - index += buf[index + 1] + 3; - } - if (sta_ch != WILC_INVALID_CHANNEL) - wilc_wfi_cfg_parse_ch_attr(buf, channel_list_attr_index, - op_channel_attr_index, sta_ch); -} + u8 ch_list_idx = 0; + u8 op_ch_idx = 0; -static void wilc_wfi_cfg_parse_tx_action(u8 *buf, u32 len, bool oper_ch, - u8 iftype, u8 sta_ch) -{ - u32 index = 0; - u8 op_channel_attr_index = 0; - u8 channel_list_attr_index = 0; - - while (index < len) { - if (buf[index] == GO_INTENT_ATTR_ID) { - buf[index + 3] = (buf[index + 3] & 0x01) | (0x0f << 1); + if (sta_ch == WILC_INVALID_CHANNEL) + return; + while (index + 
sizeof(*e) <= len) { + e = (struct wilc_attr_entry *)&buf[index]; + if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST) + ch_list_idx = index; + else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL) + op_ch_idx = index; + if (ch_list_idx && op_ch_idx) break; - } - - if (buf[index] == CHANLIST_ATTR_ID) - channel_list_attr_index = index; - else if (buf[index] == OPERCHAN_ATTR_ID) - op_channel_attr_index = index; - index += buf[index + 1] + 3; + index += le16_to_cpu(e->attr_len) + sizeof(*e); } - if (sta_ch != WILC_INVALID_CHANNEL && oper_ch) - wilc_wfi_cfg_parse_ch_attr(buf, channel_list_attr_index, - op_channel_attr_index, sta_ch); -} -static void wilc_wfi_cfg_parse_rx_vendor_spec(struct wilc_priv *priv, u8 *buff, - u32 size) -{ - int i; - u8 subtype; - struct wilc_vif *vif = netdev_priv(priv->dev); - - subtype = buff[P2P_PUB_ACTION_SUBTYPE]; - if ((subtype == GO_NEG_REQ || subtype == GO_NEG_RSP) && - !priv->p2p.is_wilc_ie) { - for (i = P2P_PUB_ACTION_SUBTYPE; i < size; i++) { - if (!memcmp(p2p_vendor_spec, &buff[i], 6)) { - priv->p2p.recv_random = buff[i + 6]; - priv->p2p.is_wilc_ie = true; + if (ch_list_idx) { + u16 attr_size; + struct wilc_ch_list_elem *e; + int i; + + ch_list = (struct wilc_attr_ch_list *)&buf[ch_list_idx]; + attr_size = le16_to_cpu(ch_list->attr_len); + for (i = 0; i < attr_size;) { + e = (struct wilc_ch_list_elem *)(ch_list->elem + i); + if (e->op_class == WILC_WLAN_OPERATING_CLASS_2_4GHZ) { + memset(e->ch_list, sta_ch, e->no_of_channels); break; } + i += e->no_of_channels; } } - if (priv->p2p.local_random <= priv->p2p.recv_random) { - netdev_dbg(vif->ndev, - "PEER WILL BE GO LocaRand=%02x RecvRand %02x\n", - priv->p2p.local_random, priv->p2p.recv_random); - return; - } - - if (subtype == GO_NEG_REQ || subtype == GO_NEG_RSP || - subtype == P2P_INV_REQ || subtype == P2P_INV_RSP) { - for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < size; i++) { - if (buff[i] == P2PELEM_ATTR_ID && - !(memcmp(p2p_oui, &buff[i + 2], 4))) { - wilc_wfi_cfg_parse_rx_action(&buff[i + 6], - size - (i + 6), - vif->wilc->sta_ch); - break; - } - } + if (op_ch_idx) { + op_ch = (struct wilc_attr_oper_ch *)&buf[op_ch_idx]; + op_ch->op_class = WILC_WLAN_OPERATING_CLASS_2_4GHZ; + op_ch->op_channel = sta_ch; } } @@ -1018,17 +977,22 @@ void wilc_wfi_p2p_rx(struct wilc_vif *vif, u8 *buff, u32 size) struct wilc *wl = vif->wilc; struct wilc_priv *priv = &vif->priv; struct host_if_drv *wfi_drv = priv->hif_drv; + struct ieee80211_mgmt *mgmt; + struct wilc_vendor_specific_ie *p; + struct wilc_p2p_pub_act_frame *d; + int ie_offset = offsetof(struct ieee80211_mgmt, u) + sizeof(*d); + const u8 *vendor_ie; u32 header, pkt_offset; s32 freq; - __le16 fc; header = get_unaligned_le32(buff - HOST_HDR_OFFSET); - pkt_offset = GET_PKT_OFFSET(header); + pkt_offset = FIELD_GET(WILC_PKT_HDR_OFFSET_FIELD, header); if (pkt_offset & IS_MANAGMEMENT_CALLBACK) { bool ack = false; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)buff; - if (buff[FRAME_TYPE_ID] == IEEE80211_STYPE_PROBE_RESP || + if (ieee80211_is_probe_resp(hdr->frame_control) || pkt_offset & IS_MGMT_STATUS_SUCCES) ack = true; @@ -1039,44 +1003,33 @@ void wilc_wfi_p2p_rx(struct wilc_vif *vif, u8 *buff, u32 size) freq = ieee80211_channel_to_frequency(wl->op_ch, NL80211_BAND_2GHZ); - fc = ((struct ieee80211_hdr *)buff)->frame_control; - if (!ieee80211_is_action(fc)) { - cfg80211_rx_mgmt(&priv->wdev, freq, 0, buff, size, 0); - return; - } + mgmt = (struct ieee80211_mgmt *)buff; + if (!ieee80211_is_action(mgmt->frame_control)) + goto out_rx_mgmt; if (priv->cfg_scanning 
&& time_after_eq(jiffies, (unsigned long)wfi_drv->p2p_timeout)) { netdev_dbg(vif->ndev, "Receiving action wrong ch\n"); return; } - if (buff[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) { - u8 subtype = buff[P2P_PUB_ACTION_SUBTYPE]; - switch (buff[ACTION_SUBTYPE_ID]) { - case GAS_INITIAL_REQ: - case GAS_INITIAL_RSP: - break; + if (!ieee80211_is_public_action((struct ieee80211_hdr *)buff, size)) + goto out_rx_mgmt; - case PUBLIC_ACT_VENDORSPEC: - if (!memcmp(p2p_oui, &buff[ACTION_SUBTYPE_ID + 1], 4)) - wilc_wfi_cfg_parse_rx_vendor_spec(priv, buff, - size); + d = (struct wilc_p2p_pub_act_frame *)(&mgmt->u.action); + if (d->oui_subtype != GO_NEG_REQ && d->oui_subtype != GO_NEG_RSP && + d->oui_subtype != P2P_INV_REQ && d->oui_subtype != P2P_INV_RSP) + goto out_rx_mgmt; - if ((subtype == GO_NEG_REQ || subtype == GO_NEG_RSP) && - priv->p2p.is_wilc_ie) - size -= 7; + vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, + buff + ie_offset, size - ie_offset); + if (!vendor_ie) + goto out_rx_mgmt; - break; - - default: - netdev_dbg(vif->ndev, - "%s: Not handled action frame type:%x\n", - __func__, buff[ACTION_SUBTYPE_ID]); - break; - } - } + p = (struct wilc_vendor_specific_ie *)vendor_ie; + wilc_wfi_cfg_parse_ch_attr(p->attr, p->tag_len - 4, vif->wilc->sta_ch); +out_rx_mgmt: cfg80211_rx_mgmt(&priv->wdev, freq, 0, buff, size, 0); } @@ -1156,57 +1109,6 @@ static int cancel_remain_on_channel(struct wiphy *wiphy, return wilc_listen_state_expired(vif, cookie); } -static void wilc_wfi_cfg_tx_vendor_spec(struct wilc_priv *priv, - struct wilc_p2p_mgmt_data *mgmt_tx, - struct cfg80211_mgmt_tx_params *params, - u8 iftype, u32 buf_len) -{ - const u8 *buf = params->buf; - size_t len = params->len; - u32 i; - u8 subtype = buf[P2P_PUB_ACTION_SUBTYPE]; - struct wilc_vif *vif = netdev_priv(priv->dev); - - if (subtype == GO_NEG_REQ || subtype == GO_NEG_RSP) { - if (priv->p2p.local_random == 1 && - priv->p2p.recv_random < priv->p2p.local_random) { - get_random_bytes(&priv->p2p.local_random, 1); - priv->p2p.local_random++; - } - } - - if (priv->p2p.local_random <= priv->p2p.recv_random || - !(subtype == GO_NEG_REQ || subtype == GO_NEG_RSP || - subtype == P2P_INV_REQ || subtype == P2P_INV_RSP)) - return; - - for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < len; i++) { - if (buf[i] == P2PELEM_ATTR_ID && - !memcmp(p2p_oui, &buf[i + 2], 4)) { - bool oper_ch = false; - u8 *tx_buff = &mgmt_tx->buff[i + 6]; - - if (subtype == P2P_INV_REQ || subtype == P2P_INV_RSP) - oper_ch = true; - - wilc_wfi_cfg_parse_tx_action(tx_buff, len - (i + 6), - oper_ch, iftype, - vif->wilc->sta_ch); - - break; - } - } - - if (subtype != P2P_INV_REQ && subtype != P2P_INV_RSP) { - int vendor_spec_len = sizeof(p2p_vendor_spec); - - memcpy(&mgmt_tx->buff[len], p2p_vendor_spec, - vendor_spec_len); - mgmt_tx->buff[len + vendor_spec_len] = priv->p2p.local_random; - mgmt_tx->size = buf_len; - } -} - static int mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, @@ -1221,8 +1123,10 @@ static int mgmt_tx(struct wiphy *wiphy, struct wilc_vif *vif = netdev_priv(wdev->netdev); struct wilc_priv *priv = &vif->priv; struct host_if_drv *wfi_drv = priv->hif_drv; - u32 buf_len = len + sizeof(p2p_vendor_spec) + - sizeof(priv->p2p.local_random); + struct wilc_vendor_specific_ie *p; + struct wilc_p2p_pub_act_frame *d; + int ie_offset = offsetof(struct ieee80211_mgmt, u) + sizeof(*d); + const u8 *vendor_ie; int ret = 0; *cookie = prandom_u32(); @@ -1238,14 +1142,13 @@ static int mgmt_tx(struct wiphy *wiphy, goto out; } 
- mgmt_tx->buff = kmalloc(buf_len, GFP_KERNEL); + mgmt_tx->buff = kmemdup(buf, len, GFP_KERNEL); if (!mgmt_tx->buff) { ret = -ENOMEM; kfree(mgmt_tx); goto out; } - memcpy(mgmt_tx->buff, buf, len); mgmt_tx->size = len; if (ieee80211_is_probe_resp(mgmt->frame_control)) { @@ -1254,39 +1157,29 @@ static int mgmt_tx(struct wiphy *wiphy, goto out_txq_add_pkt; } - if (!ieee80211_is_action(mgmt->frame_control)) - goto out_txq_add_pkt; + if (!ieee80211_is_public_action((struct ieee80211_hdr *)buf, len)) + goto out_set_timeout; - if (buf[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) { - if (buf[ACTION_SUBTYPE_ID] != PUBLIC_ACT_VENDORSPEC || - buf[P2P_PUB_ACTION_SUBTYPE] != GO_NEG_CONF) { - wilc_set_mac_chnl_num(vif, chan->hw_value); - vif->wilc->op_ch = chan->hw_value; - } - switch (buf[ACTION_SUBTYPE_ID]) { - case GAS_INITIAL_REQ: - case GAS_INITIAL_RSP: - break; + d = (struct wilc_p2p_pub_act_frame *)(&mgmt->u.action); + if (d->oui_type != WLAN_OUI_TYPE_WFA_P2P || + d->oui_subtype != GO_NEG_CONF) { + wilc_set_mac_chnl_num(vif, chan->hw_value); + vif->wilc->op_ch = chan->hw_value; + } - case PUBLIC_ACT_VENDORSPEC: - if (!memcmp(p2p_oui, &buf[ACTION_SUBTYPE_ID + 1], 4)) - wilc_wfi_cfg_tx_vendor_spec(priv, mgmt_tx, - params, vif->iftype, - buf_len); - else - netdev_dbg(vif->ndev, - "Not a P2P public action frame\n"); + if (d->oui_subtype != P2P_INV_REQ && d->oui_subtype != P2P_INV_RSP) + goto out_set_timeout; - break; + vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, + mgmt_tx->buff + ie_offset, + len - ie_offset); + if (!vendor_ie) + goto out_set_timeout; - default: - netdev_dbg(vif->ndev, - "%s: Not handled action frame type:%x\n", - __func__, buf[ACTION_SUBTYPE_ID]); - break; - } - } + p = (struct wilc_vendor_specific_ie *)vendor_ie; + wilc_wfi_cfg_parse_ch_attr(p->attr, p->tag_len - 4, vif->wilc->sta_ch); +out_set_timeout: wfi_drv->p2p_timeout = (jiffies + msecs_to_jiffies(wait)); out_txq_add_pkt: @@ -1400,10 +1293,6 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, struct wilc_vif *vif = netdev_priv(dev); struct wilc_priv *priv = &vif->priv; - priv->p2p.local_random = 0x01; - priv->p2p.recv_random = 0x00; - priv->p2p.is_wilc_ie = false; - switch (type) { case NL80211_IFTYPE_STATION: vif->connecting = false; diff --git a/drivers/staging/wilc1000/hif.c b/drivers/staging/wilc1000/hif.c index 658790bd465b..6c7de2f8d3f2 100644 --- a/drivers/staging/wilc1000/hif.c +++ b/drivers/staging/wilc1000/hif.c @@ -801,7 +801,7 @@ static void wilc_hif_pack_sta_param(u8 *cur_byte, const u8 *mac, if (params->ht_capa) { *cur_byte++ = true; - memcpy(cur_byte, &params->ht_capa, + memcpy(cur_byte, params->ht_capa, sizeof(struct ieee80211_ht_cap)); } else { *cur_byte++ = false; @@ -861,9 +861,8 @@ static int wilc_handle_roc_expired(struct wilc_vif *vif, u64 cookie) struct wid wid; int result; struct host_if_drv *hif_drv = vif->hif_drv; - struct wilc_priv *priv = wdev_priv(vif->ndev->ieee80211_ptr); - if (priv->p2p_listen_state) { + if (vif->priv.p2p_listen_state) { remain_on_chan_flag = false; wid.id = WID_REMAIN_ON_CHAN; wid.type = WID_STR; diff --git a/drivers/staging/wilc1000/microchip,wilc1000,sdio.txt b/drivers/staging/wilc1000/microchip,wilc1000,sdio.txt deleted file mode 100644 index da5235950a70..000000000000 --- a/drivers/staging/wilc1000/microchip,wilc1000,sdio.txt +++ /dev/null @@ -1,38 +0,0 @@ -* Microchip WILC wireless SDIO device - -The wilc1000 chips can be connected via SDIO. 
The node is used to specifiy -child node to the SDIO controller that connects the device to the system. - -Required properties: -- compatible : Should be "microchip,wilc1000-spi" -- irq-gpios : Connect to a host IRQ -- reg : Slot ID used in the controller - -Optional: -- bus-width : Number of data lines wired up the slot. Default 1 bit. -- rtc_clk : Clock connected on the rtc clock line. Must be assigned - a frequency with assigned-clocks property, and must be - connected to a clock provider. - -Examples: -mmc1: mmc@fc000000 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3>; - non-removable; - vmmc-supply = <&vcc_mmc1_reg>; - vqmmc-supply = <&vcc_3v3_reg>; - status = "okay"; - - wilc_sdio@0 { - compatible = "microchip,wilc1000-sdio"; - irq-gpios = <&pioC 27 0>; - clocks = <&pck1>; - clock-names = "rtc_clk"; - assigned-clocks = <&pck1>; - assigned-clock-rates = <32768>; - status = "okay"; - reg = <0>; - bus-width = <4>; - } - }; -} diff --git a/drivers/staging/wilc1000/microchip,wilc1000,spi.txt b/drivers/staging/wilc1000/microchip,wilc1000,spi.txt deleted file mode 100644 index 34236932dbb6..000000000000 --- a/drivers/staging/wilc1000/microchip,wilc1000,spi.txt +++ /dev/null @@ -1,34 +0,0 @@ -* Microchip WILC wireless SPI device - -The wilc1000 chips can be connected via SPI. This document describes -the binding for the SPI connected module. - -Required properties: -- compatible : Should be "microchip,wilc1000-spi" -- spi-max-frequency : Maximum SPI clocking speed of device in Hz -- reg : Chip select address of device -- irq-gpios : Connect to a host IRQ - -Optional: -- rtc_clk : Clock connected on the rtc clock line. Must be assigned - a frequency with assigned-clocks property, and must be - connected to a clock provider. - -Examples: - -spi1: spi@fc018000 { - cs-gpios = <&pioB 21 0>; - status = "okay"; - - wilc_spi@0 { - compatible = "microchip,wilc1000-spi"; - spi-max-frequency = <48000000>; - reg = <0>; - irq-gpios = <&pioC 27 0>; - clocks = <&pck1>; - clock-names = "rtc_clk"; - assigned-clocks = <&pck1>; - assigned-clock-rates = <32768>; - status = "okay"; - }; -}; diff --git a/drivers/staging/wilc1000/microchip,wilc1000.yaml b/drivers/staging/wilc1000/microchip,wilc1000.yaml new file mode 100644 index 000000000000..2c320eb2a8c4 --- /dev/null +++ b/drivers/staging/wilc1000/microchip,wilc1000.yaml @@ -0,0 +1,71 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/wireless/microchip,wilc1000.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Microchip WILC wireless devicetree bindings + +maintainers: + - Adham Abozaeid <adham.abozaeid@microchip.com> + - Ajay Singh <ajay.kathat@microchip.com> + +description: + The wilc1000 chips can be connected via SPI or SDIO. This document + describes the binding to connect wilc devices. + +properties: + compatible: + const: microchip,wilc1000 + + spi-max-frequency: true + + interrupts: + maxItems: 1 + + clocks: + description: phandle to the clock connected on rtc clock line. 
+ maxItems: 1 + + clock-names: + const: rtc + +required: + - compatible + - interrupts + +examples: + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + wifi@0 { + compatible = "microchip,wilc1000"; + spi-max-frequency = <48000000>; + reg = <0>; + interrupt-parent = <&pioC>; + interrupts = <27 0>; + clocks = <&pck1>; + clock-names = "rtc"; + }; + }; + + - | + mmc { + #address-cells = <1>; + #size-cells = <0>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3>; + non-removable; + vmmc-supply = <&vcc_mmc1_reg>; + vqmmc-supply = <&vcc_3v3_reg>; + bus-width = <4>; + wifi@0 { + compatible = "microchip,wilc1000"; + reg = <0>; + interrupt-parent = <&pioC>; + interrupts = <27 0>; + clocks = <&pck1>; + clock-names = "rtc"; + }; + }; diff --git a/drivers/staging/wilc1000/mon.c b/drivers/staging/wilc1000/mon.c index 48ac33f06f63..60331417bd98 100644 --- a/drivers/staging/wilc1000/mon.c +++ b/drivers/staging/wilc1000/mon.c @@ -40,7 +40,7 @@ void wilc_wfi_monitor_rx(struct net_device *mon_dev, u8 *buff, u32 size) * The packet offset field contain info about what type of management * the frame we are dealing with and ack status */ - pkt_offset = GET_PKT_OFFSET(header); + pkt_offset = FIELD_GET(WILC_PKT_HDR_OFFSET_FIELD, header); if (pkt_offset & IS_MANAGMEMENT_CALLBACK) { /* hostapd callback mgmt frame */ diff --git a/drivers/staging/wilc1000/netdev.c b/drivers/staging/wilc1000/netdev.c index fce5bf2d82fa..f94a17babd12 100644 --- a/drivers/staging/wilc1000/netdev.c +++ b/drivers/staging/wilc1000/netdev.c @@ -46,29 +46,21 @@ static irqreturn_t isr_bh_routine(int irq, void *userdata) static int init_irq(struct net_device *dev) { - int ret = 0; struct wilc_vif *vif = netdev_priv(dev); struct wilc *wl = vif->wilc; - - ret = gpiod_direction_input(wl->gpio_irq); - if (ret) { - netdev_err(dev, "could not obtain gpio for WILC_INTR\n"); - return ret; - } - - wl->dev_irq_num = gpiod_to_irq(wl->gpio_irq); + int ret; ret = request_threaded_irq(wl->dev_irq_num, isr_uh_routine, isr_bh_routine, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "WILC_IRQ", dev); - if (ret < 0) - netdev_err(dev, "Failed to request IRQ\n"); - else - netdev_dbg(dev, "IRQ request succeeded IRQ-NUM= %d\n", - wl->dev_irq_num); + if (ret) { + netdev_err(dev, "Failed to request IRQ [%d]\n", ret); + return ret; + } + netdev_dbg(dev, "IRQ request succeeded IRQ-NUM= %d\n", wl->dev_irq_num); - return ret; + return 0; } static void deinit_irq(struct net_device *dev) @@ -501,7 +493,7 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif) if (ret) goto fail_wilc_wlan; - if (wl->gpio_irq && init_irq(dev)) { + if (wl->dev_irq_num && init_irq(dev)) { ret = -EIO; goto fail_threads; } @@ -577,7 +569,6 @@ static int wilc_mac_open(struct net_device *ndev) { struct wilc_vif *vif = netdev_priv(ndev); struct wilc *wl = vif->wilc; - struct wilc_priv *priv = wdev_priv(vif->ndev->ieee80211_ptr); unsigned char mac_add[ETH_ALEN] = {0}; int ret = 0; @@ -621,7 +612,6 @@ static int wilc_mac_open(struct net_device *ndev) vif->frame_reg[1].reg); netif_wake_queue(ndev); wl->open_ifcs++; - priv->p2p.local_random = 0x01; vif->mac_opened = 1; return 0; } @@ -804,8 +794,10 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size) u16 type = le16_to_cpup((__le16 *)buff); if (vif->priv.p2p_listen_state && - ((type == vif->frame_reg[0].type && vif->frame_reg[0].reg) || - (type == vif->frame_reg[1].type && vif->frame_reg[1].reg))) + ((type == vif->frame_reg[0].type && + vif->frame_reg[0].reg) || + (type == 
vif->frame_reg[1].type && + vif->frame_reg[1].reg))) wilc_wfi_p2p_rx(vif, buff, size); if (vif->monitor_flag) diff --git a/drivers/staging/wilc1000/netdev.h b/drivers/staging/wilc1000/netdev.h index d5f7a6037fbc..61cbec674a62 100644 --- a/drivers/staging/wilc1000/netdev.h +++ b/drivers/staging/wilc1000/netdev.h @@ -29,8 +29,6 @@ #define TCP_ACK_FILTER_LINK_SPEED_THRESH 54 #define DEFAULT_LINK_SPEED 72 -#define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff) - struct wilc_wfi_stats { unsigned long rx_packets; unsigned long tx_packets; @@ -66,12 +64,6 @@ struct wilc_wfi_p2p_listen_params { u64 listen_cookie; }; -struct wilc_p2p_var { - u8 local_random; - u8 recv_random; - bool is_wilc_ie; -}; - static const u32 wilc_cipher_suites[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, @@ -155,7 +147,6 @@ struct wilc_priv { struct mutex scan_req_lock; bool p2p_listen_state; int scanned_cnt; - struct wilc_p2p_var p2p; u64 inc_roc_cookie; }; @@ -218,7 +209,6 @@ struct wilc { const struct wilc_hif_func *hif_func; int io_type; s8 mac_status; - struct gpio_desc *gpio_irq; struct clk *rtc_clk; bool initialized; int dev_irq_num; diff --git a/drivers/staging/wilc1000/sdio.c b/drivers/staging/wilc1000/sdio.c index ca99335687c4..36eb589263bf 100644 --- a/drivers/staging/wilc1000/sdio.c +++ b/drivers/staging/wilc1000/sdio.c @@ -7,6 +7,8 @@ #include <linux/clk.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/host.h> +#include <linux/mmc/sdio.h> +#include <linux/of_irq.h> #include "netdev.h" #include "cfg80211.h" @@ -26,9 +28,6 @@ static const struct sdio_device_id wilc_sdio_ids[] = { struct wilc_sdio { bool irq_gpio; u32 block_size; - int nint; -/* Max num interrupts allowed in registers 0xf7, 0xf8 */ -#define MAX_NUN_INT_THRPT_ENH2 (5) int has_thrpt_enh3; }; @@ -124,35 +123,34 @@ static int wilc_sdio_probe(struct sdio_func *func, { struct wilc *wilc; int ret; - struct gpio_desc *gpio = NULL; struct wilc_sdio *sdio_priv; sdio_priv = kzalloc(sizeof(*sdio_priv), GFP_KERNEL); if (!sdio_priv) return -ENOMEM; - if (IS_ENABLED(CONFIG_WILC1000_HW_OOB_INTR)) { - gpio = gpiod_get(&func->dev, "irq", GPIOD_IN); - if (IS_ERR(gpio)) { - /* get the GPIO descriptor from hardcode GPIO number */ - gpio = gpio_to_desc(GPIO_NUM); - if (!gpio) - dev_err(&func->dev, "failed to get irq gpio\n"); - } - } - ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO, &wilc_hif_sdio); if (ret) { kfree(sdio_priv); return ret; } + + if (IS_ENABLED(CONFIG_WILC1000_HW_OOB_INTR)) { + struct device_node *np = func->card->dev.of_node; + int irq_num = of_irq_get(np, 0); + + if (irq_num > 0) { + wilc->dev_irq_num = irq_num; + sdio_priv->irq_gpio = true; + } + } + sdio_set_drvdata(func, wilc); wilc->bus_data = sdio_priv; wilc->dev = &func->dev; - wilc->gpio_irq = gpio; - wilc->rtc_clk = devm_clk_get(&func->card->dev, "rtc_clk"); + wilc->rtc_clk = devm_clk_get(&func->card->dev, "rtc"); if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) return -EPROBE_DEFER; else if (!IS_ERR(wilc->rtc_clk)) @@ -166,10 +164,6 @@ static void wilc_sdio_remove(struct sdio_func *func) { struct wilc *wilc = sdio_get_drvdata(func); - /* free the GPIO in module remove */ - if (wilc->gpio_irq) - gpiod_put(wilc->gpio_irq); - if (!IS_ERR(wilc->rtc_clk)) clk_disable_unprepare(wilc->rtc_clk); @@ -185,8 +179,8 @@ static int wilc_sdio_reset(struct wilc *wilc) cmd.read_write = 1; cmd.function = 0; cmd.raw = 0; - cmd.address = 0x6; - cmd.data = 0x8; + cmd.address = SDIO_CCCR_ABORT; + cmd.data = WILC_SDIO_CCCR_ABORT_RESET; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { 
dev_err(&func->dev, "Fail cmd 52, reset cmd ...\n"); @@ -268,34 +262,38 @@ static int wilc_sdio_set_func0_csa_address(struct wilc *wilc, u32 adr) cmd.read_write = 1; cmd.function = 0; cmd.raw = 0; - cmd.address = 0x10c; + cmd.address = WILC_SDIO_FBR_CSA_REG; cmd.data = (u8)adr; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { - dev_err(&func->dev, "Failed cmd52, set 0x10c data...\n"); + dev_err(&func->dev, "Failed cmd52, set %04x data...\n", + cmd.address); return ret; } - cmd.address = 0x10d; + cmd.address = WILC_SDIO_FBR_CSA_REG + 1; cmd.data = (u8)(adr >> 8); ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { - dev_err(&func->dev, "Failed cmd52, set 0x10d data...\n"); + dev_err(&func->dev, "Failed cmd52, set %04x data...\n", + cmd.address); return ret; } - cmd.address = 0x10e; + cmd.address = WILC_SDIO_FBR_CSA_REG + 2; cmd.data = (u8)(adr >> 16); ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { - dev_err(&func->dev, "Failed cmd52, set 0x10e data...\n"); + dev_err(&func->dev, "Failed cmd52, set %04x data...\n", + cmd.address); return ret; } return 0; } -static int wilc_sdio_set_func0_block_size(struct wilc *wilc, u32 block_size) +static int wilc_sdio_set_block_size(struct wilc *wilc, u8 func_num, + u32 block_size) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct sdio_cmd52 cmd; @@ -304,52 +302,21 @@ static int wilc_sdio_set_func0_block_size(struct wilc *wilc, u32 block_size) cmd.read_write = 1; cmd.function = 0; cmd.raw = 0; - cmd.address = 0x10; + cmd.address = SDIO_FBR_BASE(func_num) + SDIO_CCCR_BLKSIZE; cmd.data = (u8)block_size; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { - dev_err(&func->dev, "Failed cmd52, set 0x10 data...\n"); + dev_err(&func->dev, "Failed cmd52, set %04x data...\n", + cmd.address); return ret; } - cmd.address = 0x11; - cmd.data = (u8)(block_size >> 8); - ret = wilc_sdio_cmd52(wilc, &cmd); - if (ret) { - dev_err(&func->dev, "Failed cmd52, set 0x11 data...\n"); - return ret; - } - - return 0; -} - -/******************************************** - * - * Function 1 - * - ********************************************/ - -static int wilc_sdio_set_func1_block_size(struct wilc *wilc, u32 block_size) -{ - struct sdio_func *func = dev_to_sdio_func(wilc->dev); - struct sdio_cmd52 cmd; - int ret; - - cmd.read_write = 1; - cmd.function = 0; - cmd.raw = 0; - cmd.address = 0x110; - cmd.data = (u8)block_size; - ret = wilc_sdio_cmd52(wilc, &cmd); - if (ret) { - dev_err(&func->dev, "Failed cmd52, set 0x110 data...\n"); - return ret; - } - cmd.address = 0x111; + cmd.address = SDIO_FBR_BASE(func_num) + SDIO_CCCR_BLKSIZE + 1; cmd.data = (u8)(block_size >> 8); ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { - dev_err(&func->dev, "Failed cmd52, set 0x111 data...\n"); + dev_err(&func->dev, "Failed cmd52, set %04x data...\n", + cmd.address); return ret; } @@ -369,7 +336,7 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data) cpu_to_le32s(&data); - if (addr >= 0xf0 && addr <= 0xff) { + if (addr >= 0xf0 && addr <= 0xff) { /* only vendor specific registers */ struct sdio_cmd52 cmd; cmd.read_write = 1; @@ -393,7 +360,7 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data) cmd.read_write = 1; cmd.function = 0; - cmd.address = 0x10f; + cmd.address = WILC_SDIO_FBR_DATA_REG; cmd.block_mode = 0; cmd.increment = 1; cmd.count = 4; @@ -419,34 +386,19 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) cmd.read_write = 1; if (addr > 0) { /** - * has to be word aligned... 
- **/ - if (size & 0x3) { - size += 4; - size &= ~0x3; - } - - /** * func 0 access **/ cmd.function = 0; - cmd.address = 0x10f; + cmd.address = WILC_SDIO_FBR_DATA_REG; } else { /** - * has to be word aligned... - **/ - if (size & 0x3) { - size += 4; - size &= ~0x3; - } - - /** * func 1 access **/ cmd.function = 1; - cmd.address = 0; + cmd.address = WILC_SDIO_F1_DATA_REG; } + size = ALIGN(size, 4); nblk = size / block_size; nleft = size % block_size; @@ -502,7 +454,7 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data) struct wilc_sdio *sdio_priv = wilc->bus_data; int ret; - if (addr >= 0xf0 && addr <= 0xff) { + if (addr >= 0xf0 && addr <= 0xff) { /* only vendor specific registers */ struct sdio_cmd52 cmd; cmd.read_write = 0; @@ -525,7 +477,7 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data) cmd.read_write = 0; cmd.function = 0; - cmd.address = 0x10f; + cmd.address = WILC_SDIO_FBR_DATA_REG; cmd.block_mode = 0; cmd.increment = 1; cmd.count = 4; @@ -555,34 +507,19 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size) cmd.read_write = 0; if (addr > 0) { /** - * has to be word aligned... - **/ - if (size & 0x3) { - size += 4; - size &= ~0x3; - } - - /** * func 0 access **/ cmd.function = 0; - cmd.address = 0x10f; + cmd.address = WILC_SDIO_FBR_DATA_REG; } else { /** - * has to be word aligned... - **/ - if (size & 0x3) { - size += 4; - size &= ~0x3; - } - - /** * func 1 access **/ cmd.function = 1; - cmd.address = 0; + cmd.address = WILC_SDIO_F1_DATA_REG; } + size = ALIGN(size, 4); nblk = size / block_size; nleft = size % block_size; @@ -651,17 +588,14 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume) int loop, ret; u32 chipid; - if (!resume) - sdio_priv->irq_gpio = wilc->dev_irq_num; - /** * function 0 csa enable **/ cmd.read_write = 1; cmd.function = 0; cmd.raw = 1; - cmd.address = 0x100; - cmd.data = 0x80; + cmd.address = SDIO_FBR_BASE(func->num); + cmd.data = SDIO_FBR_ENABLE_CSA; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Fail cmd 52, enable csa...\n"); @@ -671,7 +605,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume) /** * function 0 block size **/ - ret = wilc_sdio_set_func0_block_size(wilc, WILC_SDIO_BLOCK_SIZE); + ret = wilc_sdio_set_block_size(wilc, 0, WILC_SDIO_BLOCK_SIZE); if (ret) { dev_err(&func->dev, "Fail cmd 52, set func 0 block size...\n"); return ret; @@ -684,8 +618,8 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume) cmd.read_write = 1; cmd.function = 0; cmd.raw = 1; - cmd.address = 0x2; - cmd.data = 0x2; + cmd.address = SDIO_CCCR_IOEx; + cmd.data = WILC_SDIO_CCCR_IO_EN_FUNC1; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, @@ -699,7 +633,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume) cmd.read_write = 0; cmd.function = 0; cmd.raw = 0; - cmd.address = 0x3; + cmd.address = SDIO_CCCR_IORx; loop = 3; do { cmd.data = 0; @@ -709,7 +643,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume) "Fail cmd 52, get IOR register...\n"); return ret; } - if (cmd.data == 0x2) + if (cmd.data == WILC_SDIO_CCCR_IO_EN_FUNC1) break; } while (loop--); @@ -721,7 +655,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume) /** * func 1 is ready, set func 1 block size **/ - ret = wilc_sdio_set_func1_block_size(wilc, WILC_SDIO_BLOCK_SIZE); + ret = wilc_sdio_set_block_size(wilc, 1, WILC_SDIO_BLOCK_SIZE); if (ret) { dev_err(&func->dev, "Fail set func 1 block size...\n"); return ret; @@ -733,8 +667,8 @@ static int wilc_sdio_init(struct 
wilc *wilc, bool resume) cmd.read_write = 1; cmd.function = 0; cmd.raw = 1; - cmd.address = 0x4; - cmd.data = 0x3; + cmd.address = SDIO_CCCR_IENx; + cmd.data = WILC_SDIO_CCCR_IEN_MASTER | WILC_SDIO_CCCR_IEN_FUNC1; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Fail cmd 52, set IEN register...\n"); @@ -745,13 +679,16 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume) * make sure can read back chip id correctly **/ if (!resume) { - ret = wilc_sdio_read_reg(wilc, 0x1000, &chipid); + int rev; + + ret = wilc_sdio_read_reg(wilc, WILC_CHIPID, &chipid); if (ret) { dev_err(&func->dev, "Fail cmd read chip id...\n"); return ret; } dev_err(&func->dev, "chipid (%08x)\n", chipid); - if ((chipid & 0xfff) > 0x2a0) + rev = FIELD_GET(WILC_CHIP_REV_FIELD, chipid); + if (rev > FIELD_GET(WILC_CHIP_REV_FIELD, WILC_1000_BASE_ID_2A)) sdio_priv->has_thrpt_enh3 = 1; else sdio_priv->has_thrpt_enh3 = 0; @@ -773,12 +710,12 @@ static int wilc_sdio_read_size(struct wilc *wilc, u32 *size) cmd.read_write = 0; cmd.function = 0; cmd.raw = 0; - cmd.address = 0xf2; + cmd.address = WILC_SDIO_INTERRUPT_DATA_SZ_REG; cmd.data = 0; wilc_sdio_cmd52(wilc, &cmd); tmp = cmd.data; - cmd.address = 0xf3; + cmd.address = WILC_SDIO_INTERRUPT_DATA_SZ_REG + 1; cmd.data = 0; wilc_sdio_cmd52(wilc, &cmd); tmp |= (cmd.data << 8); @@ -792,6 +729,7 @@ static int wilc_sdio_read_int(struct wilc *wilc, u32 *int_status) struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct wilc_sdio *sdio_priv = wilc->bus_data; u32 tmp; + u8 irq_flags; struct sdio_cmd52 cmd; wilc_sdio_read_size(wilc, &tmp); @@ -800,46 +738,22 @@ static int wilc_sdio_read_int(struct wilc *wilc, u32 *int_status) * Read IRQ flags **/ if (!sdio_priv->irq_gpio) { - int i; - - cmd.read_write = 0; cmd.function = 1; - cmd.address = 0x04; - cmd.data = 0; - wilc_sdio_cmd52(wilc, &cmd); - - if (cmd.data & BIT(0)) - tmp |= INT_0; - if (cmd.data & BIT(2)) - tmp |= INT_1; - if (cmd.data & BIT(3)) - tmp |= INT_2; - if (cmd.data & BIT(4)) - tmp |= INT_3; - if (cmd.data & BIT(5)) - tmp |= INT_4; - if (cmd.data & BIT(6)) - tmp |= INT_5; - for (i = sdio_priv->nint; i < MAX_NUM_INT; i++) { - if ((tmp >> (IRG_FLAGS_OFFSET + i)) & 0x1) { - dev_err(&func->dev, - "Unexpected interrupt (1) : tmp=%x, data=%x\n", - tmp, cmd.data); - break; - } - } + cmd.address = WILC_SDIO_EXT_IRQ_FLAG_REG; } else { - u32 irq_flags; - - cmd.read_write = 0; cmd.function = 0; - cmd.raw = 0; - cmd.address = 0xf7; - cmd.data = 0; - wilc_sdio_cmd52(wilc, &cmd); - irq_flags = cmd.data & 0x1f; - tmp |= ((irq_flags >> 0) << IRG_FLAGS_OFFSET); + cmd.address = WILC_SDIO_IRQ_FLAG_REG; } + cmd.raw = 0; + cmd.read_write = 0; + cmd.data = 0; + wilc_sdio_cmd52(wilc, &cmd); + irq_flags = cmd.data; + tmp |= FIELD_PREP(IRG_FLAGS_MASK, cmd.data); + + if (FIELD_GET(UNHANDLED_IRQ_MASK, irq_flags)) + dev_err(&func->dev, "Unexpected interrupt (1) int=%lx\n", + FIELD_GET(UNHANDLED_IRQ_MASK, irq_flags)); *int_status = tmp; @@ -854,16 +768,11 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val) int vmm_ctl; if (sdio_priv->has_thrpt_enh3) { - u32 reg; + u32 reg = 0; - if (sdio_priv->irq_gpio) { - u32 flags; + if (sdio_priv->irq_gpio) + reg = val & (BIT(MAX_NUM_INT) - 1); - flags = val & (BIT(MAX_NUN_INT_THRPT_ENH2) - 1); - reg = flags; - } else { - reg = 0; - } /* select VMM table 0 */ if (val & SEL_VMM_TBL0) reg |= BIT(5); @@ -879,14 +788,14 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val) cmd.read_write = 1; cmd.function = 0; cmd.raw = 0; - cmd.address = 0xf8; + cmd.address = 
WILC_SDIO_IRQ_CLEAR_FLAG_REG; cmd.data = reg; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, - "Failed cmd52, set 0xf8 data (%d) ...\n", - __LINE__); + "Failed cmd52, set (%02x) data (%d) ...\n", + cmd.address, __LINE__); return ret; } } @@ -899,38 +808,36 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val) * Must clear each interrupt individually. */ u32 flags; + int i; flags = val & (BIT(MAX_NUM_INT) - 1); - if (flags) { - int i; - - for (i = 0; i < sdio_priv->nint; i++) { - if (flags & 1) { - struct sdio_cmd52 cmd; - - cmd.read_write = 1; - cmd.function = 0; - cmd.raw = 0; - cmd.address = 0xf8; - cmd.data = BIT(i); - - ret = wilc_sdio_cmd52(wilc, &cmd); - if (ret) { - dev_err(&func->dev, - "Failed cmd52, set 0xf8 data (%d) ...\n", - __LINE__); - return ret; - } + for (i = 0; i < NUM_INT_EXT && flags; i++) { + if (flags & BIT(i)) { + struct sdio_cmd52 cmd; + + cmd.read_write = 1; + cmd.function = 0; + cmd.raw = 0; + cmd.address = WILC_SDIO_IRQ_CLEAR_FLAG_REG; + cmd.data = BIT(i); + + ret = wilc_sdio_cmd52(wilc, &cmd); + if (ret) { + dev_err(&func->dev, + "Failed cmd52, set (%02x) data (%d) ...\n", + cmd.address, __LINE__); + return ret; } - flags >>= 1; + flags &= ~BIT(i); } + } - for (i = sdio_priv->nint; i < MAX_NUM_INT; i++) { - if (flags & 1) - dev_err(&func->dev, - "Unexpected interrupt cleared %d...\n", - i); - flags >>= 1; + for (i = NUM_INT_EXT; i < MAX_NUM_INT && flags; i++) { + if (flags & BIT(i)) { + dev_err(&func->dev, + "Unexpected interrupt cleared %d...\n", + i); + flags &= ~BIT(i); } } } @@ -952,13 +859,13 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val) cmd.read_write = 1; cmd.function = 0; cmd.raw = 0; - cmd.address = 0xf6; + cmd.address = WILC_SDIO_VMM_TBL_CTRL_REG; cmd.data = vmm_ctl; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, - "Failed cmd52, set 0xf6 data (%d) ...\n", - __LINE__); + "Failed cmd52, set (%02x) data (%d) ...\n", + cmd.address, __LINE__); return ret; } } @@ -975,13 +882,6 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint) dev_err(&func->dev, "Too many interrupts (%d)...\n", nint); return -EINVAL; } - if (nint > MAX_NUN_INT_THRPT_ENH2) { - dev_err(&func->dev, - "Cannot support more than 5 interrupts when has_thrpt_enh2=1.\n"); - return -EINVAL; - } - - sdio_priv->nint = nint; /** * Disable power sequencer @@ -1097,7 +997,7 @@ static int wilc_sdio_resume(struct device *dev) } static const struct of_device_id wilc_of_match[] = { - { .compatible = "microchip,wilc1000-sdio", }, + { .compatible = "microchip,wilc1000", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, wilc_of_match); diff --git a/drivers/staging/wilc1000/spi.c b/drivers/staging/wilc1000/spi.c index 3ffc7b4fddf6..3f19e3f38a39 100644 --- a/drivers/staging/wilc1000/spi.c +++ b/drivers/staging/wilc1000/spi.c @@ -6,72 +6,19 @@ #include <linux/clk.h> #include <linux/spi/spi.h> +#include <linux/crc7.h> #include "netdev.h" #include "cfg80211.h" struct wilc_spi { int crc_off; - int nint; }; static const struct wilc_hif_func wilc_hif_spi; /******************************************** * - * Crc7 - * - ********************************************/ - -static const u8 crc7_syndrome_table[256] = { - 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, - 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77, - 0x19, 0x10, 0x0b, 0x02, 0x3d, 0x34, 0x2f, 0x26, - 0x51, 0x58, 0x43, 0x4a, 0x75, 0x7c, 0x67, 0x6e, - 0x32, 0x3b, 0x20, 0x29, 0x16, 0x1f, 0x04, 0x0d, - 0x7a, 0x73, 0x68, 0x61, 0x5e, 0x57, 0x4c, 0x45, - 0x2b, 0x22, 0x39, 0x30, 0x0f, 
0x06, 0x1d, 0x14, - 0x63, 0x6a, 0x71, 0x78, 0x47, 0x4e, 0x55, 0x5c, - 0x64, 0x6d, 0x76, 0x7f, 0x40, 0x49, 0x52, 0x5b, - 0x2c, 0x25, 0x3e, 0x37, 0x08, 0x01, 0x1a, 0x13, - 0x7d, 0x74, 0x6f, 0x66, 0x59, 0x50, 0x4b, 0x42, - 0x35, 0x3c, 0x27, 0x2e, 0x11, 0x18, 0x03, 0x0a, - 0x56, 0x5f, 0x44, 0x4d, 0x72, 0x7b, 0x60, 0x69, - 0x1e, 0x17, 0x0c, 0x05, 0x3a, 0x33, 0x28, 0x21, - 0x4f, 0x46, 0x5d, 0x54, 0x6b, 0x62, 0x79, 0x70, - 0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31, 0x38, - 0x41, 0x48, 0x53, 0x5a, 0x65, 0x6c, 0x77, 0x7e, - 0x09, 0x00, 0x1b, 0x12, 0x2d, 0x24, 0x3f, 0x36, - 0x58, 0x51, 0x4a, 0x43, 0x7c, 0x75, 0x6e, 0x67, - 0x10, 0x19, 0x02, 0x0b, 0x34, 0x3d, 0x26, 0x2f, - 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c, - 0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04, - 0x6a, 0x63, 0x78, 0x71, 0x4e, 0x47, 0x5c, 0x55, - 0x22, 0x2b, 0x30, 0x39, 0x06, 0x0f, 0x14, 0x1d, - 0x25, 0x2c, 0x37, 0x3e, 0x01, 0x08, 0x13, 0x1a, - 0x6d, 0x64, 0x7f, 0x76, 0x49, 0x40, 0x5b, 0x52, - 0x3c, 0x35, 0x2e, 0x27, 0x18, 0x11, 0x0a, 0x03, - 0x74, 0x7d, 0x66, 0x6f, 0x50, 0x59, 0x42, 0x4b, - 0x17, 0x1e, 0x05, 0x0c, 0x33, 0x3a, 0x21, 0x28, - 0x5f, 0x56, 0x4d, 0x44, 0x7b, 0x72, 0x69, 0x60, - 0x0e, 0x07, 0x1c, 0x15, 0x2a, 0x23, 0x38, 0x31, - 0x46, 0x4f, 0x54, 0x5d, 0x62, 0x6b, 0x70, 0x79 -}; - -static u8 crc7_byte(u8 crc, u8 data) -{ - return crc7_syndrome_table[(crc << 1) ^ data]; -} - -static u8 crc7(u8 crc, const u8 *buffer, u32 len) -{ - while (len--) - crc = crc7_byte(crc, *buffer++); - return crc; -} - -/******************************************** - * * Spi protocol Function * ********************************************/ @@ -97,25 +44,62 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len) #define USE_SPI_DMA 0 +#define WILC_SPI_COMMAND_STAT_SUCCESS 0 +#define WILC_GET_RESP_HDR_START(h) (((h) >> 4) & 0xf) + +struct wilc_spi_cmd { + u8 cmd_type; + union { + struct { + u8 addr[3]; + u8 crc[]; + } __packed simple_cmd; + struct { + u8 addr[3]; + u8 size[2]; + u8 crc[]; + } __packed dma_cmd; + struct { + u8 addr[3]; + u8 size[3]; + u8 crc[]; + } __packed dma_cmd_ext; + struct { + u8 addr[2]; + __be32 data; + u8 crc[]; + } __packed internal_w_cmd; + struct { + u8 addr[3]; + __be32 data; + u8 crc[]; + } __packed w_cmd; + } u; +} __packed; + +struct wilc_spi_read_rsp_data { + u8 rsp_cmd_type; + u8 status; + u8 resp_header; + u8 resp_data[4]; + u8 crc[]; +} __packed; + +struct wilc_spi_rsp_data { + u8 rsp_cmd_type; + u8 status; +} __packed; + static int wilc_bus_probe(struct spi_device *spi) { int ret; struct wilc *wilc; - struct gpio_desc *gpio; struct wilc_spi *spi_priv; spi_priv = kzalloc(sizeof(*spi_priv), GFP_KERNEL); if (!spi_priv) return -ENOMEM; - gpio = gpiod_get(&spi->dev, "irq", GPIOD_IN); - if (IS_ERR(gpio)) { - /* get the GPIO descriptor from hardcode GPIO number */ - gpio = gpio_to_desc(GPIO_NUM); - if (!gpio) - dev_err(&spi->dev, "failed to get the irq gpio\n"); - } - ret = wilc_cfg80211_init(&wilc, &spi->dev, WILC_HIF_SPI, &wilc_hif_spi); if (ret) { kfree(spi_priv); @@ -125,7 +109,7 @@ static int wilc_bus_probe(struct spi_device *spi) spi_set_drvdata(spi, wilc); wilc->dev = &spi->dev; wilc->bus_data = spi_priv; - wilc->gpio_irq = gpio; + wilc->dev_irq_num = spi->irq; wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc_clk"); if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) @@ -140,10 +124,6 @@ static int wilc_bus_remove(struct spi_device *spi) { struct wilc *wilc = spi_get_drvdata(spi); - /* free the GPIO in module remove */ - if (wilc->gpio_irq) - gpiod_put(wilc->gpio_irq); - if (!IS_ERR(wilc->rtc_clk)) 
clk_disable_unprepare(wilc->rtc_clk); @@ -152,7 +132,7 @@ static int wilc_bus_remove(struct spi_device *spi) } static const struct of_device_id wilc_of_match[] = { - { .compatible = "microchip,wilc1000-spi", }, + { .compatible = "microchip,wilc1000", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, wilc_of_match); @@ -178,7 +158,10 @@ static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len) struct spi_transfer tr = { .tx_buf = b, .len = len, - .delay_usecs = 0, + .delay = { + .value = 0, + .unit = SPI_DELAY_UNIT_USECS + }, }; char *r_buffer = kzalloc(len, GFP_KERNEL); @@ -219,7 +202,10 @@ static int wilc_spi_rx(struct wilc *wilc, u8 *rb, u32 rlen) struct spi_transfer tr = { .rx_buf = rb, .len = rlen, - .delay_usecs = 0, + .delay = { + .value = 0, + .unit = SPI_DELAY_UNIT_USECS + }, }; char *t_buffer = kzalloc(rlen, GFP_KERNEL); @@ -261,7 +247,10 @@ static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen) .tx_buf = wb, .len = rlen, .bits_per_word = 8, - .delay_usecs = 0, + .delay = { + .value = 0, + .unit = SPI_DELAY_UNIT_USECS + }, }; @@ -284,335 +273,6 @@ static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen) return ret; } -static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz, - u8 clockless) -{ - struct spi_device *spi = to_spi_device(wilc->dev); - struct wilc_spi *spi_priv = wilc->bus_data; - u8 wb[32], rb[32]; - u8 wix, rix; - u32 len2; - u8 rsp; - int len = 0; - int result = 0; - int retry; - u8 crc[2]; - - wb[0] = cmd; - switch (cmd) { - case CMD_SINGLE_READ: /* single word (4 bytes) read */ - wb[1] = (u8)(adr >> 16); - wb[2] = (u8)(adr >> 8); - wb[3] = (u8)adr; - len = 5; - break; - - case CMD_INTERNAL_READ: /* internal register read */ - wb[1] = (u8)(adr >> 8); - if (clockless == 1) - wb[1] |= BIT(7); - wb[2] = (u8)adr; - wb[3] = 0x00; - len = 5; - break; - - case CMD_TERMINATE: - wb[1] = 0x00; - wb[2] = 0x00; - wb[3] = 0x00; - len = 5; - break; - - case CMD_REPEAT: - wb[1] = 0x00; - wb[2] = 0x00; - wb[3] = 0x00; - len = 5; - break; - - case CMD_RESET: - wb[1] = 0xff; - wb[2] = 0xff; - wb[3] = 0xff; - len = 5; - break; - - case CMD_DMA_WRITE: /* dma write */ - case CMD_DMA_READ: /* dma read */ - wb[1] = (u8)(adr >> 16); - wb[2] = (u8)(adr >> 8); - wb[3] = (u8)adr; - wb[4] = (u8)(sz >> 8); - wb[5] = (u8)(sz); - len = 7; - break; - - case CMD_DMA_EXT_WRITE: /* dma extended write */ - case CMD_DMA_EXT_READ: /* dma extended read */ - wb[1] = (u8)(adr >> 16); - wb[2] = (u8)(adr >> 8); - wb[3] = (u8)adr; - wb[4] = (u8)(sz >> 16); - wb[5] = (u8)(sz >> 8); - wb[6] = (u8)(sz); - len = 8; - break; - - case CMD_INTERNAL_WRITE: /* internal register write */ - wb[1] = (u8)(adr >> 8); - if (clockless == 1) - wb[1] |= BIT(7); - wb[2] = (u8)(adr); - wb[3] = b[3]; - wb[4] = b[2]; - wb[5] = b[1]; - wb[6] = b[0]; - len = 8; - break; - - case CMD_SINGLE_WRITE: /* single word write */ - wb[1] = (u8)(adr >> 16); - wb[2] = (u8)(adr >> 8); - wb[3] = (u8)(adr); - wb[4] = b[3]; - wb[5] = b[2]; - wb[6] = b[1]; - wb[7] = b[0]; - len = 9; - break; - - default: - result = -EINVAL; - break; - } - - if (result) - return result; - - if (!spi_priv->crc_off) - wb[len - 1] = (crc7(0x7f, (const u8 *)&wb[0], len - 1)) << 1; - else - len -= 1; - -#define NUM_SKIP_BYTES (1) -#define NUM_RSP_BYTES (2) -#define NUM_DATA_HDR_BYTES (1) -#define NUM_DATA_BYTES (4) -#define NUM_CRC_BYTES (2) -#define NUM_DUMMY_BYTES (3) - if (cmd == CMD_RESET || - cmd == CMD_TERMINATE || - cmd == CMD_REPEAT) { - len2 = len + (NUM_SKIP_BYTES + NUM_RSP_BYTES + NUM_DUMMY_BYTES); - } else if 
(cmd == CMD_INTERNAL_READ || cmd == CMD_SINGLE_READ) { - int tmp = NUM_RSP_BYTES + NUM_DATA_HDR_BYTES + NUM_DATA_BYTES - + NUM_DUMMY_BYTES; - if (!spi_priv->crc_off) - len2 = len + tmp + NUM_CRC_BYTES; - else - len2 = len + tmp; - } else { - len2 = len + (NUM_RSP_BYTES + NUM_DUMMY_BYTES); - } -#undef NUM_DUMMY_BYTES - - if (len2 > ARRAY_SIZE(wb)) { - dev_err(&spi->dev, "spi buffer size too small (%d) (%zu)\n", - len2, ARRAY_SIZE(wb)); - return -EINVAL; - } - /* zero spi write buffers. */ - for (wix = len; wix < len2; wix++) - wb[wix] = 0; - rix = len; - - if (wilc_spi_tx_rx(wilc, wb, rb, len2)) { - dev_err(&spi->dev, "Failed cmd write, bus error...\n"); - return -EINVAL; - } - - /* - * Command/Control response - */ - if (cmd == CMD_RESET || cmd == CMD_TERMINATE || cmd == CMD_REPEAT) - rix++; /* skip 1 byte */ - - rsp = rb[rix++]; - - if (rsp != cmd) { - dev_err(&spi->dev, - "Failed cmd response, cmd (%02x), resp (%02x)\n", - cmd, rsp); - return -EINVAL; - } - - /* - * State response - */ - rsp = rb[rix++]; - if (rsp != 0x00) { - dev_err(&spi->dev, "Failed cmd state response state (%02x)\n", - rsp); - return -EINVAL; - } - - if (cmd == CMD_INTERNAL_READ || cmd == CMD_SINGLE_READ || - cmd == CMD_DMA_READ || cmd == CMD_DMA_EXT_READ) { - /* - * Data Respnose header - */ - retry = 100; - do { - /* - * ensure there is room in buffer later - * to read data and crc - */ - if (rix < len2) { - rsp = rb[rix++]; - } else { - retry = 0; - break; - } - if (((rsp >> 4) & 0xf) == 0xf) - break; - } while (retry--); - - if (retry <= 0) { - dev_err(&spi->dev, - "Error, data read response (%02x)\n", rsp); - return -EAGAIN; - } - } - - if (cmd == CMD_INTERNAL_READ || cmd == CMD_SINGLE_READ) { - /* - * Read bytes - */ - if ((rix + 3) < len2) { - b[0] = rb[rix++]; - b[1] = rb[rix++]; - b[2] = rb[rix++]; - b[3] = rb[rix++]; - } else { - dev_err(&spi->dev, - "buffer overrun when reading data.\n"); - return -EINVAL; - } - - if (!spi_priv->crc_off) { - /* - * Read Crc - */ - if ((rix + 1) < len2) { - crc[0] = rb[rix++]; - crc[1] = rb[rix++]; - } else { - dev_err(&spi->dev, - "buffer overrun when reading crc.\n"); - return -EINVAL; - } - } - } else if ((cmd == CMD_DMA_READ) || (cmd == CMD_DMA_EXT_READ)) { - int ix; - - /* some data may be read in response to dummy bytes. */ - for (ix = 0; (rix < len2) && (ix < sz); ) - b[ix++] = rb[rix++]; - - sz -= ix; - - if (sz > 0) { - int nbytes; - - if (sz <= (DATA_PKT_SZ - ix)) - nbytes = sz; - else - nbytes = DATA_PKT_SZ - ix; - - /* - * Read bytes - */ - if (wilc_spi_rx(wilc, &b[ix], nbytes)) { - dev_err(&spi->dev, - "Failed block read, bus err\n"); - return -EINVAL; - } - - /* - * Read Crc - */ - if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) { - dev_err(&spi->dev, - "Failed block crc read, bus err\n"); - return -EINVAL; - } - - ix += nbytes; - sz -= nbytes; - } - - /* - * if any data in left unread, - * then read the rest using normal DMA code. - */ - while (sz > 0) { - int nbytes; - - if (sz <= DATA_PKT_SZ) - nbytes = sz; - else - nbytes = DATA_PKT_SZ; - - /* - * read data response only on the next DMA cycles not - * the first DMA since data response header is already - * handled above for the first DMA. 
- */ - /* - * Data Respnose header - */ - retry = 10; - do { - if (wilc_spi_rx(wilc, &rsp, 1)) { - dev_err(&spi->dev, - "Failed resp read, bus err\n"); - result = -EINVAL; - break; - } - if (((rsp >> 4) & 0xf) == 0xf) - break; - } while (retry--); - - if (result) - break; - - /* - * Read bytes - */ - if (wilc_spi_rx(wilc, &b[ix], nbytes)) { - dev_err(&spi->dev, - "Failed block read, bus err\n"); - result = -EINVAL; - break; - } - - /* - * Read Crc - */ - if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) { - dev_err(&spi->dev, - "Failed block crc read, bus err\n"); - result = -EINVAL; - break; - } - - ix += nbytes; - sz -= nbytes; - } - } - return result; -} - static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz) { struct spi_device *spi = to_spi_device(wilc->dev); @@ -686,19 +346,333 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz) * Spi Internal Read/Write Function * ********************************************/ +static u8 wilc_get_crc7(u8 *buffer, u32 len) +{ + return crc7_be(0xfe, buffer, len); +} + +static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b, + u8 clockless) +{ + struct spi_device *spi = to_spi_device(wilc->dev); + struct wilc_spi *spi_priv = wilc->bus_data; + u8 wb[32], rb[32]; + int cmd_len, resp_len; + u8 crc[2]; + struct wilc_spi_cmd *c; + struct wilc_spi_read_rsp_data *r; + + memset(wb, 0x0, sizeof(wb)); + memset(rb, 0x0, sizeof(rb)); + c = (struct wilc_spi_cmd *)wb; + c->cmd_type = cmd; + if (cmd == CMD_SINGLE_READ) { + c->u.simple_cmd.addr[0] = adr >> 16; + c->u.simple_cmd.addr[1] = adr >> 8; + c->u.simple_cmd.addr[2] = adr; + } else if (cmd == CMD_INTERNAL_READ) { + c->u.simple_cmd.addr[0] = adr >> 8; + if (clockless == 1) + c->u.simple_cmd.addr[0] |= BIT(7); + c->u.simple_cmd.addr[1] = adr; + c->u.simple_cmd.addr[2] = 0x0; + } else { + dev_err(&spi->dev, "cmd [%x] not supported\n", cmd); + return -EINVAL; + } + + cmd_len = offsetof(struct wilc_spi_cmd, u.simple_cmd.crc); + resp_len = sizeof(*r); + if (!spi_priv->crc_off) { + c->u.simple_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); + cmd_len += 1; + resp_len += 2; + } + + if (cmd_len + resp_len > ARRAY_SIZE(wb)) { + dev_err(&spi->dev, + "spi buffer size too small (%d) (%d) (%zu)\n", + cmd_len, resp_len, ARRAY_SIZE(wb)); + return -EINVAL; + } + + if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) { + dev_err(&spi->dev, "Failed cmd write, bus error...\n"); + return -EINVAL; + } + + r = (struct wilc_spi_read_rsp_data *)&rb[cmd_len]; + if (r->rsp_cmd_type != cmd) { + dev_err(&spi->dev, + "Failed cmd response, cmd (%02x), resp (%02x)\n", + cmd, r->rsp_cmd_type); + return -EINVAL; + } + + if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) { + dev_err(&spi->dev, "Failed cmd state response state (%02x)\n", + r->status); + return -EINVAL; + } + + if (WILC_GET_RESP_HDR_START(r->resp_header) != 0xf) { + dev_err(&spi->dev, "Error, data read response (%02x)\n", + r->resp_header); + return -EINVAL; + } + + if (b) + memcpy(b, r->resp_data, 4); + + if (!spi_priv->crc_off) + memcpy(crc, r->crc, 2); + + return 0; +} + +static int wilc_spi_write_cmd(struct wilc *wilc, u8 cmd, u32 adr, u32 data, + u8 clockless) +{ + struct spi_device *spi = to_spi_device(wilc->dev); + struct wilc_spi *spi_priv = wilc->bus_data; + u8 wb[32], rb[32]; + int cmd_len, resp_len; + struct wilc_spi_cmd *c; + struct wilc_spi_rsp_data *r; + + memset(wb, 0x0, sizeof(wb)); + memset(rb, 0x0, sizeof(rb)); + c = (struct wilc_spi_cmd *)wb; + c->cmd_type = cmd; + if (cmd == CMD_INTERNAL_WRITE) { + c->u.internal_w_cmd.addr[0] = 
adr >> 8; + if (clockless == 1) + c->u.internal_w_cmd.addr[0] |= BIT(7); + + c->u.internal_w_cmd.addr[1] = adr; + c->u.internal_w_cmd.data = cpu_to_be32(data); + cmd_len = offsetof(struct wilc_spi_cmd, u.internal_w_cmd.crc); + if (!spi_priv->crc_off) + c->u.internal_w_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); + } else if (cmd == CMD_SINGLE_WRITE) { + c->u.w_cmd.addr[0] = adr >> 16; + c->u.w_cmd.addr[1] = adr >> 8; + c->u.w_cmd.addr[2] = adr; + c->u.w_cmd.data = cpu_to_be32(data); + cmd_len = offsetof(struct wilc_spi_cmd, u.w_cmd.crc); + if (!spi_priv->crc_off) + c->u.w_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); + } else { + dev_err(&spi->dev, "write cmd [%x] not supported\n", cmd); + return -EINVAL; + } + + if (!spi_priv->crc_off) + cmd_len += 1; + + resp_len = sizeof(*r); + + if (cmd_len + resp_len > ARRAY_SIZE(wb)) { + dev_err(&spi->dev, + "spi buffer size too small (%d) (%d) (%zu)\n", + cmd_len, resp_len, ARRAY_SIZE(wb)); + return -EINVAL; + } + + if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) { + dev_err(&spi->dev, "Failed cmd write, bus error...\n"); + return -EINVAL; + } + + r = (struct wilc_spi_rsp_data *)&rb[cmd_len]; + if (r->rsp_cmd_type != cmd) { + dev_err(&spi->dev, + "Failed cmd response, cmd (%02x), resp (%02x)\n", + cmd, r->rsp_cmd_type); + return -EINVAL; + } + + if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) { + dev_err(&spi->dev, "Failed cmd state response state (%02x)\n", + r->status); + return -EINVAL; + } + + return 0; +} + +static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz) +{ + struct spi_device *spi = to_spi_device(wilc->dev); + struct wilc_spi *spi_priv = wilc->bus_data; + u8 wb[32], rb[32]; + int cmd_len, resp_len; + int retry, ix = 0; + u8 crc[2]; + struct wilc_spi_cmd *c; + struct wilc_spi_rsp_data *r; + + memset(wb, 0x0, sizeof(wb)); + memset(rb, 0x0, sizeof(rb)); + c = (struct wilc_spi_cmd *)wb; + c->cmd_type = cmd; + if (cmd == CMD_DMA_WRITE || cmd == CMD_DMA_READ) { + c->u.dma_cmd.addr[0] = adr >> 16; + c->u.dma_cmd.addr[1] = adr >> 8; + c->u.dma_cmd.addr[2] = adr; + c->u.dma_cmd.size[0] = sz >> 8; + c->u.dma_cmd.size[1] = sz; + cmd_len = offsetof(struct wilc_spi_cmd, u.dma_cmd.crc); + if (!spi_priv->crc_off) + c->u.dma_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); + } else if (cmd == CMD_DMA_EXT_WRITE || cmd == CMD_DMA_EXT_READ) { + c->u.dma_cmd_ext.addr[0] = adr >> 16; + c->u.dma_cmd_ext.addr[1] = adr >> 8; + c->u.dma_cmd_ext.addr[2] = adr; + c->u.dma_cmd_ext.size[0] = sz >> 16; + c->u.dma_cmd_ext.size[1] = sz >> 8; + c->u.dma_cmd_ext.size[2] = sz; + cmd_len = offsetof(struct wilc_spi_cmd, u.dma_cmd_ext.crc); + if (!spi_priv->crc_off) + c->u.dma_cmd_ext.crc[0] = wilc_get_crc7(wb, cmd_len); + } else { + dev_err(&spi->dev, "dma read write cmd [%x] not supported\n", + cmd); + return -EINVAL; + } + if (!spi_priv->crc_off) + cmd_len += 1; + + resp_len = sizeof(*r); + + if (cmd_len + resp_len > ARRAY_SIZE(wb)) { + dev_err(&spi->dev, "spi buffer size too small (%d)(%d) (%zu)\n", + cmd_len, resp_len, ARRAY_SIZE(wb)); + return -EINVAL; + } + + if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) { + dev_err(&spi->dev, "Failed cmd write, bus error...\n"); + return -EINVAL; + } + + r = (struct wilc_spi_rsp_data *)&rb[cmd_len]; + if (r->rsp_cmd_type != cmd) { + dev_err(&spi->dev, + "Failed cmd response, cmd (%02x), resp (%02x)\n", + cmd, r->rsp_cmd_type); + return -EINVAL; + } + + if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) { + dev_err(&spi->dev, "Failed cmd state response state (%02x)\n", + r->status); + return -EINVAL; + } + + if (cmd 
== CMD_DMA_WRITE || cmd == CMD_DMA_EXT_WRITE) + return 0; + + while (sz > 0) { + int nbytes; + u8 rsp; + + if (sz <= DATA_PKT_SZ) + nbytes = sz; + else + nbytes = DATA_PKT_SZ; + + /* + * Data Response header + */ + retry = 100; + do { + if (wilc_spi_rx(wilc, &rsp, 1)) { + dev_err(&spi->dev, + "Failed resp read, bus err\n"); + return -EINVAL; + } + if (WILC_GET_RESP_HDR_START(rsp) == 0xf) + break; + } while (retry--); + + /* + * Read bytes + */ + if (wilc_spi_rx(wilc, &b[ix], nbytes)) { + dev_err(&spi->dev, + "Failed block read, bus err\n"); + return -EINVAL; + } + + /* + * Read Crc + */ + if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) { + dev_err(&spi->dev, + "Failed block crc read, bus err\n"); + return -EINVAL; + } + + ix += nbytes; + sz -= nbytes; + } + return 0; +} + +static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data) +{ + struct spi_device *spi = to_spi_device(wilc->dev); + int result; + u8 cmd = CMD_SINGLE_READ; + u8 clockless = 0; + + if (addr < WILC_SPI_CLOCKLESS_ADDR_LIMIT) { + /* Clockless register */ + cmd = CMD_INTERNAL_READ; + clockless = 1; + } + + result = wilc_spi_single_read(wilc, cmd, addr, data, clockless); + if (result) { + dev_err(&spi->dev, "Failed cmd, read reg (%08x)...\n", addr); + return result; + } + + le32_to_cpus(data); + + return 0; +} + +static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size) +{ + struct spi_device *spi = to_spi_device(wilc->dev); + int result; + + if (size <= 4) + return -EINVAL; + + result = wilc_spi_dma_rw(wilc, CMD_DMA_EXT_READ, addr, buf, size); + if (result) { + dev_err(&spi->dev, "Failed cmd, read block (%08x)...\n", addr); + return result; + } + + return 0; +} static int spi_internal_write(struct wilc *wilc, u32 adr, u32 dat) { struct spi_device *spi = to_spi_device(wilc->dev); int result; - cpu_to_le32s(&dat); - result = spi_cmd_complete(wilc, CMD_INTERNAL_WRITE, adr, (u8 *)&dat, 4, - 0); - if (result) + result = wilc_spi_write_cmd(wilc, CMD_INTERNAL_WRITE, adr, dat, 0); + if (result) { dev_err(&spi->dev, "Failed internal write cmd...\n"); + return result; + } - return result; + return 0; } static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data) @@ -706,8 +680,7 @@ static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data) struct spi_device *spi = to_spi_device(wilc->dev); int result; - result = spi_cmd_complete(wilc, CMD_INTERNAL_READ, adr, (u8 *)data, 4, - 0); + result = wilc_spi_single_read(wilc, CMD_INTERNAL_READ, adr, data, 0); if (result) { dev_err(&spi->dev, "Failed internal read cmd...\n"); return result; @@ -715,7 +688,7 @@ static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data) le32_to_cpus(data); - return result; + return 0; } /******************************************** @@ -731,18 +704,19 @@ static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data) u8 cmd = CMD_SINGLE_WRITE; u8 clockless = 0; - cpu_to_le32s(&data); - if (addr < 0x30) { + if (addr < WILC_SPI_CLOCKLESS_ADDR_LIMIT) { /* Clockless register */ cmd = CMD_INTERNAL_WRITE; clockless = 1; } - result = spi_cmd_complete(wilc, cmd, addr, (u8 *)&data, 4, clockless); - if (result) + result = wilc_spi_write_cmd(wilc, cmd, addr, data, clockless); + if (result) { dev_err(&spi->dev, "Failed cmd, write reg (%08x)...\n", addr); + return result; + } - return result; + return 0; } static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) @@ -756,7 +730,7 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) if (size <= 4) return -EINVAL; - result = 
spi_cmd_complete(wilc, CMD_DMA_EXT_WRITE, addr, NULL, size, 0); + result = wilc_spi_dma_rw(wilc, CMD_DMA_EXT_WRITE, addr, NULL, size); if (result) { dev_err(&spi->dev, "Failed cmd, write block (%08x)...\n", addr); @@ -767,51 +741,14 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) * Data */ result = spi_data_write(wilc, buf, size); - if (result) - dev_err(&spi->dev, "Failed block data write...\n"); - - return result; -} - -static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data) -{ - struct spi_device *spi = to_spi_device(wilc->dev); - int result; - u8 cmd = CMD_SINGLE_READ; - u8 clockless = 0; - - if (addr < 0x30) { - /* Clockless register */ - cmd = CMD_INTERNAL_READ; - clockless = 1; - } - - result = spi_cmd_complete(wilc, cmd, addr, (u8 *)data, 4, clockless); if (result) { - dev_err(&spi->dev, "Failed cmd, read reg (%08x)...\n", addr); + dev_err(&spi->dev, "Failed block data write...\n"); return result; } - le32_to_cpus(data); - return 0; } -static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size) -{ - struct spi_device *spi = to_spi_device(wilc->dev); - int result; - - if (size <= 4) - return -EINVAL; - - result = spi_cmd_complete(wilc, CMD_DMA_EXT_READ, addr, buf, size, 0); - if (result) - dev_err(&spi->dev, "Failed cmd, read block (%08x)...\n", addr); - - return result; -} - /******************************************** * * Bus interfaces @@ -836,7 +773,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) int ret; if (isinit) { - ret = wilc_spi_read_reg(wilc, 0x1000, &chipid); + ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid); if (ret) dev_err(&spi->dev, "Fail cmd read chip id...\n"); @@ -888,7 +825,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) /* * make sure can read back chip id correctly */ - ret = wilc_spi_read_reg(wilc, 0x1000, &chipid); + ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid); if (ret) { dev_err(&spi->dev, "Fail cmd read chip id...\n"); return ret; @@ -903,26 +840,28 @@ static int wilc_spi_read_size(struct wilc *wilc, u32 *size) { int ret; - ret = spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE, size); - *size = *size & IRQ_DMA_WD_CNT_MASK; + ret = spi_internal_read(wilc, + WILC_SPI_INT_STATUS - WILC_SPI_REG_BASE, size); + *size = FIELD_GET(IRQ_DMA_WD_CNT_MASK, *size); return ret; } static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status) { - return spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE, int_status); + return spi_internal_read(wilc, WILC_SPI_INT_STATUS - WILC_SPI_REG_BASE, + int_status); } static int wilc_spi_clear_int_ext(struct wilc *wilc, u32 val) { - return spi_internal_write(wilc, 0xe844 - WILC_SPI_REG_BASE, val); + return spi_internal_write(wilc, WILC_SPI_INT_CLEAR - WILC_SPI_REG_BASE, + val); } static int wilc_spi_sync_ext(struct wilc *wilc, int nint) { struct spi_device *spi = to_spi_device(wilc->dev); - struct wilc_spi *spi_priv = wilc->bus_data; u32 reg; int ret, i; @@ -931,8 +870,6 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint) return -EINVAL; } - spi_priv->nint = nint; - /* * interrupt pin mux select */ diff --git a/drivers/staging/wilc1000/wlan.c b/drivers/staging/wilc1000/wlan.c index 601e4d1345d2..6a82fb2f283e 100644 --- a/drivers/staging/wilc1000/wlan.c +++ b/drivers/staging/wilc1000/wlan.c @@ -11,7 +11,7 @@ static inline bool is_wilc1000(u32 id) { - return (id & 0xfffff000) == 0x100000; + return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID; } static inline void acquire_bus(struct wilc *wilc, enum bus_acquire acquire) @@ 
-393,62 +393,67 @@ void chip_allow_sleep(struct wilc *wilc) { u32 reg = 0; - wilc->hif_func->hif_read_reg(wilc, 0xf0, &reg); + wilc->hif_func->hif_read_reg(wilc, WILC_SDIO_WAKEUP_REG, &reg); - wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0)); - wilc->hif_func->hif_write_reg(wilc, 0xfa, 0); + wilc->hif_func->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG, + reg & ~WILC_SDIO_WAKEUP_BIT); + wilc->hif_func->hif_write_reg(wilc, WILC_SDIO_HOST_TO_FW_REG, 0); } EXPORT_SYMBOL_GPL(chip_allow_sleep); void chip_wakeup(struct wilc *wilc) { u32 reg, clk_status_reg; + const struct wilc_hif_func *h = wilc->hif_func; - if ((wilc->io_type & 0x1) == WILC_HIF_SPI) { + if (wilc->io_type == WILC_HIF_SPI) { do { - wilc->hif_func->hif_read_reg(wilc, 1, &reg); - wilc->hif_func->hif_write_reg(wilc, 1, reg | BIT(1)); - wilc->hif_func->hif_write_reg(wilc, 1, reg & ~BIT(1)); + h->hif_read_reg(wilc, WILC_SPI_WAKEUP_REG, &reg); + h->hif_write_reg(wilc, WILC_SPI_WAKEUP_REG, + reg | WILC_SPI_WAKEUP_BIT); + h->hif_write_reg(wilc, WILC_SPI_WAKEUP_REG, + reg & ~WILC_SPI_WAKEUP_BIT); do { usleep_range(2000, 2500); wilc_get_chipid(wilc, true); } while (wilc_get_chipid(wilc, true) == 0); } while (wilc_get_chipid(wilc, true) == 0); - } else if ((wilc->io_type & 0x1) == WILC_HIF_SDIO) { - wilc->hif_func->hif_write_reg(wilc, 0xfa, 1); + } else if (wilc->io_type == WILC_HIF_SDIO) { + h->hif_write_reg(wilc, WILC_SDIO_HOST_TO_FW_REG, + WILC_SDIO_HOST_TO_FW_BIT); usleep_range(200, 400); - wilc->hif_func->hif_read_reg(wilc, 0xf0, &reg); + h->hif_read_reg(wilc, WILC_SDIO_WAKEUP_REG, &reg); do { - wilc->hif_func->hif_write_reg(wilc, 0xf0, - reg | BIT(0)); - wilc->hif_func->hif_read_reg(wilc, 0xf1, - &clk_status_reg); + h->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG, + reg | WILC_SDIO_WAKEUP_BIT); + h->hif_read_reg(wilc, WILC_SDIO_CLK_STATUS_REG, + &clk_status_reg); - while ((clk_status_reg & 0x1) == 0) { + while (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)) { usleep_range(2000, 2500); - wilc->hif_func->hif_read_reg(wilc, 0xf1, - &clk_status_reg); + h->hif_read_reg(wilc, WILC_SDIO_CLK_STATUS_REG, + &clk_status_reg); } - if ((clk_status_reg & 0x1) == 0) { - wilc->hif_func->hif_write_reg(wilc, 0xf0, - reg & (~BIT(0))); + if (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)) { + h->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG, + reg & ~WILC_SDIO_WAKEUP_BIT); } - } while ((clk_status_reg & 0x1) == 0); + } while (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)); } if (wilc->chip_ps_state == WILC_CHIP_SLEEPING_MANUAL) { - if (wilc_get_chipid(wilc, false) < 0x1002b0) { + if (wilc_get_chipid(wilc, false) < WILC_1000_BASE_ID_2B) { u32 val32; - wilc->hif_func->hif_read_reg(wilc, 0x1e1c, &val32); + h->hif_read_reg(wilc, WILC_REG_4_TO_1_RX, &val32); val32 |= BIT(6); - wilc->hif_func->hif_write_reg(wilc, 0x1e1c, val32); + h->hif_write_reg(wilc, WILC_REG_4_TO_1_RX, val32); - wilc->hif_func->hif_read_reg(wilc, 0x1e9c, &val32); + h->hif_read_reg(wilc, WILC_REG_4_TO_1_TX_BANK0, &val32); val32 |= BIT(6); - wilc->hif_func->hif_write_reg(wilc, 0x1e9c, val32); + h->hif_write_reg(wilc, WILC_REG_4_TO_1_TX_BANK0, val32); } } wilc->chip_ps_state = WILC_CHIP_WAKEDUP; @@ -458,7 +463,7 @@ EXPORT_SYMBOL_GPL(chip_wakeup); void host_wakeup_notify(struct wilc *wilc) { acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY); - wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1); + wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_INTERRUPT_2, 1); release_bus(wilc, WILC_BUS_RELEASE_ONLY); } EXPORT_SYMBOL_GPL(host_wakeup_notify); @@ -466,7 +471,7 @@ EXPORT_SYMBOL_GPL(host_wakeup_notify); void host_sleep_notify(struct wilc 
*wilc) { acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY); - wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1); + wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_INTERRUPT_1, 1); release_bus(wilc, WILC_BUS_RELEASE_ONLY); } EXPORT_SYMBOL_GPL(host_sleep_notify); @@ -508,9 +513,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) vmm_sz = HOST_HDR_OFFSET; vmm_sz += tqe->buffer_size; - - if (vmm_sz & 0x3) - vmm_sz = (vmm_sz + 4) & ~0x3; + vmm_sz = ALIGN(vmm_sz, 4); if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE) break; @@ -568,11 +571,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) ret = func->hif_read_reg(wilc, WILC_HOST_VMM_CTL, &reg); if (ret) break; - if ((reg >> 2) & 0x1) { - entries = ((reg >> 3) & 0x3f); + if (FIELD_GET(WILC_VMM_ENTRY_AVAILABLE, reg)) { + entries = FIELD_GET(WILC_VMM_ENTRY_COUNT, reg); break; } - release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); } while (--timeout); if (timeout <= 0) { ret = func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x0); @@ -610,6 +612,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) do { u32 header, buffer_offset; char *bssid; + u8 mgmt_ptk = 0; tqe = wilc_wlan_txq_remove_from_head(dev); if (!tqe) @@ -620,15 +623,16 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) break; le32_to_cpus(&vmm_table[i]); - vmm_sz = (vmm_table[i] & 0x3ff); + vmm_sz = FIELD_GET(WILC_VMM_BUFFER_SIZE, vmm_table[i]); vmm_sz *= 4; - header = (tqe->type << 31) | - (tqe->buffer_size << 15) | - vmm_sz; + if (tqe->type == WILC_MGMT_PKT) - header |= BIT(30); - else - header &= ~BIT(30); + mgmt_ptk = 1; + + header = (FIELD_PREP(WILC_VMM_HDR_TYPE, tqe->type) | + FIELD_PREP(WILC_VMM_HDR_MGMT_FIELD, mgmt_ptk) | + FIELD_PREP(WILC_VMM_HDR_PKT_SIZE, tqe->buffer_size) | + FIELD_PREP(WILC_VMM_HDR_BUFF_SIZE, vmm_sz)); cpu_to_le32s(&header); memcpy(&txb[offset], &header, 4); @@ -686,10 +690,10 @@ static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size) buff_ptr = buffer + offset; header = get_unaligned_le32(buff_ptr); - is_cfg_packet = (header >> 31) & 0x1; - pkt_offset = (header >> 22) & 0x1ff; - tp_len = (header >> 11) & 0x7ff; - pkt_len = header & 0x7ff; + is_cfg_packet = FIELD_GET(WILC_PKT_HDR_CONFIG_FIELD, header); + pkt_offset = FIELD_GET(WILC_PKT_HDR_OFFSET_FIELD, header); + tp_len = FIELD_GET(WILC_PKT_HDR_TOTAL_LEN_FIELD, header); + pkt_len = FIELD_GET(WILC_PKT_HDR_LEN_FIELD, header); if (pkt_len == 0 || tp_len == 0) break; @@ -699,10 +703,8 @@ static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size) wilc_wfi_mgmt_rx(wilc, buff_ptr, pkt_len); } else { if (!is_cfg_packet) { - if (pkt_len > 0) { - wilc_frmw_to_host(wilc, buff_ptr, - pkt_len, pkt_offset); - } + wilc_frmw_to_host(wilc, buff_ptr, pkt_len, + pkt_offset); } else { struct wilc_cfg_rsp rsp; @@ -758,11 +760,11 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status) int ret = 0; struct rxq_entry_t *rqe; - size = (int_status & 0x7fff) << 2; + size = FIELD_GET(WILC_INTERRUPT_DATA_SIZE, int_status) << 2; while (!size && retries < 10) { wilc->hif_func->hif_read_size(wilc, &size); - size = (size & 0x7fff) << 2; + size = FIELD_GET(WILC_INTERRUPT_DATA_SIZE, size) << 2; retries++; } @@ -887,7 +889,7 @@ int wilc_wlan_start(struct wilc *wilc) wilc->hif_func->hif_sync_ext(wilc, NUM_INT_EXT); - ret = wilc->hif_func->hif_read_reg(wilc, 0x1000, &chipid); + ret = wilc->hif_func->hif_read_reg(wilc, WILC_CHIPID, &chipid); if (ret) { release_bus(wilc, WILC_BUS_RELEASE_ONLY); return ret; @@ -1128,18 +1130,24 @@ static int init_chip(struct net_device 
*dev) chipid = wilc_get_chipid(wilc, true); if ((chipid & 0xfff) != 0xa0) { - ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg); + ret = wilc->hif_func->hif_read_reg(wilc, + WILC_CORTUS_RESET_MUX_SEL, + &reg); if (ret) { netdev_err(dev, "fail read reg 0x1118\n"); goto release; } reg |= BIT(0); - ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg); + ret = wilc->hif_func->hif_write_reg(wilc, + WILC_CORTUS_RESET_MUX_SEL, + reg); if (ret) { netdev_err(dev, "fail write reg 0x1118\n"); goto release; } - ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71); + ret = wilc->hif_func->hif_write_reg(wilc, + WILC_CORTUS_BOOT_REGISTER, + WILC_CORTUS_BOOT_FROM_IRAM); if (ret) { netdev_err(dev, "fail write reg 0xc0000\n"); goto release; @@ -1159,20 +1167,21 @@ u32 wilc_get_chipid(struct wilc *wilc, bool update) u32 rfrevid = 0; if (chipid == 0 || update) { - wilc->hif_func->hif_read_reg(wilc, 0x1000, &tempchipid); - wilc->hif_func->hif_read_reg(wilc, 0x13f4, &rfrevid); + wilc->hif_func->hif_read_reg(wilc, WILC_CHIPID, &tempchipid); + wilc->hif_func->hif_read_reg(wilc, WILC_RF_REVISION_ID, + &rfrevid); if (!is_wilc1000(tempchipid)) { chipid = 0; return chipid; } - if (tempchipid == 0x1002a0) { + if (tempchipid == WILC_1000_BASE_ID_2A) { /* 0x1002A0 */ if (rfrevid != 0x1) - tempchipid = 0x1002a1; + tempchipid = WILC_1000_BASE_ID_2A_REV1; - } else if (tempchipid == 0x1002b0) { + } else if (tempchipid == WILC_1000_BASE_ID_2B) { /* 0x1002B0 */ if (rfrevid == 0x4) - tempchipid = 0x1002b1; + tempchipid = WILC_1000_BASE_ID_2B_REV1; else if (rfrevid != 0x3) - tempchipid = 0x1002b2; + tempchipid = WILC_1000_BASE_ID_2B_REV2; } chipid = tempchipid; diff --git a/drivers/staging/wilc1000/wlan.h b/drivers/staging/wilc1000/wlan.h index 8c4634262adb..7689569cd82f 100644 --- a/drivers/staging/wilc1000/wlan.h +++ b/drivers/staging/wilc1000/wlan.h @@ -8,6 +8,7 @@ #define WILC_WLAN_H #include <linux/types.h> +#include <linux/bitfield.h> /******************************************** * @@ -65,6 +66,8 @@ #define WILC_INTR_CLEAR (WILC_INTR_REG_BASE + 0x30) #define WILC_INTR_STATUS (WILC_INTR_REG_BASE + 0x40) +#define WILC_RF_REVISION_ID 0x13f4 + #define WILC_VMM_TBL_SIZE 64 #define WILC_VMM_TX_TBL_BASE 0x150400 #define WILC_VMM_RX_TBL_BASE 0x150500 @@ -88,10 +91,53 @@ #define WILC_SPI_TX_MODE (WILC_SPI_REG_BASE + 0x20) #define WILC_SPI_PROTOCOL_CONFIG (WILC_SPI_REG_BASE + 0x24) #define WILC_SPI_INTR_CTL (WILC_SPI_REG_BASE + 0x2c) +#define WILC_SPI_INT_STATUS (WILC_SPI_REG_BASE + 0x40) +#define WILC_SPI_INT_CLEAR (WILC_SPI_REG_BASE + 0x44) + +#define WILC_SPI_WAKEUP_REG 0x1 +#define WILC_SPI_WAKEUP_BIT BIT(1) #define WILC_SPI_PROTOCOL_OFFSET (WILC_SPI_PROTOCOL_CONFIG - \ WILC_SPI_REG_BASE) +#define WILC_SPI_CLOCKLESS_ADDR_LIMIT 0x30 + +/* Functions IO enables bits */ +#define WILC_SDIO_CCCR_IO_EN_FUNC1 BIT(1) + +/* Function/Interrupt enables bits */ +#define WILC_SDIO_CCCR_IEN_MASTER BIT(0) +#define WILC_SDIO_CCCR_IEN_FUNC1 BIT(1) + +/* Abort CCCR register bits */ +#define WILC_SDIO_CCCR_ABORT_RESET BIT(3) + +/* Vendor specific CCCR registers */ +#define WILC_SDIO_WAKEUP_REG 0xf0 +#define WILC_SDIO_WAKEUP_BIT BIT(0) + +#define WILC_SDIO_CLK_STATUS_REG 0xf1 +#define WILC_SDIO_CLK_STATUS_BIT BIT(0) + +#define WILC_SDIO_INTERRUPT_DATA_SZ_REG 0xf2 /* Read size (2 bytes) */ + +#define WILC_SDIO_VMM_TBL_CTRL_REG 0xf6 +#define WILC_SDIO_IRQ_FLAG_REG 0xf7 +#define WILC_SDIO_IRQ_CLEAR_FLAG_REG 0xf8 + +#define WILC_SDIO_HOST_TO_FW_REG 0xfa +#define WILC_SDIO_HOST_TO_FW_BIT BIT(0) + +#define WILC_SDIO_FW_TO_HOST_REG 0xfc +#define 
WILC_SDIO_FW_TO_HOST_BIT BIT(0) + +/* Function 1 specific FBR register */ +#define WILC_SDIO_FBR_CSA_REG 0x10C /* CSA pointer (3 bytes) */ +#define WILC_SDIO_FBR_DATA_REG 0x10F + +#define WILC_SDIO_F1_DATA_REG 0x0 +#define WILC_SDIO_EXT_IRQ_FLAG_REG 0x4 + #define WILC_AHB_DATA_MEM_BASE 0x30000 #define WILC_AHB_SHARE_MEM_BASE 0xd0000 @@ -112,6 +158,32 @@ #define WILC_HAVE_DISABLE_WILC_UART BIT(7) #define WILC_HAVE_USE_IRQ_AS_HOST_WAKE BIT(8) +#define WILC_CORTUS_INTERRUPT_BASE 0x10A8 +#define WILC_CORTUS_INTERRUPT_1 (WILC_CORTUS_INTERRUPT_BASE + 0x4) +#define WILC_CORTUS_INTERRUPT_2 (WILC_CORTUS_INTERRUPT_BASE + 0x8) + +/* tx control register 1 to 4 for RX */ +#define WILC_REG_4_TO_1_RX 0x1e1c + +/* tx control register 1 to 4 for TX Bank_0 */ +#define WILC_REG_4_TO_1_TX_BANK0 0x1e9c + +#define WILC_CORTUS_RESET_MUX_SEL 0x1118 +#define WILC_CORTUS_BOOT_REGISTER 0xc0000 + +#define WILC_CORTUS_BOOT_FROM_IRAM 0x71 + +#define WILC_1000_BASE_ID 0x100000 + +#define WILC_1000_BASE_ID_2A 0x1002A0 +#define WILC_1000_BASE_ID_2A_REV1 (WILC_1000_BASE_ID_2A + 1) + +#define WILC_1000_BASE_ID_2B 0x1002B0 +#define WILC_1000_BASE_ID_2B_REV1 (WILC_1000_BASE_ID_2B + 1) +#define WILC_1000_BASE_ID_2B_REV2 (WILC_1000_BASE_ID_2B + 2) + +#define WILC_CHIP_REV_FIELD GENMASK(11, 0) + /******************************************** * * Wlan Defines @@ -134,7 +206,23 @@ #define WILC_TX_BUFF_SIZE (64 * 1024) #define MODALIAS "WILC_SPI" -#define GPIO_NUM 0x44 + +#define WILC_PKT_HDR_CONFIG_FIELD BIT(31) +#define WILC_PKT_HDR_OFFSET_FIELD GENMASK(30, 22) +#define WILC_PKT_HDR_TOTAL_LEN_FIELD GENMASK(21, 11) +#define WILC_PKT_HDR_LEN_FIELD GENMASK(10, 0) + +#define WILC_INTERRUPT_DATA_SIZE GENMASK(14, 0) + +#define WILC_VMM_BUFFER_SIZE GENMASK(9, 0) + +#define WILC_VMM_HDR_TYPE BIT(31) +#define WILC_VMM_HDR_MGMT_FIELD BIT(30) +#define WILC_VMM_HDR_PKT_SIZE GENMASK(29, 15) +#define WILC_VMM_HDR_BUFF_SIZE GENMASK(14, 0) + +#define WILC_VMM_ENTRY_COUNT GENMASK(8, 3) +#define WILC_VMM_ENTRY_AVAILABLE BIT(2) /*******************************************/ /* E0 and later Interrupt flags. */ /*******************************************/ @@ -150,14 +238,16 @@ /* 21: INT5 flag */ /*******************************************/ #define IRG_FLAGS_OFFSET 16 -#define IRQ_DMA_WD_CNT_MASK ((1ul << IRG_FLAGS_OFFSET) - 1) +#define IRQ_DMA_WD_CNT_MASK GENMASK(IRG_FLAGS_OFFSET - 1, 0) #define INT_0 BIT(IRG_FLAGS_OFFSET) #define INT_1 BIT(IRG_FLAGS_OFFSET + 1) #define INT_2 BIT(IRG_FLAGS_OFFSET + 2) #define INT_3 BIT(IRG_FLAGS_OFFSET + 3) #define INT_4 BIT(IRG_FLAGS_OFFSET + 4) #define INT_5 BIT(IRG_FLAGS_OFFSET + 5) -#define MAX_NUM_INT 6 +#define MAX_NUM_INT 5 +#define IRG_FLAGS_MASK GENMASK(IRG_FLAGS_OFFSET + MAX_NUM_INT, \ + IRG_FLAGS_OFFSET) /*******************************************/ /* E0 and later Interrupt flags. 
*/ @@ -185,6 +275,7 @@ #define DATA_INT_EXT INT_0 #define ALL_INT_EXT DATA_INT_EXT #define NUM_INT_EXT 1 +#define UNHANDLED_IRQ_MASK GENMASK(MAX_NUM_INT - 1, NUM_INT_EXT) #define DATA_INT_CLR CLR_INT0 diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h index bdd7f414fdbb..88e894dd3568 100644 --- a/drivers/staging/wlan-ng/hfa384x.h +++ b/drivers/staging/wlan-ng/hfa384x.h @@ -355,7 +355,7 @@ /* Commonly used basic types */ struct hfa384x_bytestr { __le16 len; - u8 data[0]; + u8 data[]; } __packed; struct hfa384x_bytestr32 { @@ -421,7 +421,7 @@ struct hfa384x_authenticate_station_data { /*-- Configuration Record: WPAData (data portion only) --*/ struct hfa384x_wpa_data { __le16 datalen; - u8 data[0]; /* max 80 */ + u8 data[]; /* max 80 */ } __packed; /*-------------------------------------------------------------------- diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index b71756ab0394..fa1bf8b069fd 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -3254,13 +3254,16 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb) struct p80211_rxmeta *rxmeta; u16 data_len; u16 fc; + u16 status; /* Byte order convert once up front. */ le16_to_cpus(&usbin->rxfrm.desc.status); le32_to_cpus(&usbin->rxfrm.desc.time); /* Now handle frame based on port# */ - switch (HFA384x_RXSTATUS_MACPORT_GET(usbin->rxfrm.desc.status)) { + status = HFA384x_RXSTATUS_MACPORT_GET(usbin->rxfrm.desc.status); + + switch (status) { case 0: fc = le16_to_cpu(usbin->rxfrm.desc.frame_control); @@ -3317,8 +3320,9 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb) break; default: - netdev_warn(hw->wlandev->netdev, "Received frame on unsupported port=%d\n", - HFA384x_RXSTATUS_MACPORT_GET(usbin->rxfrm.desc.status)); + netdev_warn(hw->wlandev->netdev, + "Received frame on unsupported port=%d\n", + status); break; } } @@ -3372,6 +3376,8 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev, WLAN_HDR_A4_LEN + WLAN_DATA_MAXLEN + WLAN_CRC_LEN)) { pr_debug("overlen frm: len=%zd\n", skblen - sizeof(struct p80211_caphdr)); + + return; } skb = dev_alloc_skb(skblen); diff --git a/drivers/staging/wlan-ng/p80211types.h b/drivers/staging/wlan-ng/p80211types.h index ac254542fde6..3dcdd022da61 100644 --- a/drivers/staging/wlan-ng/p80211types.h +++ b/drivers/staging/wlan-ng/p80211types.h @@ -204,7 +204,7 @@ struct p80211pstr { struct p80211pstrd { u8 len; - u8 data[0]; + u8 data[]; } __packed; /* Maximum pascal string */ @@ -249,7 +249,7 @@ struct p80211itemd { u32 did; u16 status; u16 len; - u8 data[0]; + u8 data[]; } __packed; /* message data item for int, BOUNDEDINT, ENUMINT */ diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c index 352556f6870a..4689b2170e4f 100644 --- a/drivers/staging/wlan-ng/prism2usb.c +++ b/drivers/staging/wlan-ng/prism2usb.c @@ -180,6 +180,7 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface) cancel_work_sync(&hw->link_bh); cancel_work_sync(&hw->commsqual_bh); + cancel_work_sync(&hw->usb_work); /* Now we complete any outstanding commands * and tell everyone who is waiting for their diff --git a/drivers/staging/wusbcore/Documentation/wusb-cbaf b/drivers/staging/wusbcore/Documentation/wusb-cbaf deleted file mode 100644 index 8b3d43efce90..000000000000 --- a/drivers/staging/wusbcore/Documentation/wusb-cbaf +++ /dev/null @@ -1,130 +0,0 @@ -#! 
/bin/bash -# - -set -e - -progname=$(basename $0) -function help -{ - cat <<EOF -Usage: $progname COMMAND DEVICEs [ARGS] - -Command for manipulating the pairing/authentication credentials of a -Wireless USB device that supports wired-mode Cable-Based-Association. - -Works in conjunction with the wusb-cba.ko driver from http://linuxuwb.org. - - -DEVICE - - sysfs path to the device to authenticate; for example, both this - guys are the same: - - /sys/devices/pci0000:00/0000:00:1d.7/usb1/1-4/1-4.4/1-4.4:1.1 - /sys/bus/usb/drivers/wusb-cbaf/1-4.4:1.1 - -COMMAND/ARGS are - - start - - Start a WUSB host controller (by setting up a CHID) - - set-chid DEVICE HOST-CHID HOST-BANDGROUP HOST-NAME - - Sets host information in the device; after this you can call the - get-cdid to see how does this device report itself to us. - - get-cdid DEVICE - - Get the device ID associated to the HOST-CHID we sent with - 'set-chid'. We might not know about it. - - set-cc DEVICE - - If we allow the device to connect, set a random new CDID and CK - (connection key). Device saves them for the next time it wants to - connect wireless. We save them for that next time also so we can - authenticate the device (when we see the CDID he uses to id - itself) and the CK to crypto talk to it. - -CHID is always 16 hex bytes in 'XX YY ZZ...' form -BANDGROUP is almost always 0001 - -Examples: - - You can default most arguments to '' to get a sane value: - - $ $progname set-chid '' '' '' "My host name" - - A full sequence: - - $ $progname set-chid '' '' '' "My host name" - $ $progname get-cdid '' - $ $progname set-cc '' - -EOF -} - - -# Defaults -# FIXME: CHID should come from a database :), band group from the host -host_CHID="00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff" -host_band_group="0001" -host_name=$(hostname) - -devs="$(echo /sys/bus/usb/drivers/wusb-cbaf/[0-9]*)" -hdevs="$(for h in /sys/class/uwb_rc/*/wusbhc; do readlink -f $h; done)" - -result=0 -case $1 in - start) - for dev in ${2:-$hdevs} - do - echo $host_CHID > $dev/wusb_chid - echo I: started host $(basename $dev) >&2 - done - ;; - stop) - for dev in ${2:-$hdevs} - do - echo 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 > $dev/wusb_chid - echo I: stopped host $(basename $dev) >&2 - done - ;; - set-chid) - shift - for dev in ${2:-$devs}; do - echo "${4:-$host_name}" > $dev/wusb_host_name - echo "${3:-$host_band_group}" > $dev/wusb_host_band_groups - echo ${2:-$host_CHID} > $dev/wusb_chid - done - ;; - get-cdid) - for dev in ${2:-$devs} - do - cat $dev/wusb_cdid - done - ;; - set-cc) - for dev in ${2:-$devs}; do - shift - CDID="$(head --bytes=16 /dev/urandom | od -tx1 -An)" - CK="$(head --bytes=16 /dev/urandom | od -tx1 -An)" - echo "$CDID" > $dev/wusb_cdid - echo "$CK" > $dev/wusb_ck - - echo I: CC set >&2 - echo "CHID: $(cat $dev/wusb_chid)" - echo "CDID:$CDID" - echo "CK: $CK" - done - ;; - help|h|--help|-h) - help - ;; - *) - echo "E: Unknown usage" 1>&2 - help 1>&2 - result=1 -esac -exit $result diff --git a/drivers/staging/wusbcore/Documentation/wusb-design-overview.rst b/drivers/staging/wusbcore/Documentation/wusb-design-overview.rst deleted file mode 100644 index dc5e21609bb5..000000000000 --- a/drivers/staging/wusbcore/Documentation/wusb-design-overview.rst +++ /dev/null @@ -1,457 +0,0 @@ -================================ -Linux UWB + Wireless USB + WiNET -================================ - - Copyright (C) 2005-2006 Intel Corporation - - Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - - This program is free software; you can redistribute it 
and/or - modify it under the terms of the GNU General Public License version - 2 as published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - - -Please visit http://bughost.org/thewiki/Design-overview.txt-1.8 for -updated content. - - * Design-overview.txt-1.8 - -This code implements a Ultra Wide Band stack for Linux, as well as -drivers for the USB based UWB radio controllers defined in the -Wireless USB 1.0 specification (including Wireless USB host controller -and an Intel WiNET controller). - -.. Contents - 1. Introduction - 1. HWA: Host Wire adapters, your Wireless USB dongle - - 2. DWA: Device Wired Adaptor, a Wireless USB hub for wired - devices - 3. WHCI: Wireless Host Controller Interface, the PCI WUSB host - adapter - 2. The UWB stack - 1. Devices and hosts: the basic structure - - 2. Host Controller life cycle - - 3. On the air: beacons and enumerating the radio neighborhood - - 4. Device lists - 5. Bandwidth allocation - - 3. Wireless USB Host Controller drivers - - 4. Glossary - - -Introduction -============ - -UWB is a wide-band communication protocol that is to serve also as the -low-level protocol for others (much like TCP sits on IP). Currently -these others are Wireless USB and TCP/IP, but seems Bluetooth and -Firewire/1394 are coming along. - -UWB uses a band from roughly 3 to 10 GHz, transmitting at a max of -~-41dB (or 0.074 uW/MHz--geography specific data is still being -negotiated w/ regulators, so watch for changes). That band is divided in -a bunch of ~1.5 GHz wide channels (or band groups) composed of three -subbands/subchannels (528 MHz each). Each channel is independent of each -other, so you could consider them different "busses". Initially this -driver considers them all a single one. - -Radio time is divided in 65536 us long /superframes/, each one divided -in 256 256us long /MASs/ (Media Allocation Slots), which are the basic -time/media allocation units for transferring data. At the beginning of -each superframe there is a Beacon Period (BP), where every device -transmit its beacon on a single MAS. The length of the BP depends on how -many devices are present and the length of their beacons. - -Devices have a MAC (fixed, 48 bit address) and a device (changeable, 16 -bit address) and send periodic beacons to advertise themselves and pass -info on what they are and do. They advertise their capabilities and a -bunch of other stuff. - -The different logical parts of this driver are: - - * - - *UWB*: the Ultra-Wide-Band stack -- manages the radio and - associated spectrum to allow for devices sharing it. Allows to - control bandwidth assignment, beaconing, scanning, etc - - * - - *WUSB*: the layer that sits on top of UWB to provide Wireless USB. - The Wireless USB spec defines means to control a UWB radio and to - do the actual WUSB. - - -HWA: Host Wire adapters, your Wireless USB dongle -------------------------------------------------- - -WUSB also defines a device called a Host Wire Adaptor (HWA), which in -mere terms is a USB dongle that enables your PC to have UWB and Wireless -USB. 
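
The superframe arithmetic described in the removed overview is easy to sanity-check: 256 MASs of 256 us each add up to exactly one 65536 us superframe. A minimal C sketch, using only the constants quoted in the text (the helper name is made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Constants as given in the overview text. */
    #define UWB_SUPERFRAME_US 65536u  /* one superframe lasts 65536 us */
    #define UWB_MAS_PER_SF    256u    /* ...divided into 256 MASs      */
    #define UWB_MAS_US        256u    /* ...each MAS being 256 us long */

    /* Hypothetical helper: microsecond offset of a MAS inside its superframe. */
    static uint32_t mas_offset_us(uint32_t mas_index)
    {
            return (mas_index % UWB_MAS_PER_SF) * UWB_MAS_US;
    }

    int main(void)
    {
            /* 256 MASs * 256 us == 65536 us, i.e. exactly one superframe. */
            printf("superframe = %u us, MAS #10 starts at %u us\n",
                   UWB_MAS_PER_SF * UWB_MAS_US, mas_offset_us(10));
            return 0;
    }
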
The Wireless USB Host Controller in a HWA looks to the host like a -[Wireless] USB controller connected via USB (!) - -The HWA itself is broken in two or three main interfaces: - - * - - *RC*: Radio control -- this implements an interface to the - Ultra-Wide-Band radio controller. The driver for this implements a - USB-based UWB Radio Controller to the UWB stack. - - * - - *HC*: the wireless USB host controller. It looks like a USB host - whose root port is the radio and the WUSB devices connect to it. - To the system it looks like a separate USB host. The driver (will) - implement a USB host controller (similar to UHCI, OHCI or EHCI) - for which the root hub is the radio...To reiterate: it is a USB - controller that is connected via USB instead of PCI. - - * - - *WINET*: some HW provide a WiNET interface (IP over UWB). This - package provides a driver for it (it looks like a network - interface, winetX). The driver detects when there is a link up for - their type and kick into gear. - - -DWA: Device Wired Adaptor, a Wireless USB hub for wired devices ---------------------------------------------------------------- - -These are the complement to HWAs. They are a USB host for connecting -wired devices, but it is connected to your PC connected via Wireless -USB. To the system it looks like yet another USB host. To the untrained -eye, it looks like a hub that connects upstream wirelessly. - -We still offer no support for this; however, it should share a lot of -code with the HWA-RC driver; there is a bunch of factorization work that -has been done to support that in upcoming releases. - - -WHCI: Wireless Host Controller Interface, the PCI WUSB host adapter -------------------------------------------------------------------- - -This is your usual PCI device that implements WHCI. Similar in concept -to EHCI, it allows your wireless USB devices (including DWAs) to connect -to your host via a PCI interface. As in the case of the HWA, it has a -Radio Control interface and the WUSB Host Controller interface per se. - -There is still no driver support for this, but will be in upcoming -releases. - - -The UWB stack -============= - -The main mission of the UWB stack is to keep a tally of which devices -are in radio proximity to allow drivers to connect to them. As well, it -provides an API for controlling the local radio controllers (RCs from -now on), such as to start/stop beaconing, scan, allocate bandwidth, etc. - - -Devices and hosts: the basic structure --------------------------------------- - -The main building block here is the UWB device (struct uwb_dev). For -each device that pops up in radio presence (ie: the UWB host receives a -beacon from it) you get a struct uwb_dev that will show up in -/sys/bus/uwb/devices. - -For each RC that is detected, a new struct uwb_rc and struct uwb_dev are -created. An entry is also created in /sys/class/uwb_rc for each RC. - -Each RC driver is implemented by a separate driver that plugs into the -interface that the UWB stack provides through a struct uwb_rc_ops. The -spec creators have been nice enough to make the message format the same -for HWA and WHCI RCs, so the driver is really a very thin transport that -moves the requests from the UWB API to the device [/uwb_rc_ops->cmd()/] -and sends the replies and notifications back to the API -[/uwb_rc_neh_grok()/]. 
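
A rough mental model for the "thin transport" role described above: an RC driver fills in a small ops table and only ferries opaque command blocks to the hardware, while the core calls through the same hook regardless of whether USB or PCI sits underneath. The sketch below is purely illustrative; struct my_rc_ops and its fields are hypothetical stand-ins, not the removed stack's struct uwb_rc_ops:

    #include <stdio.h>
    #include <stddef.h>

    struct my_rc;

    /* Hypothetical, simplified stand-in for the command path described above. */
    struct my_rc_ops {
            int (*cmd)(struct my_rc *rc, const void *cmd, size_t cmd_size);
    };

    struct my_rc {
            const struct my_rc_ops *ops;
    };

    /* A fake "USB dongle" transport that just reports what it would send. */
    static int fake_hwa_cmd(struct my_rc *rc, const void *cmd, size_t cmd_size)
    {
            (void)rc; (void)cmd;
            printf("would ship a %zu-byte command block over USB\n", cmd_size);
            return 0;
    }

    static const struct my_rc_ops fake_hwa_ops = { .cmd = fake_hwa_cmd };

    int main(void)
    {
            struct my_rc rc = { .ops = &fake_hwa_ops };
            unsigned char cmd_block[16] = { 0 };

            /* Core-side call: every transport looks the same from up here. */
            return rc.ops->cmd(&rc, cmd_block, sizeof(cmd_block));
    }
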
Notifications are handled to the UWB daemon, that -is chartered, among other things, to keep the tab of how the UWB radio -neighborhood looks, creating and destroying devices as they show up or -disappear. - -Command execution is very simple: a command block is sent and a event -block or reply is expected back. For sending/receiving command/events, a -handle called /neh/ (Notification/Event Handle) is opened with -/uwb_rc_neh_open()/. - -The HWA-RC (USB dongle) driver (drivers/uwb/hwa-rc.c) does this job for -the USB connected HWA. Eventually, drivers/whci-rc.c will do the same -for the PCI connected WHCI controller. - - -Host Controller life cycle --------------------------- - -So let's say we connect a dongle to the system: it is detected and -firmware uploaded if needed [for Intel's i1480 -/drivers/uwb/ptc/usb.c:ptc_usb_probe()/] and then it is reenumerated. -Now we have a real HWA device connected and -/drivers/uwb/hwa-rc.c:hwarc_probe()/ picks it up, that will set up the -Wire-Adaptor environment and then suck it into the UWB stack's vision of -the world [/drivers/uwb/lc-rc.c:uwb_rc_add()/]. - - * - - [*] The stack should put a new RC to scan for devices - [/uwb_rc_scan()/] so it finds what's available around and tries to - connect to them, but this is policy stuff and should be driven - from user space. As of now, the operator is expected to do it - manually; see the release notes for documentation on the procedure. - -When a dongle is disconnected, /drivers/uwb/hwa-rc.c:hwarc_disconnect()/ -takes time of tearing everything down safely (or not...). - - -On the air: beacons and enumerating the radio neighborhood ----------------------------------------------------------- - -So assuming we have devices and we have agreed for a channel to connect -on (let's say 9), we put the new RC to beacon: - - * - - $ echo 9 0 > /sys/class/uwb_rc/uwb0/beacon - -Now it is visible. If there were other devices in the same radio channel -and beacon group (that's what the zero is for), the dongle's radio -control interface will send beacon notifications on its -notification/event endpoint (NEEP). The beacon notifications are part of -the event stream that is funneled into the API with -/drivers/uwb/neh.c:uwb_rc_neh_grok()/ and delivered to the UWBD, the UWB -daemon through a notification list. - -UWBD wakes up and scans the event list; finds a beacon and adds it to -the BEACON CACHE (/uwb_beca/). If he receives a number of beacons from -the same device, he considers it to be 'onair' and creates a new device -[/drivers/uwb/lc-dev.c:uwbd_dev_onair()/]. Similarly, when no beacons -are received in some time, the device is considered gone and wiped out -[uwbd calls periodically /uwb/beacon.c:uwb_beca_purge()/ that will purge -the beacon cache of dead devices]. - - -Device lists ------------- - -All UWB devices are kept in the list of the struct bus_type uwb_bus_type. - - -Bandwidth allocation --------------------- - -The UWB stack maintains a local copy of DRP availability through -processing of incoming *DRP Availability Change* notifications. This -local copy is currently used to present the current bandwidth -availability to the user through the sysfs file -/sys/class/uwb_rc/uwbx/bw_avail. In the future the bandwidth -availability information will be used by the bandwidth reservation -routines. - -The bandwidth reservation routines are in progress and are thus not -present in the current release. When completed they will enable a user -to initiate DRP reservation requests through interaction with sysfs. 
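
The beacon setup shown with echo above can also be driven from a program. A minimal userspace sketch that writes the same channel and beacon-group pair to the sysfs attribute from the example (path and values taken from the text, error handling kept to the bare minimum):

    #include <stdio.h>

    int main(void)
    {
            /* Same attribute as the shell example above; adjust uwb0 as needed. */
            const char *path = "/sys/class/uwb_rc/uwb0/beacon";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return 1;
            }
            /* "<channel> <beacon group>": channel 9, beacon group 0 */
            fprintf(f, "9 0\n");
            return fclose(f) ? 1 : 0;
    }
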
DRP -reservation requests from remote UWB devices will also be handled. The -bandwidth management done by the UWB stack will include callbacks to the -higher layers will enable the higher layers to use the reservations upon -completion. [Note: The bandwidth reservation work is in progress and -subject to change.] - - -Wireless USB Host Controller drivers -==================================== - -*WARNING* This section needs a lot of work! - -As explained above, there are three different types of HCs in the WUSB -world: HWA-HC, DWA-HC and WHCI-HC. - -HWA-HC and DWA-HC share that they are Wire-Adapters (USB or WUSB -connected controllers), and their transfer management system is almost -identical. So is their notification delivery system. - -HWA-HC and WHCI-HC share that they are both WUSB host controllers, so -they have to deal with WUSB device life cycle and maintenance, wireless -root-hub - -HWA exposes a Host Controller interface (HWA-HC 0xe0/02/02). This has -three endpoints (Notifications, Data Transfer In and Data Transfer -Out--known as NEP, DTI and DTO in the code). - -We reserve UWB bandwidth for our Wireless USB Cluster, create a Cluster -ID and tell the HC to use all that. Then we start it. This means the HC -starts sending MMCs. - - * - - The MMCs are blocks of data defined somewhere in the WUSB1.0 spec - that define a stream in the UWB channel time allocated for sending - WUSB IEs (host to device commands/notifications) and Device - Notifications (device initiated to host). Each host defines a - unique Wireless USB cluster through MMCs. Devices can connect to a - single cluster at the time. The IEs are Information Elements, and - among them are the bandwidth allocations that tell each device - when can they transmit or receive. - -Now it all depends on external stimuli. - -New device connection ---------------------- - -A new device pops up, it scans the radio looking for MMCs that give out -the existence of Wireless USB channels. Once one (or more) are found, -selects which one to connect to. Sends a /DN_Connect/ (device -notification connect) during the DNTS (Device Notification Time -Slot--announced in the MMCs - -HC picks the /DN_Connect/ out (nep module sends to notif.c for delivery -into /devconnect/). This process starts the authentication process for -the device. First we allocate a /fake port/ and assign an -unauthenticated address (128 to 255--what we really do is -0x80 | fake_port_idx). We fiddle with the fake port status and /hub_wq/ -sees a new connection, so he moves on to enable the fake port with a reset. - -So now we are in the reset path -- we know we have a non-yet enumerated -device with an unauthorized address; we ask user space to authenticate -(FIXME: not yet done, similar to bluetooth pairing), then we do the key -exchange (FIXME: not yet done) and issue a /set address 0/ to bring the -device to the default state. Device is authenticated. - -From here, the USB stack takes control through the usb_hcd ops. hub_wq -has seen the port status changes, as we have been toggling them. It will -start enumerating and doing transfers through usb_hcd->urb_enqueue() to -read descriptors and move our data. - -Device life cycle and keep alives ---------------------------------- - -Every time there is a successful transfer to/from a device, we update a -per-device activity timestamp. 
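
The unauthenticated-address rule quoted above, 0x80 | fake_port_idx, is worth restating as a tiny worked example; the helper name below is hypothetical:

    #include <stdio.h>

    /* Restates the rule from the text: unauthenticated devices live at
     * 0x80 | fake_port_idx, i.e. somewhere in the 128..255 range. */
    static unsigned int unauth_addr(unsigned int fake_port_idx)
    {
            return 0x80u | fake_port_idx;
    }

    int main(void)
    {
            /* Fake port 3 gets unauthenticated address 0x83 (131). */
            printf("port %u -> unauth addr 0x%02x\n", 3u, unauth_addr(3u));
            return 0;
    }
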
If not, every now and then we check and -if the activity timestamp gets old, we ping the device by sending it a -Keep Alive IE; it responds with a /DN_Alive/ pong during the DNTS (this -arrives to us as a notification through -devconnect.c:wusb_handle_dn_alive(). If a device times out, we -disconnect it from the system (cleaning up internal information and -toggling the bits in the fake hub port, which kicks hub_wq into removing -the rest of the stuff). - -This is done through devconnect:__wusb_check_devs(), which will scan the -device list looking for whom needs refreshing. - -If the device wants to disconnect, it will either die (ugly) or send a -/DN_Disconnect/ that will prompt a disconnection from the system. - -Sending and receiving data --------------------------- - -Data is sent and received through /Remote Pipes/ (rpipes). An rpipe is -/aimed/ at an endpoint in a WUSB device. This is the same for HWAs and -DWAs. - -Each HC has a number of rpipes and buffers that can be assigned to them; -when doing a data transfer (xfer), first the rpipe has to be aimed and -prepared (buffers assigned), then we can start queueing requests for -data in or out. - -Data buffers have to be segmented out before sending--so we send first a -header (segment request) and then if there is any data, a data buffer -immediately after to the DTI interface (yep, even the request). If our -buffer is bigger than the max segment size, then we just do multiple -requests. - -[This sucks, because doing USB scatter gatter in Linux is resource -intensive, if any...not that the current approach is not. It just has to -be cleaned up a lot :)]. - -If reading, we don't send data buffers, just the segment headers saying -we want to read segments. - -When the xfer is executed, we receive a notification that says data is -ready in the DTI endpoint (handled through -xfer.c:wa_handle_notif_xfer()). In there we read from the DTI endpoint a -descriptor that gives us the status of the transfer, its identification -(given when we issued it) and the segment number. If it was a data read, -we issue another URB to read into the destination buffer the chunk of -data coming out of the remote endpoint. Done, wait for the next guy. The -callbacks for the URBs issued from here are the ones that will declare -the xfer complete at some point and call its callback. - -Seems simple, but the implementation is not trivial. - - * - - *WARNING* Old!! - -The main xfer descriptor, wa_xfer (equivalent to a URB) contains an -array of segments, tallys on segments and buffers and callback -information. Buried in there is a lot of URBs for executing the segments -and buffer transfers. - -For OUT xfers, there is an array of segments, one URB for each, another -one of buffer URB. When submitting, we submit URBs for segment request -1, buffer 1, segment 2, buffer 2...etc. Then we wait on the DTI for xfer -result data; when all the segments are complete, we call the callback to -finalize the transfer. - -For IN xfers, we only issue URBs for the segments we want to read and -then wait for the xfer result data. - -URB mapping into xfers -^^^^^^^^^^^^^^^^^^^^^^ - -This is done by hwahc_op_urb_[en|de]queue(). In enqueue() we aim an -rpipe to the endpoint where we have to transmit, create a transfer -context (wa_xfer) and submit it. When the xfer is done, our callback is -called and we assign the status bits and release the xfer resources. - -In dequeue() we are basically cancelling/aborting the transfer. 
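
The segmentation rule described above, splitting anything larger than the maximum segment size into multiple requests, boils down to a short loop. An illustrative sketch with a made-up segment size, not the wa-xfer code itself:

    #include <stdio.h>
    #include <stddef.h>

    #define FAKE_MAX_SEG 4096u      /* made-up maximum segment size */

    /* Walk a buffer and report the per-segment sizes that would each get
     * their own "segment request (+ data)" submission. */
    static unsigned int count_segments(size_t len)
    {
            unsigned int nseg = 0;
            size_t off;

            for (off = 0; off < len; off += FAKE_MAX_SEG) {
                    size_t chunk = len - off < FAKE_MAX_SEG ? len - off : FAKE_MAX_SEG;

                    printf("segment %u: %zu bytes\n", nseg, chunk);
                    nseg++;
            }
            return nseg;
    }

    int main(void)
    {
            /* A 10000-byte buffer needs three requests: 4096 + 4096 + 1808. */
            printf("total segments: %u\n", count_segments(10000));
            return 0;
    }
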
We issue -a xfer abort request to the HC, cancel all the URBs we had submitted -and not yet done and when all that is done, the xfer callback will be -called--this will call the URB callback. - - -Glossary -======== - -*DWA* -- Device Wire Adapter - -USB host, wired for downstream devices, upstream connects wirelessly -with Wireless USB. - -*EVENT* -- Response to a command on the NEEP - -*HWA* -- Host Wire Adapter / USB dongle for UWB and Wireless USB - -*NEH* -- Notification/Event Handle - -Handle/file descriptor for receiving notifications or events. The WA -code requires you to get one of this to listen for notifications or -events on the NEEP. - -*NEEP* -- Notification/Event EndPoint - -Stuff related to the management of the first endpoint of a HWA USB -dongle that is used to deliver an stream of events and notifications to -the host. - -*NOTIFICATION* -- Message coming in the NEEP as response to something. - -*RC* -- Radio Control - -Design-overview.txt-1.8 (last edited 2006-11-04 12:22:24 by -InakyPerezGonzalez) diff --git a/drivers/staging/wusbcore/Kconfig b/drivers/staging/wusbcore/Kconfig deleted file mode 100644 index a559d023b508..000000000000 --- a/drivers/staging/wusbcore/Kconfig +++ /dev/null @@ -1,39 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Wireless USB Core configuration -# -config USB_WUSB - tristate "Enable Wireless USB extensions" - depends on UWB && USB - select CRYPTO - select CRYPTO_AES - select CRYPTO_CCM - help - Enable the host-side support for Wireless USB. - - To compile this support select Y (built in). It is safe to - select even if you don't have the hardware. - -config USB_WUSB_CBAF - tristate "Support WUSB Cable Based Association (CBA)" - depends on USB - help - Some WUSB devices support Cable Based Association. It's used to - enable the secure communication between the host and the - device. - - Enable this option if your WUSB device must to be connected - via wired USB before establishing a wireless link. - - It is safe to select even if you don't have a compatible - hardware. - -config USB_WUSB_CBAF_DEBUG - bool "Enable CBA debug messages" - depends on USB_WUSB_CBAF - help - Say Y here if you want the CBA to produce a bunch of debug messages - to the system log. Select this if you are having a problem with - CBA support and want to see more of what is going on. - -source "drivers/staging/wusbcore/host/Kconfig" diff --git a/drivers/staging/wusbcore/Makefile b/drivers/staging/wusbcore/Makefile deleted file mode 100644 index b47b874268ac..000000000000 --- a/drivers/staging/wusbcore/Makefile +++ /dev/null @@ -1,28 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -ccflags-$(CONFIG_USB_WUSB_CBAF_DEBUG) := -DDEBUG - -obj-$(CONFIG_USB_WUSB) += wusbcore.o -obj-$(CONFIG_USB_HWA_HCD) += wusb-wa.o -obj-$(CONFIG_USB_WUSB_CBAF) += wusb-cbaf.o - - -wusbcore-y := \ - crypto.o \ - devconnect.o \ - dev-sysfs.o \ - mmc.o \ - pal.o \ - rh.o \ - reservation.o \ - security.o \ - wusbhc.o - -wusb-cbaf-y := cbaf.o - -wusb-wa-y := \ - wa-hc.o \ - wa-nep.o \ - wa-rpipe.o \ - wa-xfer.o - -obj-y += host/ diff --git a/drivers/staging/wusbcore/TODO b/drivers/staging/wusbcore/TODO deleted file mode 100644 index abae57000534..000000000000 --- a/drivers/staging/wusbcore/TODO +++ /dev/null @@ -1,8 +0,0 @@ -TODO: Remove in late 2019 unless there are users - -There seems to not be any real wireless USB devices anywhere in the wild -anymore. It turned out to be a failed technology :( - -This will be removed from the tree if no one objects. 
- -Greg Kroah-Hartman <gregkh@linuxfoundation.org> diff --git a/drivers/staging/wusbcore/cbaf.c b/drivers/staging/wusbcore/cbaf.c deleted file mode 100644 index 57062eaf7558..000000000000 --- a/drivers/staging/wusbcore/cbaf.c +++ /dev/null @@ -1,645 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB - Cable Based Association - * - * - * Copyright (C) 2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. - * - * WUSB devices have to be paired (associated in WUSB lingo) so - * that they can connect to the system. - * - * One way of pairing is using CBA-Cable Based Association. First - * time you plug the device with a cable, association is done between - * host and device and subsequent times, you can connect wirelessly - * without having to associate again. That's the idea. - * - * This driver does nothing Earth shattering. It just provides an - * interface to chat with the wire-connected device so we can get a - * CDID (device ID) that might have been previously associated to a - * CHID (host ID) and to set up a new <CHID,CDID,CK> triplet - * (connection context), with the CK being the secret, or connection - * key. This is the pairing data. - * - * When a device with the CBA capability connects, the probe routine - * just creates a bunch of sysfs files that a user space enumeration - * manager uses to allow it to connect wirelessly to the system or not. - * - * The process goes like this: - * - * 1. Device plugs, cbaf is loaded, notifications happen. - * - * 2. The connection manager (CM) sees a device with CBAF capability - * (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE). - * - * 3. The CM writes the host name, supported band groups, and the CHID - * (host ID) into the wusb_host_name, wusb_host_band_groups and - * wusb_chid files. These get sent to the device and the CDID (if - * any) for this host is requested. - * - * 4. The CM can verify that the device's supported band groups - * (wusb_device_band_groups) are compatible with the host. - * - * 5. The CM reads the wusb_cdid file. - * - * 6. The CM looks up its database - * - * 6.1 If it has a matching CHID,CDID entry, the device has been - * authorized before (paired) and nothing further needs to be - * done. - * - * 6.2 If the CDID is zero (or the CM doesn't find a matching CDID in - * its database), the device is assumed to be not known. The CM - * may associate the host with device by: writing a randomly - * generated CDID to wusb_cdid and then a random CK to wusb_ck - * (this uploads the new CC to the device). - * - * CMD may choose to prompt the user before associating with a new - * device. - * - * 7. Device is unplugged. - * - * When the device tries to connect wirelessly, it will present its - * CDID to the WUSB host controller. The CM will query the - * database. If the CHID/CDID pair found, it will (with a 4-way - * handshake) challenge the device to demonstrate it has the CK secret - * key (from our database) without actually exchanging it. Once - * satisfied, crypto keys are derived from the CK, the device is - * connected and all communication is encrypted. - * - * References: - * [WUSB-AM] Association Models Supplement to the Certified Wireless - * Universal Serial Bus Specification, version 1.0. 
- */ -#include <linux/module.h> -#include <linux/ctype.h> -#include <linux/usb.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/random.h> -#include <linux/slab.h> -#include <linux/mutex.h> -#include "../uwb/uwb.h" -#include "include/wusb.h" -#include "include/association.h" - -#define CBA_NAME_LEN 0x40 /* [WUSB-AM] table 4-7 */ - -/* An instance of a Cable-Based-Association-Framework device */ -struct cbaf { - struct usb_device *usb_dev; - struct usb_interface *usb_iface; - void *buffer; - size_t buffer_size; - - struct wusb_ckhdid chid; - char host_name[CBA_NAME_LEN]; - u16 host_band_groups; - - struct wusb_ckhdid cdid; - char device_name[CBA_NAME_LEN]; - u16 device_band_groups; - - struct wusb_ckhdid ck; -}; - -/* - * Verify that a CBAF USB-interface has what we need - * - * According to [WUSB-AM], CBA devices should provide at least two - * interfaces: - * - RETRIEVE_HOST_INFO - * - ASSOCIATE - * - * If the device doesn't provide these interfaces, we do not know how - * to deal with it. - */ -static int cbaf_check(struct cbaf *cbaf) -{ - int result; - struct device *dev = &cbaf->usb_iface->dev; - struct wusb_cbaf_assoc_info *assoc_info; - struct wusb_cbaf_assoc_request *assoc_request; - size_t assoc_size; - void *itr, *top; - int ar_rhi = 0, ar_assoc = 0; - - result = usb_control_msg( - cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0), - CBAF_REQ_GET_ASSOCIATION_INFORMATION, - USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - 0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, - cbaf->buffer, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT); - if (result < 0) { - dev_err(dev, "Cannot get available association types: %d\n", - result); - return result; - } - - assoc_info = cbaf->buffer; - if (result < sizeof(*assoc_info)) { - dev_err(dev, "Not enough data to decode association info " - "header (%zu vs %zu bytes required)\n", - (size_t)result, sizeof(*assoc_info)); - return result; - } - - assoc_size = le16_to_cpu(assoc_info->Length); - if (result < assoc_size) { - dev_err(dev, "Not enough data to decode association info " - "(%zu vs %zu bytes required)\n", - (size_t)assoc_size, sizeof(*assoc_info)); - return result; - } - /* - * From now on, we just verify, but won't error out unless we - * don't find the AR_TYPE_WUSB_{RETRIEVE_HOST_INFO,ASSOCIATE} - * types. - */ - itr = cbaf->buffer + sizeof(*assoc_info); - top = cbaf->buffer + assoc_size; - dev_dbg(dev, "Found %u association requests (%zu bytes)\n", - assoc_info->NumAssociationRequests, assoc_size); - - while (itr < top) { - u16 ar_type, ar_subtype; - u32 ar_size; - const char *ar_name; - - assoc_request = itr; - - if (top - itr < sizeof(*assoc_request)) { - dev_err(dev, "Not enough data to decode association " - "request (%zu vs %zu bytes needed)\n", - top - itr, sizeof(*assoc_request)); - break; - } - - ar_type = le16_to_cpu(assoc_request->AssociationTypeId); - ar_subtype = le16_to_cpu(assoc_request->AssociationSubTypeId); - ar_size = le32_to_cpu(assoc_request->AssociationTypeInfoSize); - ar_name = "unknown"; - - switch (ar_type) { - case AR_TYPE_WUSB: - /* Verify we have what is mandated by [WUSB-AM]. 
*/ - switch (ar_subtype) { - case AR_TYPE_WUSB_RETRIEVE_HOST_INFO: - ar_name = "RETRIEVE_HOST_INFO"; - ar_rhi = 1; - break; - case AR_TYPE_WUSB_ASSOCIATE: - /* send assoc data */ - ar_name = "ASSOCIATE"; - ar_assoc = 1; - break; - } - break; - } - - dev_dbg(dev, "Association request #%02u: 0x%04x/%04x " - "(%zu bytes): %s\n", - assoc_request->AssociationDataIndex, ar_type, - ar_subtype, (size_t)ar_size, ar_name); - - itr += sizeof(*assoc_request); - } - - if (!ar_rhi) { - dev_err(dev, "Missing RETRIEVE_HOST_INFO association " - "request\n"); - return -EINVAL; - } - if (!ar_assoc) { - dev_err(dev, "Missing ASSOCIATE association request\n"); - return -EINVAL; - } - - return 0; -} - -static const struct wusb_cbaf_host_info cbaf_host_info_defaults = { - .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId, - .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB), - .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId, - .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO), - .CHID_hdr = WUSB_AR_CHID, - .LangID_hdr = WUSB_AR_LangID, - .HostFriendlyName_hdr = WUSB_AR_HostFriendlyName, -}; - -/* Send WUSB host information (CHID and name) to a CBAF device */ -static int cbaf_send_host_info(struct cbaf *cbaf) -{ - struct wusb_cbaf_host_info *hi; - size_t name_len; - size_t hi_size; - - hi = cbaf->buffer; - memset(hi, 0, sizeof(*hi)); - *hi = cbaf_host_info_defaults; - hi->CHID = cbaf->chid; - hi->LangID = 0; /* FIXME: I guess... */ - strlcpy(hi->HostFriendlyName, cbaf->host_name, CBA_NAME_LEN); - name_len = strlen(cbaf->host_name); - hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len); - hi_size = sizeof(*hi) + name_len; - - return usb_control_msg(cbaf->usb_dev, - usb_sndctrlpipe(cbaf->usb_dev, 0), - CBAF_REQ_SET_ASSOCIATION_RESPONSE, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - 0x0101, - cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, - hi, hi_size, USB_CTRL_SET_TIMEOUT); -} - -/* - * Get device's information (CDID) associated to CHID - * - * The device will return it's information (CDID, name, bandgroups) - * associated to the CHID we have set before, or 0 CDID and default - * name and bandgroup if no CHID set or unknown. - */ -static int cbaf_cdid_get(struct cbaf *cbaf) -{ - int result; - struct device *dev = &cbaf->usb_iface->dev; - struct wusb_cbaf_device_info *di; - size_t needed; - - di = cbaf->buffer; - result = usb_control_msg( - cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0), - CBAF_REQ_GET_ASSOCIATION_REQUEST, - USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - 0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, - di, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT); - if (result < 0) { - dev_err(dev, "Cannot request device information: %d\n", - result); - return result; - } - - needed = result < sizeof(*di) ? 
sizeof(*di) : le32_to_cpu(di->Length); - if (result < needed) { - dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs " - "%zu bytes needed)\n", (size_t)result, needed); - return -ENOENT; - } - - strlcpy(cbaf->device_name, di->DeviceFriendlyName, CBA_NAME_LEN); - cbaf->cdid = di->CDID; - cbaf->device_band_groups = le16_to_cpu(di->BandGroups); - - return 0; -} - -static ssize_t cbaf_wusb_chid_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct usb_interface *iface = to_usb_interface(dev); - struct cbaf *cbaf = usb_get_intfdata(iface); - - return sprintf(buf, "%16ph\n", cbaf->chid.data); -} - -static ssize_t cbaf_wusb_chid_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - ssize_t result; - struct usb_interface *iface = to_usb_interface(dev); - struct cbaf *cbaf = usb_get_intfdata(iface); - - result = sscanf(buf, - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx", - &cbaf->chid.data[0] , &cbaf->chid.data[1], - &cbaf->chid.data[2] , &cbaf->chid.data[3], - &cbaf->chid.data[4] , &cbaf->chid.data[5], - &cbaf->chid.data[6] , &cbaf->chid.data[7], - &cbaf->chid.data[8] , &cbaf->chid.data[9], - &cbaf->chid.data[10], &cbaf->chid.data[11], - &cbaf->chid.data[12], &cbaf->chid.data[13], - &cbaf->chid.data[14], &cbaf->chid.data[15]); - - if (result != 16) - return -EINVAL; - - result = cbaf_send_host_info(cbaf); - if (result < 0) - return result; - result = cbaf_cdid_get(cbaf); - if (result < 0) - return result; - return size; -} -static DEVICE_ATTR(wusb_chid, 0600, cbaf_wusb_chid_show, cbaf_wusb_chid_store); - -static ssize_t cbaf_wusb_host_name_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct usb_interface *iface = to_usb_interface(dev); - struct cbaf *cbaf = usb_get_intfdata(iface); - - return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->host_name); -} - -static ssize_t cbaf_wusb_host_name_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - ssize_t result; - struct usb_interface *iface = to_usb_interface(dev); - struct cbaf *cbaf = usb_get_intfdata(iface); - - result = sscanf(buf, "%63s", cbaf->host_name); - if (result != 1) - return -EINVAL; - - return size; -} -static DEVICE_ATTR(wusb_host_name, 0600, cbaf_wusb_host_name_show, - cbaf_wusb_host_name_store); - -static ssize_t cbaf_wusb_host_band_groups_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct usb_interface *iface = to_usb_interface(dev); - struct cbaf *cbaf = usb_get_intfdata(iface); - - return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->host_band_groups); -} - -static ssize_t cbaf_wusb_host_band_groups_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - ssize_t result; - struct usb_interface *iface = to_usb_interface(dev); - struct cbaf *cbaf = usb_get_intfdata(iface); - u16 band_groups = 0; - - result = sscanf(buf, "%04hx", &band_groups); - if (result != 1) - return -EINVAL; - - cbaf->host_band_groups = band_groups; - - return size; -} - -static DEVICE_ATTR(wusb_host_band_groups, 0600, - cbaf_wusb_host_band_groups_show, - cbaf_wusb_host_band_groups_store); - -static const struct wusb_cbaf_device_info cbaf_device_info_defaults = { - .Length_hdr = WUSB_AR_Length, - .CDID_hdr = WUSB_AR_CDID, - .BandGroups_hdr = WUSB_AR_BandGroups, - .LangID_hdr = WUSB_AR_LangID, - .DeviceFriendlyName_hdr = WUSB_AR_DeviceFriendlyName, -}; - -static 
ssize_t cbaf_wusb_cdid_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct usb_interface *iface = to_usb_interface(dev); - struct cbaf *cbaf = usb_get_intfdata(iface); - - return sprintf(buf, "%16ph\n", cbaf->cdid.data); -} - -static ssize_t cbaf_wusb_cdid_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - ssize_t result; - struct usb_interface *iface = to_usb_interface(dev); - struct cbaf *cbaf = usb_get_intfdata(iface); - struct wusb_ckhdid cdid; - - result = sscanf(buf, - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx", - &cdid.data[0] , &cdid.data[1], - &cdid.data[2] , &cdid.data[3], - &cdid.data[4] , &cdid.data[5], - &cdid.data[6] , &cdid.data[7], - &cdid.data[8] , &cdid.data[9], - &cdid.data[10], &cdid.data[11], - &cdid.data[12], &cdid.data[13], - &cdid.data[14], &cdid.data[15]); - if (result != 16) - return -EINVAL; - - cbaf->cdid = cdid; - - return size; -} -static DEVICE_ATTR(wusb_cdid, 0600, cbaf_wusb_cdid_show, cbaf_wusb_cdid_store); - -static ssize_t cbaf_wusb_device_band_groups_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct usb_interface *iface = to_usb_interface(dev); - struct cbaf *cbaf = usb_get_intfdata(iface); - - return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->device_band_groups); -} - -static DEVICE_ATTR(wusb_device_band_groups, 0600, - cbaf_wusb_device_band_groups_show, - NULL); - -static ssize_t cbaf_wusb_device_name_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct usb_interface *iface = to_usb_interface(dev); - struct cbaf *cbaf = usb_get_intfdata(iface); - - return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->device_name); -} -static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL); - -static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = { - .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId, - .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB), - .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId, - .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE), - .Length_hdr = WUSB_AR_Length, - .Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)), - .ConnectionContext_hdr = WUSB_AR_ConnectionContext, - .BandGroups_hdr = WUSB_AR_BandGroups, -}; - -static const struct wusb_cbaf_cc_data_fail cbaf_cc_data_fail_defaults = { - .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId, - .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId, - .Length_hdr = WUSB_AR_Length, - .AssociationStatus_hdr = WUSB_AR_AssociationStatus, -}; - -/* - * Send a new CC to the device. 
- */ -static int cbaf_cc_upload(struct cbaf *cbaf) -{ - int result; - struct device *dev = &cbaf->usb_iface->dev; - struct wusb_cbaf_cc_data *ccd; - - ccd = cbaf->buffer; - *ccd = cbaf_cc_data_defaults; - ccd->CHID = cbaf->chid; - ccd->CDID = cbaf->cdid; - ccd->CK = cbaf->ck; - ccd->BandGroups = cpu_to_le16(cbaf->host_band_groups); - - dev_dbg(dev, "Trying to upload CC:\n"); - dev_dbg(dev, " CHID %16ph\n", ccd->CHID.data); - dev_dbg(dev, " CDID %16ph\n", ccd->CDID.data); - dev_dbg(dev, " Bandgroups 0x%04x\n", cbaf->host_band_groups); - - result = usb_control_msg( - cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0), - CBAF_REQ_SET_ASSOCIATION_RESPONSE, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - 0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, - ccd, sizeof(*ccd), USB_CTRL_SET_TIMEOUT); - - return result; -} - -static ssize_t cbaf_wusb_ck_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - ssize_t result; - struct usb_interface *iface = to_usb_interface(dev); - struct cbaf *cbaf = usb_get_intfdata(iface); - - result = sscanf(buf, - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx", - &cbaf->ck.data[0] , &cbaf->ck.data[1], - &cbaf->ck.data[2] , &cbaf->ck.data[3], - &cbaf->ck.data[4] , &cbaf->ck.data[5], - &cbaf->ck.data[6] , &cbaf->ck.data[7], - &cbaf->ck.data[8] , &cbaf->ck.data[9], - &cbaf->ck.data[10], &cbaf->ck.data[11], - &cbaf->ck.data[12], &cbaf->ck.data[13], - &cbaf->ck.data[14], &cbaf->ck.data[15]); - if (result != 16) - return -EINVAL; - - result = cbaf_cc_upload(cbaf); - if (result < 0) - return result; - - return size; -} -static DEVICE_ATTR(wusb_ck, 0600, NULL, cbaf_wusb_ck_store); - -static struct attribute *cbaf_dev_attrs[] = { - &dev_attr_wusb_host_name.attr, - &dev_attr_wusb_host_band_groups.attr, - &dev_attr_wusb_chid.attr, - &dev_attr_wusb_cdid.attr, - &dev_attr_wusb_device_name.attr, - &dev_attr_wusb_device_band_groups.attr, - &dev_attr_wusb_ck.attr, - NULL, -}; - -static const struct attribute_group cbaf_dev_attr_group = { - .name = NULL, /* we want them in the same directory */ - .attrs = cbaf_dev_attrs, -}; - -static int cbaf_probe(struct usb_interface *iface, - const struct usb_device_id *id) -{ - struct cbaf *cbaf; - struct device *dev = &iface->dev; - int result = -ENOMEM; - - cbaf = kzalloc(sizeof(*cbaf), GFP_KERNEL); - if (cbaf == NULL) - goto error_kzalloc; - cbaf->buffer = kmalloc(512, GFP_KERNEL); - if (cbaf->buffer == NULL) - goto error_kmalloc_buffer; - - cbaf->buffer_size = 512; - cbaf->usb_dev = usb_get_dev(interface_to_usbdev(iface)); - cbaf->usb_iface = usb_get_intf(iface); - result = cbaf_check(cbaf); - if (result < 0) { - dev_err(dev, "This device is not WUSB-CBAF compliant and is not supported yet.\n"); - goto error_check; - } - - result = sysfs_create_group(&dev->kobj, &cbaf_dev_attr_group); - if (result < 0) { - dev_err(dev, "Can't register sysfs attr group: %d\n", result); - goto error_create_group; - } - usb_set_intfdata(iface, cbaf); - return 0; - -error_create_group: -error_check: - usb_put_intf(iface); - usb_put_dev(cbaf->usb_dev); - kfree(cbaf->buffer); -error_kmalloc_buffer: - kfree(cbaf); -error_kzalloc: - return result; -} - -static void cbaf_disconnect(struct usb_interface *iface) -{ - struct cbaf *cbaf = usb_get_intfdata(iface); - struct device *dev = &iface->dev; - sysfs_remove_group(&dev->kobj, &cbaf_dev_attr_group); - usb_set_intfdata(iface, NULL); - usb_put_intf(iface); - 
usb_put_dev(cbaf->usb_dev); - kfree(cbaf->buffer); - /* paranoia: clean up crypto keys */ - kzfree(cbaf); -} - -static const struct usb_device_id cbaf_id_table[] = { - { USB_INTERFACE_INFO(0xef, 0x03, 0x01), }, - { }, -}; -MODULE_DEVICE_TABLE(usb, cbaf_id_table); - -static struct usb_driver cbaf_driver = { - .name = "wusb-cbaf", - .id_table = cbaf_id_table, - .probe = cbaf_probe, - .disconnect = cbaf_disconnect, -}; - -module_usb_driver(cbaf_driver); - -MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); -MODULE_DESCRIPTION("Wireless USB Cable Based Association"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/wusbcore/crypto.c b/drivers/staging/wusbcore/crypto.c deleted file mode 100644 index d7d55ed19a98..000000000000 --- a/drivers/staging/wusbcore/crypto.c +++ /dev/null @@ -1,441 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Ultra Wide Band - * AES-128 CCM Encryption - * - * Copyright (C) 2007 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * We don't do any encryption here; we use the Linux Kernel's AES-128 - * crypto modules to construct keys and payload blocks in a way - * defined by WUSB1.0[6]. Check the erratas, as typos are are patched - * there. - * - * Thanks a zillion to John Keys for his help and clarifications over - * the designed-by-a-committee text. - * - * So the idea is that there is this basic Pseudo-Random-Function - * defined in WUSB1.0[6.5] which is the core of everything. It works - * by tweaking some blocks, AES crypting them and then xoring - * something else with them (this seems to be called CBC(AES) -- can - * you tell I know jack about crypto?). So we just funnel it into the - * Linux Crypto API. - * - * We leave a crypto test module so we can verify that vectors match, - * every now and then. - * - * Block size: 16 bytes -- AES seems to do things in 'block sizes'. I - * am learning a lot... - * - * Conveniently, some data structures that need to be - * funneled through AES are...16 bytes in size! - */ - -#include <crypto/aes.h> -#include <crypto/algapi.h> -#include <crypto/hash.h> -#include <crypto/skcipher.h> -#include <linux/crypto.h> -#include <linux/module.h> -#include <linux/err.h> -#include <linux/slab.h> -#include <linux/scatterlist.h> -#include "../uwb/uwb.h" -#include "include/wusb.h" - -static int debug_crypto_verify; - -module_param(debug_crypto_verify, int, 0); -MODULE_PARM_DESC(debug_crypto_verify, "verify the key generation algorithms"); - -static void wusb_key_dump(const void *buf, size_t len) -{ - print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_OFFSET, 16, 1, - buf, len, 0); -} - -/* - * Block of data, as understood by AES-CCM - * - * The code assumes this structure is nothing but a 16 byte array - * (packed in a struct to avoid common mess ups that I usually do with - * arrays and enforcing type checking). - */ -struct aes_ccm_block { - u8 data[16]; -} __attribute__((packed)); - -/* - * Counter-mode Blocks (WUSB1.0[6.4]) - * - * According to CCM (or so it seems), for the purpose of calculating - * the MIC, the message is broken in N counter-mode blocks, B0, B1, - * ... BN. - * - * B0 contains flags, the CCM nonce and l(m). - * - * B1 contains l(a), the MAC header, the encryption offset and padding. - * - * If EO is nonzero, additional blocks are built from payload bytes - * until EO is exhausted (FIXME: padding to 16 bytes, I guess). The - * padding is not xmitted. 
- */ - -/* WUSB1.0[T6.4] */ -struct aes_ccm_b0 { - u8 flags; /* 0x59, per CCM spec */ - struct aes_ccm_nonce ccm_nonce; - __be16 lm; -} __attribute__((packed)); - -/* WUSB1.0[T6.5] */ -struct aes_ccm_b1 { - __be16 la; - u8 mac_header[10]; - __le16 eo; - u8 security_reserved; /* This is always zero */ - u8 padding; /* 0 */ -} __attribute__((packed)); - -/* - * Encryption Blocks (WUSB1.0[6.4.4]) - * - * CCM uses Ax blocks to generate a keystream with which the MIC and - * the message's payload are encoded. A0 always encrypts/decrypts the - * MIC. Ax (x>0) are used for the successive payload blocks. - * - * The x is the counter, and is increased for each block. - */ -struct aes_ccm_a { - u8 flags; /* 0x01, per CCM spec */ - struct aes_ccm_nonce ccm_nonce; - __be16 counter; /* Value of x */ -} __attribute__((packed)); - -/* Scratch space for MAC calculations. */ -struct wusb_mac_scratch { - struct aes_ccm_b0 b0; - struct aes_ccm_b1 b1; - struct aes_ccm_a ax; -}; - -/* - * CC-MAC function WUSB1.0[6.5] - * - * Take a data string and produce the encrypted CBC Counter-mode MIC - * - * Note the names for most function arguments are made to (more or - * less) match those used in the pseudo-function definition given in - * WUSB1.0[6.5]. - * - * @tfm_cbc: CBC(AES) blkcipher handle (initialized) - * - * @tfm_aes: AES cipher handle (initialized) - * - * @mic: buffer for placing the computed MIC (Message Integrity - * Code). This is exactly 8 bytes, and we expect the buffer to - * be at least eight bytes in length. - * - * @key: 128 bit symmetric key - * - * @n: CCM nonce - * - * @a: ASCII string, 14 bytes long (I guess zero padded if needed; - * we use exactly 14 bytes). - * - * @b: data stream to be processed - * - * @blen: size of b... - * - * Still not very clear how this is done, but looks like this: we - * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with - * @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we - * take the payload and divide it in blocks (16 bytes), xor them with - * the previous crypto result (16 bytes) and crypt it, repeat the next - * block with the output of the previous one, rinse wash. So we use - * the CBC-MAC(AES) shash, that does precisely that. The IV (Initial - * Vector) is 16 bytes and is set to zero, so - * - * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and - * using the 14 bytes of @a to fill up - * b1.{mac_header,e0,security_reserved,padding}. - * - * NOTE: The definition of l(a) in WUSB1.0[6.5] vs the definition of - * l(m) is orthogonal, they bear no relationship, so it is not - * in conflict with the parameter's relation that - * WUSB1.0[6.4.2]) defines. - * - * NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in - * first errata released on 2005/07. - * - * NOTE: we need to clean IV to zero at each invocation to make sure - * we start with a fresh empty Initial Vector, so that the CBC - * works ok. - * - * NOTE: blen is not aligned to a block size, we'll pad zeros, that's - * what sg[4] is for. Maybe there is a smarter way to do this. 
- */ -static int wusb_ccm_mac(struct crypto_shash *tfm_cbcmac, - struct wusb_mac_scratch *scratch, - void *mic, - const struct aes_ccm_nonce *n, - const struct aes_ccm_label *a, const void *b, - size_t blen) -{ - SHASH_DESC_ON_STACK(desc, tfm_cbcmac); - u8 iv[AES_BLOCK_SIZE]; - - /* - * These checks should be compile time optimized out - * ensure @a fills b1's mac_header and following fields - */ - BUILD_BUG_ON(sizeof(*a) != sizeof(scratch->b1) - sizeof(scratch->b1.la)); - BUILD_BUG_ON(sizeof(scratch->b0) != sizeof(struct aes_ccm_block)); - BUILD_BUG_ON(sizeof(scratch->b1) != sizeof(struct aes_ccm_block)); - BUILD_BUG_ON(sizeof(scratch->ax) != sizeof(struct aes_ccm_block)); - - /* Setup B0 */ - scratch->b0.flags = 0x59; /* Format B0 */ - scratch->b0.ccm_nonce = *n; - scratch->b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */ - - /* Setup B1 - * - * The WUSB spec is anything but clear! WUSB1.0[6.5] - * says that to initialize B1 from A with 'l(a) = blen + - * 14'--after clarification, it means to use A's contents - * for MAC Header, EO, sec reserved and padding. - */ - scratch->b1.la = cpu_to_be16(blen + 14); - memcpy(&scratch->b1.mac_header, a, sizeof(*a)); - - desc->tfm = tfm_cbcmac; - crypto_shash_init(desc); - crypto_shash_update(desc, (u8 *)&scratch->b0, sizeof(scratch->b0) + - sizeof(scratch->b1)); - crypto_shash_finup(desc, b, blen, iv); - - /* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5] - * The procedure is to AES crypt the A0 block and XOR the MIC - * Tag against it; we only do the first 8 bytes and place it - * directly in the destination buffer. - */ - scratch->ax.flags = 0x01; /* as per WUSB 1.0 spec */ - scratch->ax.ccm_nonce = *n; - scratch->ax.counter = 0; - - /* reuse the CBC-MAC transform to perform the single block encryption */ - crypto_shash_digest(desc, (u8 *)&scratch->ax, sizeof(scratch->ax), - (u8 *)&scratch->ax); - - crypto_xor_cpy(mic, (u8 *)&scratch->ax, iv, 8); - - return 8; -} - -/* - * WUSB Pseudo Random Function (WUSB1.0[6.5]) - * - * @b: buffer to the source data; cannot be a global or const local - * (will confuse the scatterlists) - */ -ssize_t wusb_prf(void *out, size_t out_size, - const u8 key[16], const struct aes_ccm_nonce *_n, - const struct aes_ccm_label *a, - const void *b, size_t blen, size_t len) -{ - ssize_t result, bytes = 0, bitr; - struct aes_ccm_nonce n = *_n; - struct crypto_shash *tfm_cbcmac; - struct wusb_mac_scratch scratch; - u64 sfn = 0; - __le64 sfn_le; - - tfm_cbcmac = crypto_alloc_shash("cbcmac(aes)", 0, 0); - if (IS_ERR(tfm_cbcmac)) { - result = PTR_ERR(tfm_cbcmac); - printk(KERN_ERR "E: can't load CBCMAC-AES: %d\n", (int)result); - goto error_alloc_cbcmac; - } - - result = crypto_shash_setkey(tfm_cbcmac, key, AES_BLOCK_SIZE); - if (result < 0) { - printk(KERN_ERR "E: can't set CBCMAC-AES key: %d\n", (int)result); - goto error_setkey_cbcmac; - } - - for (bitr = 0; bitr < (len + 63) / 64; bitr++) { - sfn_le = cpu_to_le64(sfn++); - memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... 
*/ - result = wusb_ccm_mac(tfm_cbcmac, &scratch, out + bytes, - &n, a, b, blen); - if (result < 0) - goto error_ccm_mac; - bytes += result; - } - result = bytes; - -error_ccm_mac: -error_setkey_cbcmac: - crypto_free_shash(tfm_cbcmac); -error_alloc_cbcmac: - return result; -} - -/* WUSB1.0[A.2] test vectors */ -static const u8 stv_hsmic_key[16] = { - 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d, - 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f -}; - -static const struct aes_ccm_nonce stv_hsmic_n = { - .sfn = { 0 }, - .tkid = { 0x76, 0x98, 0x01, }, - .dest_addr = { .data = { 0xbe, 0x00 } }, - .src_addr = { .data = { 0x76, 0x98 } }, -}; - -/* - * Out-of-band MIC Generation verification code - * - */ -static int wusb_oob_mic_verify(void) -{ - int result; - u8 mic[8]; - /* WUSB1.0[A.2] test vectors */ - static const struct usb_handshake stv_hsmic_hs = { - .bMessageNumber = 2, - .bStatus = 00, - .tTKID = { 0x76, 0x98, 0x01 }, - .bReserved = 00, - .CDID = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, - 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, - 0x3c, 0x3d, 0x3e, 0x3f }, - .nonce = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, - 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, - 0x2c, 0x2d, 0x2e, 0x2f }, - .MIC = { 0x75, 0x6a, 0x97, 0x51, 0x0c, 0x8c, - 0x14, 0x7b }, - }; - size_t hs_size; - - result = wusb_oob_mic(mic, stv_hsmic_key, &stv_hsmic_n, &stv_hsmic_hs); - if (result < 0) - printk(KERN_ERR "E: WUSB OOB MIC test: failed: %d\n", result); - else if (memcmp(stv_hsmic_hs.MIC, mic, sizeof(mic))) { - printk(KERN_ERR "E: OOB MIC test: " - "mismatch between MIC result and WUSB1.0[A2]\n"); - hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC); - printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size); - wusb_key_dump(&stv_hsmic_hs, hs_size); - printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n", - sizeof(stv_hsmic_n)); - wusb_key_dump(&stv_hsmic_n, sizeof(stv_hsmic_n)); - printk(KERN_ERR "E: MIC out:\n"); - wusb_key_dump(mic, sizeof(mic)); - printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n"); - wusb_key_dump(stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC)); - result = -EINVAL; - } else - result = 0; - return result; -} - -/* - * Test vectors for Key derivation - * - * These come from WUSB1.0[6.5.1], the vectors in WUSB1.0[A.1] - * (errata corrected in 2005/07). 
- */ -static const u8 stv_key_a1[16] __attribute__ ((__aligned__(4))) = { - 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87, - 0x78, 0x69, 0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f -}; - -static const struct aes_ccm_nonce stv_keydvt_n_a1 = { - .sfn = { 0 }, - .tkid = { 0x76, 0x98, 0x01, }, - .dest_addr = { .data = { 0xbe, 0x00 } }, - .src_addr = { .data = { 0x76, 0x98 } }, -}; - -static const struct wusb_keydvt_out stv_keydvt_out_a1 = { - .kck = { - 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d, - 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f - }, - .ptk = { - 0xc8, 0x70, 0x62, 0x82, 0xb6, 0x7c, 0xe9, 0x06, - 0x7b, 0xc5, 0x25, 0x69, 0xf2, 0x36, 0x61, 0x2d - } -}; - -/* - * Performa a test to make sure we match the vectors defined in - * WUSB1.0[A.1](Errata2006/12) - */ -static int wusb_key_derive_verify(void) -{ - int result = 0; - struct wusb_keydvt_out keydvt_out; - /* These come from WUSB1.0[A.1] + 2006/12 errata */ - static const struct wusb_keydvt_in stv_keydvt_in_a1 = { - .hnonce = { - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f - }, - .dnonce = { - 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, - 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f - } - }; - - result = wusb_key_derive(&keydvt_out, stv_key_a1, &stv_keydvt_n_a1, - &stv_keydvt_in_a1); - if (result < 0) - printk(KERN_ERR "E: WUSB key derivation test: " - "derivation failed: %d\n", result); - if (memcmp(&stv_keydvt_out_a1, &keydvt_out, sizeof(keydvt_out))) { - printk(KERN_ERR "E: WUSB key derivation test: " - "mismatch between key derivation result " - "and WUSB1.0[A1] Errata 2006/12\n"); - printk(KERN_ERR "E: keydvt in: key\n"); - wusb_key_dump(stv_key_a1, sizeof(stv_key_a1)); - printk(KERN_ERR "E: keydvt in: nonce\n"); - wusb_key_dump(&stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1)); - printk(KERN_ERR "E: keydvt in: hnonce & dnonce\n"); - wusb_key_dump(&stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1)); - printk(KERN_ERR "E: keydvt out: KCK\n"); - wusb_key_dump(&keydvt_out.kck, sizeof(keydvt_out.kck)); - printk(KERN_ERR "E: keydvt out: PTK\n"); - wusb_key_dump(&keydvt_out.ptk, sizeof(keydvt_out.ptk)); - result = -EINVAL; - } else - result = 0; - return result; -} - -/* - * Initialize crypto system - * - * FIXME: we do nothing now, other than verifying. Later on we'll - * cache the encryption stuff, so that's why we have a separate init. - */ -int wusb_crypto_init(void) -{ - int result; - - if (debug_crypto_verify) { - result = wusb_key_derive_verify(); - if (result < 0) - return result; - return wusb_oob_mic_verify(); - } - return 0; -} - -void wusb_crypto_exit(void) -{ - /* FIXME: free cached crypto transforms */ -} diff --git a/drivers/staging/wusbcore/dev-sysfs.c b/drivers/staging/wusbcore/dev-sysfs.c deleted file mode 100644 index 67b0a4c412b2..000000000000 --- a/drivers/staging/wusbcore/dev-sysfs.c +++ /dev/null @@ -1,124 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * WUSB devices - * sysfs bindings - * - * Copyright (C) 2007 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * Get them out of the way... 
- */ - -#include <linux/jiffies.h> -#include <linux/ctype.h> -#include <linux/workqueue.h> -#include "wusbhc.h" - -static ssize_t wusb_disconnect_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct usb_device *usb_dev; - struct wusbhc *wusbhc; - unsigned command; - u8 port_idx; - - if (sscanf(buf, "%u", &command) != 1) - return -EINVAL; - if (command == 0) - return size; - usb_dev = to_usb_device(dev); - wusbhc = wusbhc_get_by_usb_dev(usb_dev); - if (wusbhc == NULL) - return -ENODEV; - - mutex_lock(&wusbhc->mutex); - port_idx = wusb_port_no_to_idx(usb_dev->portnum); - __wusbhc_dev_disable(wusbhc, port_idx); - mutex_unlock(&wusbhc->mutex); - wusbhc_put(wusbhc); - return size; -} -static DEVICE_ATTR_WO(wusb_disconnect); - -static ssize_t wusb_cdid_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - ssize_t result; - struct wusb_dev *wusb_dev; - - wusb_dev = wusb_dev_get_by_usb_dev(to_usb_device(dev)); - if (wusb_dev == NULL) - return -ENODEV; - result = sprintf(buf, "%16ph\n", wusb_dev->cdid.data); - wusb_dev_put(wusb_dev); - return result; -} -static DEVICE_ATTR_RO(wusb_cdid); - -static ssize_t wusb_ck_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int result; - struct usb_device *usb_dev; - struct wusbhc *wusbhc; - struct wusb_ckhdid ck; - - result = sscanf(buf, - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx\n", - &ck.data[0] , &ck.data[1], - &ck.data[2] , &ck.data[3], - &ck.data[4] , &ck.data[5], - &ck.data[6] , &ck.data[7], - &ck.data[8] , &ck.data[9], - &ck.data[10], &ck.data[11], - &ck.data[12], &ck.data[13], - &ck.data[14], &ck.data[15]); - if (result != 16) - return -EINVAL; - - usb_dev = to_usb_device(dev); - wusbhc = wusbhc_get_by_usb_dev(usb_dev); - if (wusbhc == NULL) - return -ENODEV; - result = wusb_dev_4way_handshake(wusbhc, usb_dev->wusb_dev, &ck); - memzero_explicit(&ck, sizeof(ck)); - wusbhc_put(wusbhc); - return result < 0 ? 
result : size; -} -static DEVICE_ATTR_WO(wusb_ck); - -static struct attribute *wusb_dev_attrs[] = { - &dev_attr_wusb_disconnect.attr, - &dev_attr_wusb_cdid.attr, - &dev_attr_wusb_ck.attr, - NULL, -}; - -static const struct attribute_group wusb_dev_attr_group = { - .name = NULL, /* we want them in the same directory */ - .attrs = wusb_dev_attrs, -}; - -int wusb_dev_sysfs_add(struct wusbhc *wusbhc, struct usb_device *usb_dev, - struct wusb_dev *wusb_dev) -{ - int result = sysfs_create_group(&usb_dev->dev.kobj, - &wusb_dev_attr_group); - struct device *dev = &usb_dev->dev; - if (result < 0) - dev_err(dev, "Cannot register WUSB-dev attributes: %d\n", - result); - return result; -} - -void wusb_dev_sysfs_rm(struct wusb_dev *wusb_dev) -{ - struct usb_device *usb_dev = wusb_dev->usb_dev; - if (usb_dev) - sysfs_remove_group(&usb_dev->dev.kobj, &wusb_dev_attr_group); -} diff --git a/drivers/staging/wusbcore/devconnect.c b/drivers/staging/wusbcore/devconnect.c deleted file mode 100644 index 1170f8baf608..000000000000 --- a/drivers/staging/wusbcore/devconnect.c +++ /dev/null @@ -1,1085 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8]) - * Device Connect handling - * - * Copyright (C) 2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: docs - * FIXME: this file needs to be broken up, it's grown too big - * - * - * WUSB1.0[7.1, 7.5.1, ] - * - * WUSB device connection is kind of messy. Some background: - * - * When a device wants to connect it scans the UWB radio channels - * looking for a WUSB Channel; a WUSB channel is defined by MMCs - * (Micro Managed Commands or something like that) [see - * Design-overview for more on this] . - * - * So, device scans the radio, finds MMCs and thus a host and checks - * when the next DNTS is. It sends a Device Notification Connect - * (DN_Connect); the host picks it up (through nep.c and notif.c, ends - * up in wusb_devconnect_ack(), which creates a wusb_dev structure in - * wusbhc->port[port_number].wusb_dev), assigns an unauth address - * to the device (this means from 0x80 to 0xfe) and sends, in the MMC - * a Connect Ack Information Element (ConnAck IE). - * - * So now the device now has a WUSB address. From now on, we use - * that to talk to it in the RPipes. - * - * ASSUMPTIONS: - * - * - We use the the as device address the port number where it is - * connected (port 0 doesn't exist). For unauth, it is 128 + that. - * - * ROADMAP: - * - * This file contains the logic for doing that--entry points: - * - * wusb_devconnect_ack() Ack a device until _acked() called. - * Called by notif.c:wusb_handle_dn_connect() - * when a DN_Connect is received. - * - * wusb_devconnect_acked() Ack done, release resources. - * - * wusb_handle_dn_alive() Called by notif.c:wusb_handle_dn() - * for processing a DN_Alive pong from a device. - * - * wusb_handle_dn_disconnect()Called by notif.c:wusb_handle_dn() to - * process a disconnect request from a - * device. - * - * __wusb_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when - * disabling a port. - * - * wusb_devconnect_create() Called when creating the host by - * lc.c:wusbhc_create(). - * - * wusb_devconnect_destroy() Cleanup called removing the host. Called - * by lc.c:wusbhc_destroy(). - * - * Each Wireless USB host maintains a list of DN_Connect requests - * (actually we maintain a list of pending Connect Acks, the - * wusbhc->ca_list). 
- * - * LIFE CYCLE OF port->wusb_dev - * - * Before the @wusbhc structure put()s the reference it owns for - * port->wusb_dev [and clean the wusb_dev pointer], it needs to - * lock @wusbhc->mutex. - */ - -#include <linux/jiffies.h> -#include <linux/ctype.h> -#include <linux/slab.h> -#include <linux/workqueue.h> -#include <linux/export.h> -#include "wusbhc.h" - -static void wusbhc_devconnect_acked_work(struct work_struct *work); - -static void wusb_dev_free(struct wusb_dev *wusb_dev) -{ - kfree(wusb_dev); -} - -static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc) -{ - struct wusb_dev *wusb_dev; - - wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL); - if (wusb_dev == NULL) - goto err; - - wusb_dev->wusbhc = wusbhc; - - INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work); - - return wusb_dev; -err: - wusb_dev_free(wusb_dev); - return NULL; -} - - -/* - * Using the Connect-Ack list, fill out the @wusbhc Connect-Ack WUSB IE - * properly so that it can be added to the MMC. - * - * We just get the @wusbhc->ca_list and fill out the first four ones or - * less (per-spec WUSB1.0[7.5, before T7-38). If the ConnectAck WUSB - * IE is not allocated, we alloc it. - * - * @wusbhc->mutex must be taken - */ -static void wusbhc_fill_cack_ie(struct wusbhc *wusbhc) -{ - unsigned cnt; - struct wusb_dev *dev_itr; - struct wuie_connect_ack *cack_ie; - - cack_ie = &wusbhc->cack_ie; - cnt = 0; - list_for_each_entry(dev_itr, &wusbhc->cack_list, cack_node) { - cack_ie->blk[cnt].CDID = dev_itr->cdid; - cack_ie->blk[cnt].bDeviceAddress = dev_itr->addr; - if (++cnt >= WUIE_ELT_MAX) - break; - } - cack_ie->hdr.bLength = sizeof(cack_ie->hdr) - + cnt * sizeof(cack_ie->blk[0]); -} - -/* - * Register a new device that wants to connect - * - * A new device wants to connect, so we add it to the Connect-Ack - * list. We give it an address in the unauthorized range (bit 8 set); - * user space will have to drive authorization further on. - * - * @dev_addr: address to use for the device (which is also the port - * number). - * - * @wusbhc->mutex must be taken - */ -static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc, - struct wusb_dn_connect *dnc, - const char *pr_cdid, u8 port_idx) -{ - struct device *dev = wusbhc->dev; - struct wusb_dev *wusb_dev; - int new_connection = wusb_dn_connect_new_connection(dnc); - u8 dev_addr; - int result; - - /* Is it registered already? */ - list_for_each_entry(wusb_dev, &wusbhc->cack_list, cack_node) - if (!memcmp(&wusb_dev->cdid, &dnc->CDID, - sizeof(wusb_dev->cdid))) - return wusb_dev; - /* We don't have it, create an entry, register it */ - wusb_dev = wusb_dev_alloc(wusbhc); - if (wusb_dev == NULL) - return NULL; - wusb_dev_init(wusb_dev); - wusb_dev->cdid = dnc->CDID; - wusb_dev->port_idx = port_idx; - - /* - * Devices are always available within the cluster reservation - * and since the hardware will take the intersection of the - * per-device availability and the cluster reservation, the - * per-device availability can simply be set to always - * available. - */ - bitmap_fill(wusb_dev->availability.bm, UWB_NUM_MAS); - - /* FIXME: handle reconnects instead of assuming connects are - always new. 
*/ - if (1 && new_connection == 0) - new_connection = 1; - if (new_connection) { - dev_addr = (port_idx + 2) | WUSB_DEV_ADDR_UNAUTH; - - dev_info(dev, "Connecting new WUSB device to address %u, " - "port %u\n", dev_addr, port_idx); - - result = wusb_set_dev_addr(wusbhc, wusb_dev, dev_addr); - if (result < 0) - return NULL; - } - wusb_dev->entry_ts = jiffies; - list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list); - wusbhc->cack_count++; - wusbhc_fill_cack_ie(wusbhc); - - return wusb_dev; -} - -/* - * Remove a Connect-Ack context entry from the HCs view - * - * @wusbhc->mutex must be taken - */ -static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) -{ - list_del_init(&wusb_dev->cack_node); - wusbhc->cack_count--; - wusbhc_fill_cack_ie(wusbhc); -} - -/* - * @wusbhc->mutex must be taken */ -static -void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) -{ - wusbhc_cack_rm(wusbhc, wusb_dev); - if (wusbhc->cack_count) - wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); - else - wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr); -} - -static void wusbhc_devconnect_acked_work(struct work_struct *work) -{ - struct wusb_dev *wusb_dev = container_of(work, struct wusb_dev, - devconnect_acked_work); - struct wusbhc *wusbhc = wusb_dev->wusbhc; - - mutex_lock(&wusbhc->mutex); - wusbhc_devconnect_acked(wusbhc, wusb_dev); - mutex_unlock(&wusbhc->mutex); - - wusb_dev_put(wusb_dev); -} - -/* - * Ack a device for connection - * - * FIXME: docs - * - * @pr_cdid: Printable CDID...hex Use @dnc->cdid for the real deal. - * - * So we get the connect ack IE (may have been allocated already), - * find an empty connect block, an empty virtual port, create an - * address with it (see below), make it an unauth addr [bit 7 set] and - * set the MMC. - * - * Addresses: because WUSB hosts have no downstream hubs, we can do a - * 1:1 mapping between 'port number' and device - * address. This simplifies many things, as during this - * initial connect phase the USB stack has no knowledge of - * the device and hasn't assigned an address yet--we know - * USB's choose_address() will use the same heuristics we - * use here, so we can assume which address will be assigned. - * - * USB stack always assigns address 1 to the root hub, so - * to the port number we add 2 (thus virtual port #0 is - * addr #2). - * - * @wusbhc shall be referenced - */ -static -void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc, - const char *pr_cdid) -{ - int result; - struct device *dev = wusbhc->dev; - struct wusb_dev *wusb_dev; - struct wusb_port *port; - unsigned idx; - - mutex_lock(&wusbhc->mutex); - - /* Check we are not handling it already */ - for (idx = 0; idx < wusbhc->ports_max; idx++) { - port = wusb_port_by_idx(wusbhc, idx); - if (port->wusb_dev - && memcmp(&dnc->CDID, &port->wusb_dev->cdid, sizeof(dnc->CDID)) == 0) - goto error_unlock; - } - /* Look up those fake ports we have for a free one */ - for (idx = 0; idx < wusbhc->ports_max; idx++) { - port = wusb_port_by_idx(wusbhc, idx); - if ((port->status & USB_PORT_STAT_POWER) - && !(port->status & USB_PORT_STAT_CONNECTION)) - break; - } - if (idx >= wusbhc->ports_max) { - dev_err(dev, "Host controller can't connect more devices " - "(%u already connected); device %s rejected\n", - wusbhc->ports_max, pr_cdid); - /* NOTE: we could send a WUIE_Disconnect here, but we haven't - * event acked, so the device will eventually timeout the - * connection, right? 
*/ - goto error_unlock; - } - - /* Make sure we are using no crypto on that "virtual port" */ - wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0); - - /* Grab a filled in Connect-Ack context, fill out the - * Connect-Ack Wireless USB IE, set the MMC */ - wusb_dev = wusbhc_cack_add(wusbhc, dnc, pr_cdid, idx); - if (wusb_dev == NULL) - goto error_unlock; - result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); - if (result < 0) - goto error_unlock; - /* Give the device at least 2ms (WUSB1.0[7.5.1p3]), let's do - * three for a good measure */ - msleep(3); - port->wusb_dev = wusb_dev; - port->status |= USB_PORT_STAT_CONNECTION; - port->change |= USB_PORT_STAT_C_CONNECTION; - /* Now the port status changed to connected; hub_wq will - * pick the change up and try to reset the port to bring it to - * the enabled state--so this process returns up to the stack - * and it calls back into wusbhc_rh_port_reset(). - */ -error_unlock: - mutex_unlock(&wusbhc->mutex); - return; - -} - -/* - * Disconnect a Wireless USB device from its fake port - * - * Marks the port as disconnected so that hub_wq can pick up the change - * and drops our knowledge about the device. - * - * Assumes there is a device connected - * - * @port_index: zero based port number - * - * NOTE: @wusbhc->mutex is locked - * - * WARNING: From here it is not very safe to access anything hanging off - * wusb_dev - */ -static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc, - struct wusb_port *port) -{ - struct wusb_dev *wusb_dev = port->wusb_dev; - - port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE - | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET - | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED); - port->change |= USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE; - if (wusb_dev) { - dev_dbg(wusbhc->dev, "disconnecting device from port %d\n", wusb_dev->port_idx); - if (!list_empty(&wusb_dev->cack_node)) - list_del_init(&wusb_dev->cack_node); - /* For the one in cack_add() */ - wusb_dev_put(wusb_dev); - } - port->wusb_dev = NULL; - - /* After a device disconnects, change the GTK (see [WUSB] - * section 6.2.11.2). */ - if (wusbhc->active) - wusbhc_gtk_rekey(wusbhc); - - /* The Wireless USB part has forgotten about the device already; now - * hub_wq's timer will pick up the disconnection and remove the USB - * device from the system - */ -} - -/* - * Refresh the list of keep alives to emit in the MMC - * - * We only publish the first four devices that have a coming timeout - * condition. Then when we are done processing those, we go for the - * next ones. We ignore the ones that have timed out already (they'll - * be purged). - * - * This might cause the first devices to timeout the last devices in - * the port array...FIXME: come up with a better algorithm? - * - * Note we can't do much about MMC's ops errors; we hope next refresh - * will kind of handle it. 
- * - * NOTE: @wusbhc->mutex is locked - */ -static void __wusbhc_keep_alive(struct wusbhc *wusbhc) -{ - struct device *dev = wusbhc->dev; - unsigned cnt; - struct wusb_dev *wusb_dev; - struct wusb_port *wusb_port; - struct wuie_keep_alive *ie = &wusbhc->keep_alive_ie; - unsigned keep_alives, old_keep_alives; - - old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr); - keep_alives = 0; - for (cnt = 0; - keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max; - cnt++) { - unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout); - - wusb_port = wusb_port_by_idx(wusbhc, cnt); - wusb_dev = wusb_port->wusb_dev; - - if (wusb_dev == NULL) - continue; - if (wusb_dev->usb_dev == NULL) - continue; - - if (time_after(jiffies, wusb_dev->entry_ts + tt)) { - dev_err(dev, "KEEPALIVE: device %u timed out\n", - wusb_dev->addr); - __wusbhc_dev_disconnect(wusbhc, wusb_port); - } else if (time_after(jiffies, wusb_dev->entry_ts + tt/3)) { - /* Approaching timeout cut off, need to refresh */ - ie->bDeviceAddress[keep_alives++] = wusb_dev->addr; - } - } - if (keep_alives & 0x1) /* pad to even number ([WUSB] section 7.5.9) */ - ie->bDeviceAddress[keep_alives++] = 0x7f; - ie->hdr.bLength = sizeof(ie->hdr) + - keep_alives*sizeof(ie->bDeviceAddress[0]); - if (keep_alives > 0) - wusbhc_mmcie_set(wusbhc, 10, 5, &ie->hdr); - else if (old_keep_alives != 0) - wusbhc_mmcie_rm(wusbhc, &ie->hdr); -} - -/* - * Do a run through all devices checking for timeouts - */ -static void wusbhc_keep_alive_run(struct work_struct *ws) -{ - struct delayed_work *dw = to_delayed_work(ws); - struct wusbhc *wusbhc = container_of(dw, struct wusbhc, keep_alive_timer); - - mutex_lock(&wusbhc->mutex); - __wusbhc_keep_alive(wusbhc); - mutex_unlock(&wusbhc->mutex); - - queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, - msecs_to_jiffies(wusbhc->trust_timeout / 2)); -} - -/* - * Find the wusb_dev from its device address. - * - * The device can be found directly from the address (see - * wusb_cack_add() for where the device address is set to port_idx - * +2), except when the address is zero. - */ -static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr) -{ - int p; - - if (addr == 0xff) /* unconnected */ - return NULL; - - if (addr > 0) { - int port = (addr & ~0x80) - 2; - if (port < 0 || port >= wusbhc->ports_max) - return NULL; - return wusb_port_by_idx(wusbhc, port)->wusb_dev; - } - - /* Look for the device with address 0. */ - for (p = 0; p < wusbhc->ports_max; p++) { - struct wusb_dev *wusb_dev = wusb_port_by_idx(wusbhc, p)->wusb_dev; - if (wusb_dev && wusb_dev->addr == addr) - return wusb_dev; - } - return NULL; -} - -/* - * Handle a DN_Alive notification (WUSB1.0[7.6.1]) - * - * This just updates the device activity timestamp and then refreshes - * the keep alive IE. - * - * @wusbhc shall be referenced and unlocked - */ -static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, u8 srcaddr) -{ - struct wusb_dev *wusb_dev; - - mutex_lock(&wusbhc->mutex); - wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); - if (wusb_dev == NULL) { - dev_dbg(wusbhc->dev, "ignoring DN_Alive from unconnected device %02x\n", - srcaddr); - } else { - wusb_dev->entry_ts = jiffies; - __wusbhc_keep_alive(wusbhc); - } - mutex_unlock(&wusbhc->mutex); -} - -/* - * Handle a DN_Connect notification (WUSB1.0[7.6.1]) - * - * @wusbhc - * @pkt_hdr - * @size: Size of the buffer where the notification resides; if the - * notification data suggests there should be more data than - * available, an error will be signaled and the whole buffer - * consumed. 
- * - * @wusbhc->mutex shall be held - */ -static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc, - struct wusb_dn_hdr *dn_hdr, - size_t size) -{ - struct device *dev = wusbhc->dev; - struct wusb_dn_connect *dnc; - char pr_cdid[WUSB_CKHDID_STRSIZE]; - static const char *beacon_behaviour[] = { - "reserved", - "self-beacon", - "directed-beacon", - "no-beacon" - }; - - if (size < sizeof(*dnc)) { - dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n", - size, sizeof(*dnc)); - return; - } - - dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr); - sprintf(pr_cdid, "%16ph", dnc->CDID.data); - dev_info(dev, "DN CONNECT: device %s @ %x (%s) wants to %s\n", - pr_cdid, - wusb_dn_connect_prev_dev_addr(dnc), - beacon_behaviour[wusb_dn_connect_beacon_behavior(dnc)], - wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect"); - /* ACK the connect */ - wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid); -} - -/* - * Handle a DN_Disconnect notification (WUSB1.0[7.6.1]) - * - * Device is going down -- do the disconnect. - * - * @wusbhc shall be referenced and unlocked - */ -static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, u8 srcaddr) -{ - struct device *dev = wusbhc->dev; - struct wusb_dev *wusb_dev; - - mutex_lock(&wusbhc->mutex); - wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); - if (wusb_dev == NULL) { - dev_dbg(dev, "ignoring DN DISCONNECT from unconnected device %02x\n", - srcaddr); - } else { - dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", - wusb_dev->addr); - __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, - wusb_dev->port_idx)); - } - mutex_unlock(&wusbhc->mutex); -} - -/* - * Handle a Device Notification coming a host - * - * The Device Notification comes from a host (HWA, DWA or WHCI) - * wrapped in a set of headers. Somebody else has peeled off those - * headers for us and we just get one Device Notifications. - * - * Invalid DNs (e.g., too short) are discarded. - * - * @wusbhc shall be referenced - * - * FIXMES: - * - implement priorities as in WUSB1.0[Table 7-55]? - */ -void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr, - struct wusb_dn_hdr *dn_hdr, size_t size) -{ - struct device *dev = wusbhc->dev; - - if (size < sizeof(struct wusb_dn_hdr)) { - dev_err(dev, "DN data shorter than DN header (%d < %d)\n", - (int)size, (int)sizeof(struct wusb_dn_hdr)); - return; - } - switch (dn_hdr->bType) { - case WUSB_DN_CONNECT: - wusbhc_handle_dn_connect(wusbhc, dn_hdr, size); - break; - case WUSB_DN_ALIVE: - wusbhc_handle_dn_alive(wusbhc, srcaddr); - break; - case WUSB_DN_DISCONNECT: - wusbhc_handle_dn_disconnect(wusbhc, srcaddr); - break; - case WUSB_DN_MASAVAILCHANGED: - case WUSB_DN_RWAKE: - case WUSB_DN_SLEEP: - /* FIXME: handle these DNs. */ - break; - case WUSB_DN_EPRDY: - /* The hardware handles these. */ - break; - default: - dev_warn(dev, "unknown DN %u (%d octets) from %u\n", - dn_hdr->bType, (int)size, srcaddr); - } -} -EXPORT_SYMBOL_GPL(wusbhc_handle_dn); - -/* - * Disconnect a WUSB device from a the cluster - * - * @wusbhc - * @port Fake port where the device is (wusbhc index, not USB port number). - * - * In Wireless USB, a disconnect is basically telling the device he is - * being disconnected and forgetting about him. - * - * We send the device a Device Disconnect IE (WUSB1.0[7.5.11]) for 100 - * ms and then keep going. - * - * We don't do much in case of error; we always pretend we disabled - * the port and disconnected the device. 
If physically the request - * didn't get there (many things can fail in the way there), the stack - * will reject the device's communication attempts. - * - * @wusbhc should be refcounted and locked - */ -void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port_idx) -{ - int result; - struct device *dev = wusbhc->dev; - struct wusb_dev *wusb_dev; - struct wuie_disconnect *ie; - - wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; - if (wusb_dev == NULL) { - /* reset no device? ignore */ - dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n", - port_idx); - return; - } - __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); - - ie = kzalloc(sizeof(*ie), GFP_KERNEL); - if (ie == NULL) - return; - ie->hdr.bLength = sizeof(*ie); - ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT; - ie->bDeviceAddress = wusb_dev->addr; - result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr); - if (result < 0) - dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result); - else { - /* At least 6 MMCs, assuming at least 1 MMC per zone. */ - msleep(7*4); - wusbhc_mmcie_rm(wusbhc, &ie->hdr); - } - kfree(ie); -} - -/* - * Walk over the BOS descriptor, verify and grok it - * - * @usb_dev: referenced - * @wusb_dev: referenced and unlocked - * - * The BOS descriptor is defined at WUSB1.0[7.4.1], and it defines a - * "flexible" way to wrap all kinds of descriptors inside an standard - * descriptor (wonder why they didn't use normal descriptors, - * btw). Not like they lack code. - * - * At the end we go to look for the WUSB Device Capabilities - * (WUSB1.0[7.4.1.1]) that is wrapped in a device capability descriptor - * that is part of the BOS descriptor set. That tells us what does the - * device support (dual role, beacon type, UWB PHY rates). - */ -static int wusb_dev_bos_grok(struct usb_device *usb_dev, - struct wusb_dev *wusb_dev, - struct usb_bos_descriptor *bos, size_t desc_size) -{ - ssize_t result; - struct device *dev = &usb_dev->dev; - void *itr, *top; - - /* Walk over BOS capabilities, verify them */ - itr = (void *)bos + sizeof(*bos); - top = itr + desc_size - sizeof(*bos); - while (itr < top) { - struct usb_dev_cap_header *cap_hdr = itr; - size_t cap_size; - u8 cap_type; - if (top - itr < sizeof(*cap_hdr)) { - dev_err(dev, "Device BUG? premature end of BOS header " - "data [offset 0x%02x]: only %zu bytes left\n", - (int)(itr - (void *)bos), top - itr); - result = -ENOSPC; - goto error_bad_cap; - } - cap_size = cap_hdr->bLength; - cap_type = cap_hdr->bDevCapabilityType; - if (cap_size == 0) - break; - if (cap_size > top - itr) { - dev_err(dev, "Device BUG? premature end of BOS data " - "[offset 0x%02x cap %02x %zu bytes]: " - "only %zu bytes left\n", - (int)(itr - (void *)bos), - cap_type, cap_size, top - itr); - result = -EBADF; - goto error_bad_cap; - } - switch (cap_type) { - case USB_CAP_TYPE_WIRELESS_USB: - if (cap_size != sizeof(*wusb_dev->wusb_cap_descr)) - dev_err(dev, "Device BUG? WUSB Capability " - "descriptor is %zu bytes vs %zu " - "needed\n", cap_size, - sizeof(*wusb_dev->wusb_cap_descr)); - else - wusb_dev->wusb_cap_descr = itr; - break; - default: - dev_err(dev, "BUG? 
Unknown BOS capability 0x%02x " - "(%zu bytes) at offset 0x%02x\n", cap_type, - cap_size, (int)(itr - (void *)bos)); - } - itr += cap_size; - } - result = 0; -error_bad_cap: - return result; -} - -/* - * Add information from the BOS descriptors to the device - * - * @usb_dev: referenced - * @wusb_dev: referenced and unlocked - * - * So what we do is we alloc a space for the BOS descriptor of 64 - * bytes; read the first four bytes which include the wTotalLength - * field (WUSB1.0[T7-26]) and if it fits in those 64 bytes, read the - * whole thing. If not we realloc to that size. - * - * Then we call the groking function, that will fill up - * wusb_dev->wusb_cap_descr, which is what we'll need later on. - */ -static int wusb_dev_bos_add(struct usb_device *usb_dev, - struct wusb_dev *wusb_dev) -{ - ssize_t result; - struct device *dev = &usb_dev->dev; - struct usb_bos_descriptor *bos; - size_t alloc_size = 32, desc_size = 4; - - bos = kmalloc(alloc_size, GFP_KERNEL); - if (bos == NULL) - return -ENOMEM; - result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size); - if (result < 4) { - dev_err(dev, "Can't get BOS descriptor or too short: %zd\n", - result); - goto error_get_descriptor; - } - desc_size = le16_to_cpu(bos->wTotalLength); - if (desc_size >= alloc_size) { - kfree(bos); - alloc_size = desc_size; - bos = kmalloc(alloc_size, GFP_KERNEL); - if (bos == NULL) - return -ENOMEM; - } - result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size); - if (result < 0 || result != desc_size) { - dev_err(dev, "Can't get BOS descriptor or too short (need " - "%zu bytes): %zd\n", desc_size, result); - goto error_get_descriptor; - } - if (result < sizeof(*bos) - || le16_to_cpu(bos->wTotalLength) != desc_size) { - dev_err(dev, "Can't get BOS descriptor or too short (need " - "%zu bytes): %zd\n", desc_size, result); - goto error_get_descriptor; - } - - result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result); - if (result < 0) - goto error_bad_bos; - wusb_dev->bos = bos; - return 0; - -error_bad_bos: -error_get_descriptor: - kfree(bos); - wusb_dev->wusb_cap_descr = NULL; - return result; -} - -static void wusb_dev_bos_rm(struct wusb_dev *wusb_dev) -{ - kfree(wusb_dev->bos); - wusb_dev->wusb_cap_descr = NULL; -}; - -/* - * USB stack's device addition Notifier Callback - * - * Called from drivers/usb/core/hub.c when a new device is added; we - * use this hook to perform certain WUSB specific setup work on the - * new device. As well, it is the first time we can connect the - * wusb_dev and the usb_dev. So we note it down in wusb_dev and take a - * reference that we'll drop. - * - * First we need to determine if the device is a WUSB device (else we - * ignore it). For that we use the speed setting (USB_SPEED_WIRELESS) - * [FIXME: maybe we'd need something more definitive]. If so, we track - * it's usb_busd and from there, the WUSB HC. - * - * Because all WUSB HCs are contained in a 'struct wusbhc', voila, we - * get the wusbhc for the device. - * - * We have a reference on @usb_dev (as we are called at the end of its - * enumeration). 
- * - * NOTE: @usb_dev locked - */ -static void wusb_dev_add_ncb(struct usb_device *usb_dev) -{ - int result = 0; - struct wusb_dev *wusb_dev; - struct wusbhc *wusbhc; - struct device *dev = &usb_dev->dev; - u8 port_idx; - - if (usb_dev->wusb == 0 || usb_dev->devnum == 1) - return; /* skip non wusb and wusb RHs */ - - usb_set_device_state(usb_dev, USB_STATE_UNAUTHENTICATED); - - wusbhc = wusbhc_get_by_usb_dev(usb_dev); - if (wusbhc == NULL) - goto error_nodev; - mutex_lock(&wusbhc->mutex); - wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev); - port_idx = wusb_port_no_to_idx(usb_dev->portnum); - mutex_unlock(&wusbhc->mutex); - if (wusb_dev == NULL) - goto error_nodev; - wusb_dev->usb_dev = usb_get_dev(usb_dev); - usb_dev->wusb_dev = wusb_dev_get(wusb_dev); - result = wusb_dev_sec_add(wusbhc, usb_dev, wusb_dev); - if (result < 0) { - dev_err(dev, "Cannot enable security: %d\n", result); - goto error_sec_add; - } - /* Now query the device for it's BOS and attach it to wusb_dev */ - result = wusb_dev_bos_add(usb_dev, wusb_dev); - if (result < 0) { - dev_err(dev, "Cannot get BOS descriptors: %d\n", result); - goto error_bos_add; - } - result = wusb_dev_sysfs_add(wusbhc, usb_dev, wusb_dev); - if (result < 0) - goto error_add_sysfs; -out: - wusb_dev_put(wusb_dev); - wusbhc_put(wusbhc); -error_nodev: - return; - -error_add_sysfs: - wusb_dev_bos_rm(wusb_dev); -error_bos_add: - wusb_dev_sec_rm(wusb_dev); -error_sec_add: - mutex_lock(&wusbhc->mutex); - __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); - mutex_unlock(&wusbhc->mutex); - goto out; -} - -/* - * Undo all the steps done at connection by the notifier callback - * - * NOTE: @usb_dev locked - */ -static void wusb_dev_rm_ncb(struct usb_device *usb_dev) -{ - struct wusb_dev *wusb_dev = usb_dev->wusb_dev; - - if (usb_dev->wusb == 0 || usb_dev->devnum == 1) - return; /* skip non wusb and wusb RHs */ - - wusb_dev_sysfs_rm(wusb_dev); - wusb_dev_bos_rm(wusb_dev); - wusb_dev_sec_rm(wusb_dev); - wusb_dev->usb_dev = NULL; - usb_dev->wusb_dev = NULL; - wusb_dev_put(wusb_dev); - usb_put_dev(usb_dev); -} - -/* - * Handle notifications from the USB stack (notifier call back) - * - * This is called when the USB stack does a - * usb_{bus,device}_{add,remove}() so we can do WUSB specific - * handling. It is called with [for the case of - * USB_DEVICE_{ADD,REMOVE} with the usb_dev locked. 
- */ -int wusb_usb_ncb(struct notifier_block *nb, unsigned long val, - void *priv) -{ - int result = NOTIFY_OK; - - switch (val) { - case USB_DEVICE_ADD: - wusb_dev_add_ncb(priv); - break; - case USB_DEVICE_REMOVE: - wusb_dev_rm_ncb(priv); - break; - case USB_BUS_ADD: - /* ignore (for now) */ - case USB_BUS_REMOVE: - break; - default: - WARN_ON(1); - result = NOTIFY_BAD; - } - return result; -} - -/* - * Return a referenced wusb_dev given a @wusbhc and @usb_dev - */ -struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *wusbhc, - struct usb_device *usb_dev) -{ - struct wusb_dev *wusb_dev; - u8 port_idx; - - port_idx = wusb_port_no_to_idx(usb_dev->portnum); - BUG_ON(port_idx > wusbhc->ports_max); - wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; - if (wusb_dev != NULL) /* ops, device is gone */ - wusb_dev_get(wusb_dev); - return wusb_dev; -} -EXPORT_SYMBOL_GPL(__wusb_dev_get_by_usb_dev); - -void wusb_dev_destroy(struct kref *_wusb_dev) -{ - struct wusb_dev *wusb_dev = container_of(_wusb_dev, struct wusb_dev, refcnt); - - list_del_init(&wusb_dev->cack_node); - wusb_dev_free(wusb_dev); -} -EXPORT_SYMBOL_GPL(wusb_dev_destroy); - -/* - * Create all the device connect handling infrastructure - * - * This is basically the device info array, Connect Acknowledgement - * (cack) lists, keep-alive timers (and delayed work thread). - */ -int wusbhc_devconnect_create(struct wusbhc *wusbhc) -{ - wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE; - wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr); - INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run); - - wusbhc->cack_ie.hdr.bIEIdentifier = WUIE_ID_CONNECTACK; - wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr); - INIT_LIST_HEAD(&wusbhc->cack_list); - - return 0; -} - -/* - * Release all resources taken by the devconnect stuff - */ -void wusbhc_devconnect_destroy(struct wusbhc *wusbhc) -{ - /* no op */ -} - -/* - * wusbhc_devconnect_start - start accepting device connections - * @wusbhc: the WUSB HC - * - * Sets the Host Info IE to accept all new connections. - * - * FIXME: This also enables the keep alives but this is not necessary - * until there are connected and authenticated devices. - */ -int wusbhc_devconnect_start(struct wusbhc *wusbhc) -{ - struct device *dev = wusbhc->dev; - struct wuie_host_info *hi; - int result; - - hi = kzalloc(sizeof(*hi), GFP_KERNEL); - if (hi == NULL) - return -ENOMEM; - - hi->hdr.bLength = sizeof(*hi); - hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO; - hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL); - hi->CHID = wusbhc->chid; - result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr); - if (result < 0) { - dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result); - goto error_mmcie_set; - } - wusbhc->wuie_host_info = hi; - - queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, - msecs_to_jiffies(wusbhc->trust_timeout / 2)); - - return 0; - -error_mmcie_set: - kfree(hi); - return result; -} - -/* - * wusbhc_devconnect_stop - stop managing connected devices - * @wusbhc: the WUSB HC - * - * Disconnects any devices still connected, stops the keep alives and - * removes the Host Info IE. 
- */ -void wusbhc_devconnect_stop(struct wusbhc *wusbhc) -{ - int i; - - mutex_lock(&wusbhc->mutex); - for (i = 0; i < wusbhc->ports_max; i++) { - if (wusbhc->port[i].wusb_dev) - __wusbhc_dev_disconnect(wusbhc, &wusbhc->port[i]); - } - mutex_unlock(&wusbhc->mutex); - - cancel_delayed_work_sync(&wusbhc->keep_alive_timer); - wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr); - kfree(wusbhc->wuie_host_info); - wusbhc->wuie_host_info = NULL; -} - -/* - * wusb_set_dev_addr - set the WUSB device address used by the host - * @wusbhc: the WUSB HC the device is connect to - * @wusb_dev: the WUSB device - * @addr: new device address - */ -int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, u8 addr) -{ - int result; - - wusb_dev->addr = addr; - result = wusbhc->dev_info_set(wusbhc, wusb_dev); - if (result < 0) - dev_err(wusbhc->dev, "device %d: failed to set device " - "address\n", wusb_dev->port_idx); - else - dev_info(wusbhc->dev, "device %d: %s addr %u\n", - wusb_dev->port_idx, - (addr & WUSB_DEV_ADDR_UNAUTH) ? "unauth" : "auth", - wusb_dev->addr); - - return result; -} diff --git a/drivers/staging/wusbcore/host/Kconfig b/drivers/staging/wusbcore/host/Kconfig deleted file mode 100644 index 9a73f9360a08..000000000000 --- a/drivers/staging/wusbcore/host/Kconfig +++ /dev/null @@ -1,28 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 - -config USB_WHCI_HCD - tristate "Wireless USB Host Controller Interface (WHCI) driver" - depends on USB_PCI && USB && UWB - select USB_WUSB - select UWB_WHCI - help - A driver for PCI-based Wireless USB Host Controllers that are - compliant with the WHCI specification. - - To compile this driver a module, choose M here: the module - will be called "whci-hcd". - -config USB_HWA_HCD - tristate "Host Wire Adapter (HWA) driver" - depends on USB && UWB - select USB_WUSB - select UWB_HWA - help - This driver enables you to connect Wireless USB devices to - your system using a Host Wire Adaptor USB dongle. This is an - UWB Radio Controller and WUSB Host Controller connected to - your machine via USB (specified in WUSB1.0). - - To compile this driver a module, choose M here: the module - will be called "hwa-hc". - diff --git a/drivers/staging/wusbcore/host/Makefile b/drivers/staging/wusbcore/host/Makefile deleted file mode 100644 index d65ee8a73e21..000000000000 --- a/drivers/staging/wusbcore/host/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_USB_WHCI_HCD) += whci/ -obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o diff --git a/drivers/staging/wusbcore/host/hwa-hc.c b/drivers/staging/wusbcore/host/hwa-hc.c deleted file mode 100644 index 8d959e91fe27..000000000000 --- a/drivers/staging/wusbcore/host/hwa-hc.c +++ /dev/null @@ -1,875 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Host Wire Adapter: - * Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * The HWA driver is a simple layer that forwards requests to the WAHC - * (Wire Adater Host Controller) or WUSBHC (Wireless USB Host - * Controller) layers. - * - * Host Wire Adapter is the 'WUSB 1.0 standard' name for Wireless-USB - * Host Controller that is connected to your system via USB (a USB - * dongle that implements a USB host...). 
There is also a Device Wired - * Adaptor, DWA (Wireless USB hub) that uses the same mechanism for - * transferring data (it is after all a USB host connected via - * Wireless USB), we have a common layer called Wire Adapter Host - * Controller that does all the hard work. The WUSBHC (Wireless USB - * Host Controller) is the part common to WUSB Host Controllers, the - * HWA and the PCI-based one, that is implemented following the WHCI - * spec. All these layers are implemented in ../wusbcore. - * - * The main functions are hwahc_op_urb_{en,de}queue(), that pass the - * job of converting a URB to a Wire Adapter - * - * Entry points: - * - * hwahc_driver_*() Driver initialization, registration and - * teardown. - * - * hwahc_probe() New device came up, create an instance for - * it [from device enumeration]. - * - * hwahc_disconnect() Remove device instance [from device - * enumeration]. - * - * [__]hwahc_op_*() Host-Wire-Adaptor specific functions for - * starting/stopping/etc (some might be made also - * DWA). - */ -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/workqueue.h> -#include <linux/wait.h> -#include <linux/completion.h> -#include "../wa-hc.h" -#include "../wusbhc.h" - -struct hwahc { - struct wusbhc wusbhc; /* has to be 1st */ - struct wahc wa; -}; - -/* - * FIXME should be wusbhc - * - * NOTE: we need to cache the Cluster ID because later...there is no - * way to get it :) - */ -static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id) -{ - int result; - struct wusbhc *wusbhc = &hwahc->wusbhc; - struct wahc *wa = &hwahc->wa; - struct device *dev = &wa->usb_iface->dev; - - result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - WUSB_REQ_SET_CLUSTER_ID, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - cluster_id, - wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, - NULL, 0, USB_CTRL_SET_TIMEOUT); - if (result < 0) - dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n", - cluster_id, result); - else - wusbhc->cluster_id = cluster_id; - dev_info(dev, "Wireless USB Cluster ID set to 0x%02x\n", cluster_id); - return result; -} - -static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots) -{ - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - struct wahc *wa = &hwahc->wa; - - return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - WUSB_REQ_SET_NUM_DNTS, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - interval << 8 | slots, - wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, - NULL, 0, USB_CTRL_SET_TIMEOUT); -} - -/* - * Reset a WUSB host controller and wait for it to complete doing it. - * - * @usb_hcd: Pointer to WUSB Host Controller instance. 
- * - */ -static int hwahc_op_reset(struct usb_hcd *usb_hcd) -{ - int result; - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - struct device *dev = &hwahc->wa.usb_iface->dev; - - mutex_lock(&wusbhc->mutex); - wa_nep_disarm(&hwahc->wa); - result = __wa_set_feature(&hwahc->wa, WA_RESET); - if (result < 0) { - dev_err(dev, "error commanding HC to reset: %d\n", result); - goto error_unlock; - } - result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0); - if (result < 0) { - dev_err(dev, "error waiting for HC to reset: %d\n", result); - goto error_unlock; - } -error_unlock: - mutex_unlock(&wusbhc->mutex); - return result; -} - -/* - * FIXME: break this function up - */ -static int hwahc_op_start(struct usb_hcd *usb_hcd) -{ - u8 addr; - int result; - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - - result = -ENOSPC; - mutex_lock(&wusbhc->mutex); - addr = wusb_cluster_id_get(); - if (addr == 0) - goto error_cluster_id_get; - result = __hwahc_set_cluster_id(hwahc, addr); - if (result < 0) - goto error_set_cluster_id; - - usb_hcd->uses_new_polling = 1; - set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags); - usb_hcd->state = HC_STATE_RUNNING; - - /* - * prevent USB core from suspending the root hub since - * bus_suspend and bus_resume are not yet supported. - */ - pm_runtime_get_noresume(&usb_hcd->self.root_hub->dev); - - result = 0; -out: - mutex_unlock(&wusbhc->mutex); - return result; - -error_set_cluster_id: - wusb_cluster_id_put(addr); -error_cluster_id_get: - goto out; - -} - -/* - * No need to abort pipes, as when this is called, all the children - * has been disconnected and that has done it [through - * usb_disable_interface() -> usb_disable_endpoint() -> - * hwahc_op_ep_disable() - >rpipe_ep_disable()]. - */ -static void hwahc_op_stop(struct usb_hcd *usb_hcd) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - - mutex_lock(&wusbhc->mutex); - wusb_cluster_id_put(wusbhc->cluster_id); - mutex_unlock(&wusbhc->mutex); -} - -static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - struct wahc *wa = &hwahc->wa; - - /* - * We cannot query the HWA for the WUSB time since that requires sending - * a synchronous URB and this function can be called in_interrupt. - * Instead, query the USB frame number for our parent and use that. - */ - return usb_get_current_frame_number(wa->usb_dev); -} - -static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, - gfp_t gfp) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - - return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp); -} - -static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, - int status) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - - return wa_urb_dequeue(&hwahc->wa, urb, status); -} - -/* - * Release resources allocated for an endpoint - * - * If there is an associated rpipe to this endpoint, go ahead and put it. 
- */ -static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd, - struct usb_host_endpoint *ep) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - - rpipe_ep_disable(&hwahc->wa, ep); -} - -static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc) -{ - int result; - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - struct device *dev = &hwahc->wa.usb_iface->dev; - - result = __wa_set_feature(&hwahc->wa, WA_ENABLE); - if (result < 0) { - dev_err(dev, "error commanding HC to start: %d\n", result); - goto error_stop; - } - result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE); - if (result < 0) { - dev_err(dev, "error waiting for HC to start: %d\n", result); - goto error_stop; - } - result = wa_nep_arm(&hwahc->wa, GFP_KERNEL); - if (result < 0) { - dev_err(dev, "cannot listen to notifications: %d\n", result); - goto error_stop; - } - /* - * If WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS is set, - * disable transfer notifications. - */ - if (hwahc->wa.quirks & - WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS) { - struct usb_host_interface *cur_altsetting = - hwahc->wa.usb_iface->cur_altsetting; - - result = usb_control_msg(hwahc->wa.usb_dev, - usb_sndctrlpipe(hwahc->wa.usb_dev, 0), - WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS, - USB_DIR_OUT | USB_TYPE_VENDOR | - USB_RECIP_INTERFACE, - WA_REQ_ALEREON_FEATURE_SET, - cur_altsetting->desc.bInterfaceNumber, - NULL, 0, - USB_CTRL_SET_TIMEOUT); - /* - * If we successfully sent the control message, start DTI here - * because no transfer notifications will be received which is - * where DTI is normally started. - */ - if (result == 0) - result = wa_dti_start(&hwahc->wa); - else - result = 0; /* OK. Continue normally. 
*/ - - if (result < 0) { - dev_err(dev, "cannot start DTI: %d\n", result); - goto error_dti_start; - } - } - - return result; - -error_dti_start: - wa_nep_disarm(&hwahc->wa); -error_stop: - __wa_clear_feature(&hwahc->wa, WA_ENABLE); - return result; -} - -static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay) -{ - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - struct wahc *wa = &hwahc->wa; - u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; - int ret; - - ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - WUSB_REQ_CHAN_STOP, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - delay * 1000, - iface_no, - NULL, 0, USB_CTRL_SET_TIMEOUT); - if (ret == 0) - msleep(delay); - - wa_nep_disarm(&hwahc->wa); - __wa_stop(&hwahc->wa); -} - -/* - * Set the UWB MAS allocation for the WUSB cluster - * - * @stream_index: stream to use (-1 for cancelling the allocation) - * @mas: mas bitmap to use - */ -static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index, - const struct uwb_mas_bm *mas) -{ - int result; - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - struct wahc *wa = &hwahc->wa; - struct device *dev = &wa->usb_iface->dev; - u8 mas_le[UWB_NUM_MAS/8]; - - /* Set the stream index */ - result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - WUSB_REQ_SET_STREAM_IDX, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - stream_index, - wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, - NULL, 0, USB_CTRL_SET_TIMEOUT); - if (result < 0) { - dev_err(dev, "Cannot set WUSB stream index: %d\n", result); - goto out; - } - uwb_mas_bm_copy_le(mas_le, mas); - /* Set the MAS allocation */ - result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - WUSB_REQ_SET_WUSB_MAS, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, - mas_le, 32, USB_CTRL_SET_TIMEOUT); - if (result < 0) - dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result); -out: - return result; -} - -/* - * Add an IE to the host's MMC - * - * @interval: See WUSB1.0[8.5.3.1] - * @repeat_cnt: See WUSB1.0[8.5.3.1] - * @handle: See WUSB1.0[8.5.3.1] - * @wuie: Pointer to the header of the WUSB IE data to add. - * MUST BE allocated in a kmalloc buffer (no stack or - * vmalloc). - * - * NOTE: the format of the WUSB IEs for MMCs are different to the - * normal MBOA MAC IEs (IE Id + Length in MBOA MAC vs. Length + - * Id in WUSB IEs). Standards...you gotta love'em. 
- */ -static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval, - u8 repeat_cnt, u8 handle, - struct wuie_hdr *wuie) -{ - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - struct wahc *wa = &hwahc->wa; - u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; - - return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - WUSB_REQ_ADD_MMC_IE, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - interval << 8 | repeat_cnt, - handle << 8 | iface_no, - wuie, wuie->bLength, USB_CTRL_SET_TIMEOUT); -} - -/* - * Remove an IE to the host's MMC - * - * @handle: See WUSB1.0[8.5.3.1] - */ -static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle) -{ - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - struct wahc *wa = &hwahc->wa; - u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; - return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - WUSB_REQ_REMOVE_MMC_IE, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - 0, handle << 8 | iface_no, - NULL, 0, USB_CTRL_SET_TIMEOUT); -} - -/* - * Update device information for a given fake port - * - * @port_idx: Fake port to which device is connected (wusbhc index, not - * USB port number). - */ -static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc, - struct wusb_dev *wusb_dev) -{ - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - struct wahc *wa = &hwahc->wa; - u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; - struct hwa_dev_info *dev_info; - int ret; - - /* fill out the Device Info buffer and send it */ - dev_info = kzalloc(sizeof(struct hwa_dev_info), GFP_KERNEL); - if (!dev_info) - return -ENOMEM; - uwb_mas_bm_copy_le(dev_info->bmDeviceAvailability, - &wusb_dev->availability); - dev_info->bDeviceAddress = wusb_dev->addr; - - /* - * If the descriptors haven't been read yet, use a default PHY - * rate of 53.3 Mbit/s only. The correct value will be used - * when this will be called again as part of the - * authentication process (which occurs after the descriptors - * have been read). - */ - if (wusb_dev->wusb_cap_descr) - dev_info->wPHYRates = wusb_dev->wusb_cap_descr->wPHYRates; - else - dev_info->wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53); - - ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - WUSB_REQ_SET_DEV_INFO, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - 0, wusb_dev->port_idx << 8 | iface_no, - dev_info, sizeof(struct hwa_dev_info), - USB_CTRL_SET_TIMEOUT); - kfree(dev_info); - return ret; -} - -/* - * Set host's idea of which encryption (and key) method to use when - * talking to ad evice on a given port. - * - * If key is NULL, it means disable encryption for that "virtual port" - * (used when we disconnect). 
- */ -static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, - const void *key, size_t key_size, - u8 key_idx) -{ - int result = -ENOMEM; - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - struct wahc *wa = &hwahc->wa; - u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; - struct usb_key_descriptor *keyd; - size_t keyd_len; - - keyd_len = sizeof(*keyd) + key_size; - keyd = kzalloc(keyd_len, GFP_KERNEL); - if (keyd == NULL) - return -ENOMEM; - - keyd->bLength = keyd_len; - keyd->bDescriptorType = USB_DT_KEY; - keyd->tTKID[0] = (tkid >> 0) & 0xff; - keyd->tTKID[1] = (tkid >> 8) & 0xff; - keyd->tTKID[2] = (tkid >> 16) & 0xff; - memcpy(keyd->bKeyData, key, key_size); - - result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - USB_REQ_SET_DESCRIPTOR, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - USB_DT_KEY << 8 | key_idx, - port_idx << 8 | iface_no, - keyd, keyd_len, USB_CTRL_SET_TIMEOUT); - - kzfree(keyd); /* clear keys etc. */ - return result; -} - -/* - * Set host's idea of which encryption (and key) method to use when - * talking to ad evice on a given port. - * - * If key is NULL, it means disable encryption for that "virtual port" - * (used when we disconnect). - */ -static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, - const void *key, size_t key_size) -{ - int result = -ENOMEM; - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); - struct wahc *wa = &hwahc->wa; - u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; - u8 encryption_value; - - /* Tell the host which key to use to talk to the device */ - if (key) { - u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK, - WUSB_KEY_INDEX_ORIGINATOR_HOST); - - result = __hwahc_dev_set_key(wusbhc, port_idx, tkid, - key, key_size, key_idx); - if (result < 0) - goto error_set_key; - encryption_value = wusbhc->ccm1_etd->bEncryptionValue; - } else { - /* FIXME: this should come from wusbhc->etd[UNSECURE].value */ - encryption_value = 0; - } - - /* Set the encryption type for communicating with the device */ - result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - USB_REQ_SET_ENCRYPTION, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - encryption_value, port_idx << 8 | iface_no, - NULL, 0, USB_CTRL_SET_TIMEOUT); - if (result < 0) - dev_err(wusbhc->dev, "Can't set host's WUSB encryption for " - "port index %u to %s (value %d): %d\n", port_idx, - wusb_et_name(wusbhc->ccm1_etd->bEncryptionType), - wusbhc->ccm1_etd->bEncryptionValue, result); -error_set_key: - return result; -} - -/* - * Set host's GTK key - */ -static int __hwahc_op_set_gtk(struct wusbhc *wusbhc, u32 tkid, - const void *key, size_t key_size) -{ - u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK, - WUSB_KEY_INDEX_ORIGINATOR_HOST); - - return __hwahc_dev_set_key(wusbhc, 0, tkid, key, key_size, key_idx); -} - -/* - * Get the Wire Adapter class-specific descriptor - * - * NOTE: this descriptor comes with the big bundled configuration - * descriptor that includes the interfaces' and endpoints', so - * we just look for it in the cached copy kept by the USB stack. - * - * NOTE2: We convert LE fields to CPU order. 
- */ -static int wa_fill_descr(struct wahc *wa) -{ - int result; - struct device *dev = &wa->usb_iface->dev; - char *itr; - struct usb_device *usb_dev = wa->usb_dev; - struct usb_descriptor_header *hdr; - struct usb_wa_descriptor *wa_descr; - size_t itr_size, actconfig_idx; - - actconfig_idx = (usb_dev->actconfig - usb_dev->config) / - sizeof(usb_dev->config[0]); - itr = usb_dev->rawdescriptors[actconfig_idx]; - itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); - while (itr_size >= sizeof(*hdr)) { - hdr = (struct usb_descriptor_header *) itr; - dev_dbg(dev, "Extra device descriptor: " - "type %02x/%u bytes @ %zu (%zu left)\n", - hdr->bDescriptorType, hdr->bLength, - (itr - usb_dev->rawdescriptors[actconfig_idx]), - itr_size); - if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER) - goto found; - itr += hdr->bLength; - itr_size -= hdr->bLength; - } - dev_err(dev, "cannot find Wire Adapter Class descriptor\n"); - return -ENODEV; - -found: - result = -EINVAL; - if (hdr->bLength > itr_size) { /* is it available? */ - dev_err(dev, "incomplete Wire Adapter Class descriptor " - "(%zu bytes left, %u needed)\n", - itr_size, hdr->bLength); - goto error; - } - if (hdr->bLength < sizeof(*wa->wa_descr)) { - dev_err(dev, "short Wire Adapter Class descriptor\n"); - goto error; - } - wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr; - if (le16_to_cpu(wa_descr->bcdWAVersion) > 0x0100) - dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n", - (le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00) >> 8, - le16_to_cpu(wa_descr->bcdWAVersion) & 0x00ff); - result = 0; -error: - return result; -} - -static const struct hc_driver hwahc_hc_driver = { - .description = "hwa-hcd", - .product_desc = "Wireless USB HWA host controller", - .hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd), - .irq = NULL, /* FIXME */ - .flags = HCD_USB25, - .reset = hwahc_op_reset, - .start = hwahc_op_start, - .stop = hwahc_op_stop, - .get_frame_number = hwahc_op_get_frame_number, - .urb_enqueue = hwahc_op_urb_enqueue, - .urb_dequeue = hwahc_op_urb_dequeue, - .endpoint_disable = hwahc_op_endpoint_disable, - - .hub_status_data = wusbhc_rh_status_data, - .hub_control = wusbhc_rh_control, - .start_port_reset = wusbhc_rh_start_port_reset, -}; - -static int hwahc_security_create(struct hwahc *hwahc) -{ - int result; - struct wusbhc *wusbhc = &hwahc->wusbhc; - struct usb_device *usb_dev = hwahc->wa.usb_dev; - struct device *dev = &usb_dev->dev; - struct usb_security_descriptor *secd; - struct usb_encryption_descriptor *etd; - void *itr, *top; - size_t itr_size, needed, bytes; - u8 index; - char buf[64]; - - /* Find the host's security descriptors in the config descr bundle */ - index = (usb_dev->actconfig - usb_dev->config) / - sizeof(usb_dev->config[0]); - itr = usb_dev->rawdescriptors[index]; - itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); - top = itr + itr_size; - result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index], - le16_to_cpu(usb_dev->actconfig->desc.wTotalLength), - USB_DT_SECURITY, (void **) &secd, sizeof(*secd)); - if (result == -1) { - dev_warn(dev, "BUG? WUSB host has no security descriptors\n"); - return 0; - } - needed = sizeof(*secd); - if (top - (void *)secd < needed) { - dev_err(dev, "BUG? Not enough data to process security " - "descriptor header (%zu bytes left vs %zu needed)\n", - top - (void *) secd, needed); - return 0; - } - needed = le16_to_cpu(secd->wTotalLength); - if (top - (void *)secd < needed) { - dev_err(dev, "BUG? 
Not enough data to process security " - "descriptors (%zu bytes left vs %zu needed)\n", - top - (void *) secd, needed); - return 0; - } - /* Walk over the sec descriptors and store CCM1's on wusbhc */ - itr = (void *) secd + sizeof(*secd); - top = (void *) secd + le16_to_cpu(secd->wTotalLength); - index = 0; - bytes = 0; - while (itr < top) { - etd = itr; - if (top - itr < sizeof(*etd)) { - dev_err(dev, "BUG: bad host security descriptor; " - "not enough data (%zu vs %zu left)\n", - top - itr, sizeof(*etd)); - break; - } - if (etd->bLength < sizeof(*etd)) { - dev_err(dev, "BUG: bad host encryption descriptor; " - "descriptor is too short " - "(%zu vs %zu needed)\n", - (size_t)etd->bLength, sizeof(*etd)); - break; - } - itr += etd->bLength; - bytes += snprintf(buf + bytes, sizeof(buf) - bytes, - "%s (0x%02x) ", - wusb_et_name(etd->bEncryptionType), - etd->bEncryptionValue); - wusbhc->ccm1_etd = etd; - } - dev_info(dev, "supported encryption types: %s\n", buf); - if (wusbhc->ccm1_etd == NULL) { - dev_err(dev, "E: host doesn't support CCM-1 crypto\n"); - return 0; - } - /* Pretty print what we support */ - return 0; -} - -static void hwahc_security_release(struct hwahc *hwahc) -{ - /* nothing to do here so far... */ -} - -static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface, - kernel_ulong_t quirks) -{ - int result; - struct device *dev = &iface->dev; - struct wusbhc *wusbhc = &hwahc->wusbhc; - struct wahc *wa = &hwahc->wa; - struct usb_device *usb_dev = interface_to_usbdev(iface); - - wa->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */ - wa->usb_iface = usb_get_intf(iface); - wusbhc->dev = dev; - /* defer getting the uwb_rc handle until it is needed since it - * may not have been registered by the hwa_rc driver yet. */ - wusbhc->uwb_rc = NULL; - result = wa_fill_descr(wa); /* Get the device descriptor */ - if (result < 0) - goto error_fill_descriptor; - if (wa->wa_descr->bNumPorts > USB_MAXCHILDREN) { - dev_err(dev, "FIXME: USB_MAXCHILDREN too low for WUSB " - "adapter (%u ports)\n", wa->wa_descr->bNumPorts); - wusbhc->ports_max = USB_MAXCHILDREN; - } else { - wusbhc->ports_max = wa->wa_descr->bNumPorts; - } - wusbhc->mmcies_max = wa->wa_descr->bNumMMCIEs; - wusbhc->start = __hwahc_op_wusbhc_start; - wusbhc->stop = __hwahc_op_wusbhc_stop; - wusbhc->mmcie_add = __hwahc_op_mmcie_add; - wusbhc->mmcie_rm = __hwahc_op_mmcie_rm; - wusbhc->dev_info_set = __hwahc_op_dev_info_set; - wusbhc->bwa_set = __hwahc_op_bwa_set; - wusbhc->set_num_dnts = __hwahc_op_set_num_dnts; - wusbhc->set_ptk = __hwahc_op_set_ptk; - wusbhc->set_gtk = __hwahc_op_set_gtk; - result = hwahc_security_create(hwahc); - if (result < 0) { - dev_err(dev, "Can't initialize security: %d\n", result); - goto error_security_create; - } - wa->wusb = wusbhc; /* FIXME: ugly, need to fix */ - result = wusbhc_create(&hwahc->wusbhc); - if (result < 0) { - dev_err(dev, "Can't create WUSB HC structures: %d\n", result); - goto error_wusbhc_create; - } - result = wa_create(&hwahc->wa, iface, quirks); - if (result < 0) - goto error_wa_create; - return 0; - -error_wa_create: - wusbhc_destroy(&hwahc->wusbhc); -error_wusbhc_create: - /* WA Descr fill allocs no resources */ -error_security_create: -error_fill_descriptor: - usb_put_intf(iface); - usb_put_dev(usb_dev); - return result; -} - -static void hwahc_destroy(struct hwahc *hwahc) -{ - struct wusbhc *wusbhc = &hwahc->wusbhc; - - mutex_lock(&wusbhc->mutex); - __wa_destroy(&hwahc->wa); - wusbhc_destroy(&hwahc->wusbhc); - hwahc_security_release(hwahc); - 
hwahc->wusbhc.dev = NULL; - uwb_rc_put(wusbhc->uwb_rc); - usb_put_intf(hwahc->wa.usb_iface); - usb_put_dev(hwahc->wa.usb_dev); - mutex_unlock(&wusbhc->mutex); -} - -static void hwahc_init(struct hwahc *hwahc) -{ - wa_init(&hwahc->wa); -} - -static int hwahc_probe(struct usb_interface *usb_iface, - const struct usb_device_id *id) -{ - int result; - struct usb_hcd *usb_hcd; - struct wusbhc *wusbhc; - struct hwahc *hwahc; - struct device *dev = &usb_iface->dev; - - result = -ENOMEM; - usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa"); - if (usb_hcd == NULL) { - dev_err(dev, "unable to allocate instance\n"); - goto error_alloc; - } - usb_hcd->wireless = 1; - usb_hcd->self.sg_tablesize = ~0; - wusbhc = usb_hcd_to_wusbhc(usb_hcd); - hwahc = container_of(wusbhc, struct hwahc, wusbhc); - hwahc_init(hwahc); - result = hwahc_create(hwahc, usb_iface, id->driver_info); - if (result < 0) { - dev_err(dev, "Cannot initialize internals: %d\n", result); - goto error_hwahc_create; - } - result = usb_add_hcd(usb_hcd, 0, 0); - if (result < 0) { - dev_err(dev, "Cannot add HCD: %d\n", result); - goto error_add_hcd; - } - device_wakeup_enable(usb_hcd->self.controller); - result = wusbhc_b_create(&hwahc->wusbhc); - if (result < 0) { - dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result); - goto error_wusbhc_b_create; - } - return 0; - -error_wusbhc_b_create: - usb_remove_hcd(usb_hcd); -error_add_hcd: - hwahc_destroy(hwahc); -error_hwahc_create: - usb_put_hcd(usb_hcd); -error_alloc: - return result; -} - -static void hwahc_disconnect(struct usb_interface *usb_iface) -{ - struct usb_hcd *usb_hcd; - struct wusbhc *wusbhc; - struct hwahc *hwahc; - - usb_hcd = usb_get_intfdata(usb_iface); - wusbhc = usb_hcd_to_wusbhc(usb_hcd); - hwahc = container_of(wusbhc, struct hwahc, wusbhc); - - wusbhc_b_destroy(&hwahc->wusbhc); - usb_remove_hcd(usb_hcd); - hwahc_destroy(hwahc); - usb_put_hcd(usb_hcd); -} - -static const struct usb_device_id hwahc_id_table[] = { - /* Alereon 5310 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x02, 0x01), - .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC | - WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS }, - /* Alereon 5611 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x02, 0x01), - .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC | - WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS }, - /* FIXME: use class labels for this */ - { USB_INTERFACE_INFO(0xe0, 0x02, 0x01), }, - {}, -}; -MODULE_DEVICE_TABLE(usb, hwahc_id_table); - -static struct usb_driver hwahc_driver = { - .name = "hwa-hc", - .probe = hwahc_probe, - .disconnect = hwahc_disconnect, - .id_table = hwahc_id_table, -}; - -module_usb_driver(hwahc_driver); - -MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); -MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/wusbcore/host/whci/Makefile b/drivers/staging/wusbcore/host/whci/Makefile deleted file mode 100644 index 859d20079df6..000000000000 --- a/drivers/staging/wusbcore/host/whci/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 - -obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o - -whci-hcd-y := \ - asl.o \ - debug.o \ - hcd.o \ - hw.o \ - init.o \ - int.o \ - pzl.o \ - qset.o \ - wusb.o diff --git a/drivers/staging/wusbcore/host/whci/asl.c b/drivers/staging/wusbcore/host/whci/asl.c deleted file mode 100644 index a2b9a50cfb80..000000000000 --- a/drivers/staging/wusbcore/host/whci/asl.c +++ /dev/null @@ -1,376 +0,0 @@ -// 
SPDX-License-Identifier: GPL-2.0 -/* - * Wireless Host Controller (WHC) asynchronous schedule management. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - */ -#include <linux/kernel.h> -#include <linux/gfp.h> -#include <linux/dma-mapping.h> -#include <linux/usb.h> - -#include "../../../uwb/include/umc.h" -#include "../../wusbhc.h" - -#include "whcd.h" - -static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset, - struct whc_qset **next, struct whc_qset **prev) -{ - struct list_head *n, *p; - - BUG_ON(list_empty(&whc->async_list)); - - n = qset->list_node.next; - if (n == &whc->async_list) - n = n->next; - p = qset->list_node.prev; - if (p == &whc->async_list) - p = p->prev; - - *next = container_of(n, struct whc_qset, list_node); - *prev = container_of(p, struct whc_qset, list_node); - -} - -static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset) -{ - list_move(&qset->list_node, &whc->async_list); - qset->in_sw_list = true; -} - -static void asl_qset_insert(struct whc *whc, struct whc_qset *qset) -{ - struct whc_qset *next, *prev; - - qset_clear(whc, qset); - - /* Link into ASL. */ - qset_get_next_prev(whc, qset, &next, &prev); - whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma); - whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma); - qset->in_hw_list = true; -} - -static void asl_qset_remove(struct whc *whc, struct whc_qset *qset) -{ - struct whc_qset *prev, *next; - - qset_get_next_prev(whc, qset, &next, &prev); - - list_move(&qset->list_node, &whc->async_removed_list); - qset->in_sw_list = false; - - /* - * No more qsets in the ASL? The caller must stop the ASL as - * it's no longer valid. - */ - if (list_empty(&whc->async_list)) - return; - - /* Remove from ASL. */ - whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma); - qset->in_hw_list = false; -} - -/** - * process_qset - process any recently inactivated or halted qTDs in a - * qset. - * - * After inactive qTDs are removed, new qTDs can be added if the - * urb queue still contains URBs. - * - * Returns any additional WUSBCMD bits for the ASL sync command (i.e., - * WUSBCMD_ASYNC_QSET_RM if a halted qset was removed). - */ -static uint32_t process_qset(struct whc *whc, struct whc_qset *qset) -{ - enum whc_update update = 0; - uint32_t status = 0; - - while (qset->ntds) { - struct whc_qtd *td; - - td = &qset->qtd[qset->td_start]; - status = le32_to_cpu(td->status); - - /* - * Nothing to do with a still active qTD. - */ - if (status & QTD_STS_ACTIVE) - break; - - if (status & QTD_STS_HALTED) { - /* Ug, an error. */ - process_halted_qtd(whc, qset, td); - /* A halted qTD always triggers an update - because the qset was either removed or - reactivated. */ - update |= WHC_UPDATE_UPDATED; - goto done; - } - - /* Mmm, a completed qTD. */ - process_inactive_qtd(whc, qset, td); - } - - if (!qset->remove) - update |= qset_add_qtds(whc, qset); - -done: - /* - * Remove this qset from the ASL if requested, but only if has - * no qTDs. 
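asl_qset_insert() and asl_qset_remove() above keep two views of the schedule consistent: the driver's list_head bookkeeping and the hardware's own chain of qset link pointers, where insertion and removal are done by rewriting a single predecessor pointer. A minimal userspace model of the link-pointer half (plain pointers instead of DMA addresses, no dummy qset, no locking):

#include <stdio.h>

struct qset {
	struct qset *link;   /* stands in for the DMA link pointer in the QH */
	int id;
};

/* Insert nq into the circular chain right after prev. */
static void hw_insert(struct qset *prev, struct qset *nq)
{
	nq->link = prev->link;
	prev->link = nq;
}

/* Unlink q, given its predecessor.  The chain is never broken because
 * only prev->link is rewritten. */
static void hw_remove(struct qset *prev, struct qset *q)
{
	prev->link = q->link;
	q->link = NULL;
}

int main(void)
{
	struct qset a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct qset *p;
	int i;

	a.link = &a;              /* one-element circular list */
	hw_insert(&a, &b);        /* a -> b -> a               */
	hw_insert(&b, &c);        /* a -> b -> c -> a          */
	hw_remove(&a, &b);        /* a -> c -> a               */

	for (i = 0, p = &a; i < 4; i++, p = p->link)
		printf("qset %d\n", p->id);
	return 0;
}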
- */ - if (qset->remove && qset->ntds == 0) { - asl_qset_remove(whc, qset); - update |= WHC_UPDATE_REMOVED; - } - return update; -} - -void asl_start(struct whc *whc) -{ - struct whc_qset *qset; - - qset = list_first_entry(&whc->async_list, struct whc_qset, list_node); - - le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR); - - whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN); - whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, - WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED, - 1000, "start ASL"); -} - -void asl_stop(struct whc *whc) -{ - whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0); - whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, - WUSBSTS_ASYNC_SCHED, 0, - 1000, "stop ASL"); -} - -/** - * asl_update - request an ASL update and wait for the hardware to be synced - * @whc: the WHCI HC - * @wusbcmd: WUSBCMD value to start the update. - * - * If the WUSB HC is inactive (i.e., the ASL is stopped) then the - * update must be skipped as the hardware may not respond to update - * requests. - */ -void asl_update(struct whc *whc, uint32_t wusbcmd) -{ - struct wusbhc *wusbhc = &whc->wusbhc; - long t; - - mutex_lock(&wusbhc->mutex); - if (wusbhc->active) { - whc_write_wusbcmd(whc, wusbcmd, wusbcmd); - t = wait_event_timeout( - whc->async_list_wq, - (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0, - msecs_to_jiffies(1000)); - if (t == 0) - whc_hw_error(whc, "ASL update timeout"); - } - mutex_unlock(&wusbhc->mutex); -} - -/** - * scan_async_work - scan the ASL for qsets to process. - * - * Process each qset in the ASL in turn and then signal the WHC that - * the ASL has been updated. - * - * Then start, stop or update the asynchronous schedule as required. - */ -void scan_async_work(struct work_struct *work) -{ - struct whc *whc = container_of(work, struct whc, async_work); - struct whc_qset *qset, *t; - enum whc_update update = 0; - - spin_lock_irq(&whc->lock); - - /* - * Transerve the software list backwards so new qsets can be - * safely inserted into the ASL without making it non-circular. - */ - list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) { - if (!qset->in_hw_list) { - asl_qset_insert(whc, qset); - update |= WHC_UPDATE_ADDED; - } - - update |= process_qset(whc, qset); - } - - spin_unlock_irq(&whc->lock); - - if (update) { - uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB; - if (update & WHC_UPDATE_REMOVED) - wusbcmd |= WUSBCMD_ASYNC_QSET_RM; - asl_update(whc, wusbcmd); - } - - /* - * Now that the ASL is updated, complete the removal of any - * removed qsets. - * - * If the qset was to be reset, do so and reinsert it into the - * ASL if it has pending transfers. - */ - spin_lock_irq(&whc->lock); - - list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) { - qset_remove_complete(whc, qset); - if (qset->reset) { - qset_reset(whc, qset); - if (!list_empty(&qset->stds)) { - asl_qset_insert_begin(whc, qset); - queue_work(whc->workqueue, &whc->async_work); - } - } - } - - spin_unlock_irq(&whc->lock); -} - -/** - * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL). - * @whc: the WHCI host controller - * @urb: the URB to enqueue - * @mem_flags: flags for any memory allocations - * - * The qset for the endpoint is obtained and the urb queued on to it. - * - * Work is scheduled to update the hardware's view of the ASL. 
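asl_update() above follows a common pattern: poke WUSBCMD to request a schedule update, then block until the hardware clears the "updated" bit or a timeout fires, treating the timeout as a hardware error. The driver sleeps on async_list_wq and is woken from the interrupt handler; the sketch below substitutes a simulated register and a bounded polling loop just to show the control flow:

#include <stdio.h>
#include <stdint.h>

#define CMD_ASYNC_UPDATED  0x01

static uint32_t wusbcmd;     /* stand-in for the memory-mapped WUSBCMD register */

/* Fake hardware: clears the UPDATED bit a few polls after it was set. */
static void fake_hw_tick(void)
{
	static int countdown = 3;

	if ((wusbcmd & CMD_ASYNC_UPDATED) && --countdown == 0)
		wusbcmd &= ~CMD_ASYNC_UPDATED;
}

/* Request an update and poll until the hardware acknowledges it by
 * clearing the bit, or the retry budget (the "timeout") runs out. */
static int schedule_update(uint32_t bits, int max_polls)
{
	int i;

	wusbcmd |= bits;
	for (i = 0; i < max_polls; i++) {
		fake_hw_tick();
		if (!(wusbcmd & CMD_ASYNC_UPDATED))
			return 0;
	}
	return -1;                      /* the driver calls whc_hw_error() here */
}

int main(void)
{
	printf("update %s\n",
	       schedule_update(CMD_ASYNC_UPDATED, 10) ? "timed out" : "synced");
	return 0;
}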
- */ -int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags) -{ - struct whc_qset *qset; - int err; - unsigned long flags; - - spin_lock_irqsave(&whc->lock, flags); - - err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb); - if (err < 0) { - spin_unlock_irqrestore(&whc->lock, flags); - return err; - } - - qset = get_qset(whc, urb, GFP_ATOMIC); - if (qset == NULL) - err = -ENOMEM; - else - err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); - if (!err) { - if (!qset->in_sw_list && !qset->remove) - asl_qset_insert_begin(whc, qset); - } else - usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb); - - spin_unlock_irqrestore(&whc->lock, flags); - - if (!err) - queue_work(whc->workqueue, &whc->async_work); - - return err; -} - -/** - * asl_urb_dequeue - remove an URB (qset) from the async list. - * @whc: the WHCI host controller - * @urb: the URB to dequeue - * @status: the current status of the URB - * - * URBs that do yet have qTDs can simply be removed from the software - * queue, otherwise the qset must be removed from the ASL so the qTDs - * can be removed. - */ -int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status) -{ - struct whc_urb *wurb = urb->hcpriv; - struct whc_qset *qset = wurb->qset; - struct whc_std *std, *t; - bool has_qtd = false; - int ret; - unsigned long flags; - - spin_lock_irqsave(&whc->lock, flags); - - ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status); - if (ret < 0) - goto out; - - list_for_each_entry_safe(std, t, &qset->stds, list_node) { - if (std->urb == urb) { - if (std->qtd) - has_qtd = true; - qset_free_std(whc, std); - } else - std->qtd = NULL; /* so this std is re-added when the qset is */ - } - - if (has_qtd) { - asl_qset_remove(whc, qset); - wurb->status = status; - wurb->is_async = true; - queue_work(whc->workqueue, &wurb->dequeue_work); - } else - qset_remove_urb(whc, qset, urb, status); -out: - spin_unlock_irqrestore(&whc->lock, flags); - - return ret; -} - -/** - * asl_qset_delete - delete a qset from the ASL - */ -void asl_qset_delete(struct whc *whc, struct whc_qset *qset) -{ - qset->remove = 1; - queue_work(whc->workqueue, &whc->async_work); - qset_delete(whc, qset); -} - -/** - * asl_init - initialize the asynchronous schedule list - * - * A dummy qset with no qTDs is added to the ASL to simplify removing - * qsets (no need to stop the ASL when the last qset is removed). - */ -int asl_init(struct whc *whc) -{ - struct whc_qset *qset; - - qset = qset_alloc(whc, GFP_KERNEL); - if (qset == NULL) - return -ENOMEM; - - asl_qset_insert_begin(whc, qset); - asl_qset_insert(whc, qset); - - return 0; -} - -/** - * asl_clean_up - free ASL resources - * - * The ASL is stopped and empty except for the dummy qset. - */ -void asl_clean_up(struct whc *whc) -{ - struct whc_qset *qset; - - if (!list_empty(&whc->async_list)) { - qset = list_first_entry(&whc->async_list, struct whc_qset, list_node); - list_del(&qset->list_node); - qset_free(whc, qset); - } -} diff --git a/drivers/staging/wusbcore/host/whci/debug.c b/drivers/staging/wusbcore/host/whci/debug.c deleted file mode 100644 index 443da6719147..000000000000 --- a/drivers/staging/wusbcore/host/whci/debug.c +++ /dev/null @@ -1,153 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless Host Controller (WHC) debug. - * - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 
- */ -#include <linux/slab.h> -#include <linux/kernel.h> -#include <linux/debugfs.h> -#include <linux/seq_file.h> -#include <linux/export.h> - -#include "../../wusbhc.h" - -#include "whcd.h" - -struct whc_dbg { - struct dentry *di_f; - struct dentry *asl_f; - struct dentry *pzl_f; -}; - -static void qset_print(struct seq_file *s, struct whc_qset *qset) -{ - static const char *qh_type[] = { - "ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", }; - struct whc_std *std; - struct urb *urb = NULL; - int i; - - seq_printf(s, "qset %08x", (u32)qset->qset_dma); - if (&qset->list_node == qset->whc->async_list.prev) { - seq_printf(s, " (dummy)\n"); - } else { - seq_printf(s, " ep%d%s-%s maxpkt: %d\n", - qset->qh.info1 & 0x0f, - (qset->qh.info1 >> 4) & 0x1 ? "in" : "out", - qh_type[(qset->qh.info1 >> 5) & 0x7], - (qset->qh.info1 >> 16) & 0xffff); - } - seq_printf(s, " -> %08x\n", (u32)qset->qh.link); - seq_printf(s, " info: %08x %08x %08x\n", - qset->qh.info1, qset->qh.info2, qset->qh.info3); - seq_printf(s, " sts: %04x errs: %d curwin: %08x\n", - qset->qh.status, qset->qh.err_count, qset->qh.cur_window); - seq_printf(s, " TD: sts: %08x opts: %08x\n", - qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options); - - for (i = 0; i < WHCI_QSET_TD_MAX; i++) { - seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n", - i == qset->td_start ? 'S' : ' ', - i == qset->td_end ? 'E' : ' ', - i, qset->qtd[i].status, qset->qtd[i].options, - (u32)qset->qtd[i].page_list_ptr); - } - seq_printf(s, " ntds: %d\n", qset->ntds); - list_for_each_entry(std, &qset->stds, list_node) { - if (urb != std->urb) { - urb = std->urb; - seq_printf(s, " urb %p transferred: %d bytes\n", urb, - urb->actual_length); - } - if (std->qtd) - seq_printf(s, " sTD[%td]: %zu bytes @ %08x\n", - std->qtd - &qset->qtd[0], - std->len, std->num_pointers ? - (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr); - else - seq_printf(s, " sTD[-]: %zd bytes @ %08x\n", - std->len, std->num_pointers ? - (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr); - } -} - -static int di_show(struct seq_file *s, void *p) -{ - struct whc *whc = s->private; - int d; - - for (d = 0; d < whc->n_devices; d++) { - struct di_buf_entry *di = &whc->di_buf[d]; - - seq_printf(s, "DI[%d]\n", d); - seq_printf(s, " availability: %*pb\n", - UWB_NUM_MAS, (unsigned long *)di->availability_info); - seq_printf(s, " %c%c key idx: %d dev addr: %d\n", - (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ', - (di->addr_sec_info & WHC_DI_DISABLE) ? 
'D' : ' ', - (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8, - (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK)); - } - return 0; -} -DEFINE_SHOW_ATTRIBUTE(di); - -static int asl_show(struct seq_file *s, void *p) -{ - struct whc *whc = s->private; - struct whc_qset *qset; - - list_for_each_entry(qset, &whc->async_list, list_node) { - qset_print(s, qset); - } - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(asl); - -static int pzl_show(struct seq_file *s, void *p) -{ - struct whc *whc = s->private; - struct whc_qset *qset; - int period; - - for (period = 0; period < 5; period++) { - seq_printf(s, "Period %d\n", period); - list_for_each_entry(qset, &whc->periodic_list[period], list_node) { - qset_print(s, qset); - } - } - return 0; -} -DEFINE_SHOW_ATTRIBUTE(pzl); - -void whc_dbg_init(struct whc *whc) -{ - if (whc->wusbhc.pal.debugfs_dir == NULL) - return; - - whc->dbg = kzalloc(sizeof(struct whc_dbg), GFP_KERNEL); - if (whc->dbg == NULL) - return; - - whc->dbg->di_f = debugfs_create_file("di", 0444, - whc->wusbhc.pal.debugfs_dir, whc, - &di_fops); - whc->dbg->asl_f = debugfs_create_file("asl", 0444, - whc->wusbhc.pal.debugfs_dir, whc, - &asl_fops); - whc->dbg->pzl_f = debugfs_create_file("pzl", 0444, - whc->wusbhc.pal.debugfs_dir, whc, - &pzl_fops); -} - -void whc_dbg_clean_up(struct whc *whc) -{ - if (whc->dbg) { - debugfs_remove(whc->dbg->pzl_f); - debugfs_remove(whc->dbg->asl_f); - debugfs_remove(whc->dbg->di_f); - kfree(whc->dbg); - } -} diff --git a/drivers/staging/wusbcore/host/whci/hcd.c b/drivers/staging/wusbcore/host/whci/hcd.c deleted file mode 100644 index bee1ff2d35be..000000000000 --- a/drivers/staging/wusbcore/host/whci/hcd.c +++ /dev/null @@ -1,356 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless Host Controller (WHC) driver. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - */ -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/module.h> - -#include "../../../uwb/include/umc.h" -#include "../../wusbhc.h" - -#include "whcd.h" - -/* - * One time initialization. - * - * Nothing to do here. - */ -static int whc_reset(struct usb_hcd *usb_hcd) -{ - return 0; -} - -/* - * Start the wireless host controller. - * - * Start device notification. - * - * Put hc into run state, set DNTS parameters. - */ -static int whc_start(struct usb_hcd *usb_hcd) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - struct whc *whc = wusbhc_to_whc(wusbhc); - u8 bcid; - int ret; - - mutex_lock(&wusbhc->mutex); - - le_writel(WUSBINTR_GEN_CMD_DONE - | WUSBINTR_HOST_ERR - | WUSBINTR_ASYNC_SCHED_SYNCED - | WUSBINTR_DNTS_INT - | WUSBINTR_ERR_INT - | WUSBINTR_INT, - whc->base + WUSBINTR); - - /* set cluster ID */ - bcid = wusb_cluster_id_get(); - ret = whc_set_cluster_id(whc, bcid); - if (ret < 0) - goto out; - wusbhc->cluster_id = bcid; - - /* start HC */ - whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN); - - usb_hcd->uses_new_polling = 1; - set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags); - usb_hcd->state = HC_STATE_RUNNING; - -out: - mutex_unlock(&wusbhc->mutex); - return ret; -} - - -/* - * Stop the wireless host controller. - * - * Stop device notification. - * - * Wait for pending transfer to stop? Put hc into stop state? 
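The debugfs files created in debug.c above (di, asl, pzl) use the standard seq_file pattern: a *_show() callback plus DEFINE_SHOW_ATTRIBUTE() to generate the file_operations, registered with debugfs_create_file(). A minimal, hypothetical module using the same pattern (the module name, directory and exported value are invented for illustration):

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *demo_dir;
static int demo_value = 42;

static int demo_show(struct seq_file *s, void *unused)
{
	int *v = s->private;            /* the data pointer passed below */

	seq_printf(s, "value: %d\n", *v);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);            /* generates demo_fops */

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("whci-demo", NULL);
	debugfs_create_file("state", 0444, demo_dir, &demo_value, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");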
- */ -static void whc_stop(struct usb_hcd *usb_hcd) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - struct whc *whc = wusbhc_to_whc(wusbhc); - - mutex_lock(&wusbhc->mutex); - - /* stop HC */ - le_writel(0, whc->base + WUSBINTR); - whc_write_wusbcmd(whc, WUSBCMD_RUN, 0); - whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, - WUSBSTS_HCHALTED, WUSBSTS_HCHALTED, - 100, "HC to halt"); - - wusb_cluster_id_put(wusbhc->cluster_id); - - mutex_unlock(&wusbhc->mutex); -} - -static int whc_get_frame_number(struct usb_hcd *usb_hcd) -{ - /* Frame numbers are not applicable to WUSB. */ - return -ENOSYS; -} - - -/* - * Queue an URB to the ASL or PZL - */ -static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, - gfp_t mem_flags) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - struct whc *whc = wusbhc_to_whc(wusbhc); - int ret; - - switch (usb_pipetype(urb->pipe)) { - case PIPE_INTERRUPT: - ret = pzl_urb_enqueue(whc, urb, mem_flags); - break; - case PIPE_ISOCHRONOUS: - dev_err(&whc->umc->dev, "isochronous transfers unsupported\n"); - ret = -ENOTSUPP; - break; - case PIPE_CONTROL: - case PIPE_BULK: - default: - ret = asl_urb_enqueue(whc, urb, mem_flags); - break; - } - - return ret; -} - -/* - * Remove a queued URB from the ASL or PZL. - */ -static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - struct whc *whc = wusbhc_to_whc(wusbhc); - int ret; - - switch (usb_pipetype(urb->pipe)) { - case PIPE_INTERRUPT: - ret = pzl_urb_dequeue(whc, urb, status); - break; - case PIPE_ISOCHRONOUS: - ret = -ENOTSUPP; - break; - case PIPE_CONTROL: - case PIPE_BULK: - default: - ret = asl_urb_dequeue(whc, urb, status); - break; - } - - return ret; -} - -/* - * Wait for all URBs to the endpoint to be completed, then delete the - * qset. 
- */ -static void whc_endpoint_disable(struct usb_hcd *usb_hcd, - struct usb_host_endpoint *ep) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - struct whc *whc = wusbhc_to_whc(wusbhc); - struct whc_qset *qset; - - qset = ep->hcpriv; - if (qset) { - ep->hcpriv = NULL; - if (usb_endpoint_xfer_bulk(&ep->desc) - || usb_endpoint_xfer_control(&ep->desc)) - asl_qset_delete(whc, qset); - else - pzl_qset_delete(whc, qset); - } -} - -static void whc_endpoint_reset(struct usb_hcd *usb_hcd, - struct usb_host_endpoint *ep) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - struct whc *whc = wusbhc_to_whc(wusbhc); - struct whc_qset *qset; - unsigned long flags; - - spin_lock_irqsave(&whc->lock, flags); - - qset = ep->hcpriv; - if (qset) { - qset->remove = 1; - qset->reset = 1; - - if (usb_endpoint_xfer_bulk(&ep->desc) - || usb_endpoint_xfer_control(&ep->desc)) - queue_work(whc->workqueue, &whc->async_work); - else - queue_work(whc->workqueue, &whc->periodic_work); - } - - spin_unlock_irqrestore(&whc->lock, flags); -} - - -static const struct hc_driver whc_hc_driver = { - .description = "whci-hcd", - .product_desc = "Wireless host controller", - .hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd), - .irq = whc_int_handler, - .flags = HCD_USB2, - - .reset = whc_reset, - .start = whc_start, - .stop = whc_stop, - .get_frame_number = whc_get_frame_number, - .urb_enqueue = whc_urb_enqueue, - .urb_dequeue = whc_urb_dequeue, - .endpoint_disable = whc_endpoint_disable, - .endpoint_reset = whc_endpoint_reset, - - .hub_status_data = wusbhc_rh_status_data, - .hub_control = wusbhc_rh_control, - .start_port_reset = wusbhc_rh_start_port_reset, -}; - -static int whc_probe(struct umc_dev *umc) -{ - int ret; - struct usb_hcd *usb_hcd; - struct wusbhc *wusbhc; - struct whc *whc; - struct device *dev = &umc->dev; - - usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci"); - if (usb_hcd == NULL) { - dev_err(dev, "unable to create hcd\n"); - return -ENOMEM; - } - - usb_hcd->wireless = 1; - usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */ - - wusbhc = usb_hcd_to_wusbhc(usb_hcd); - whc = wusbhc_to_whc(wusbhc); - whc->umc = umc; - - ret = whc_init(whc); - if (ret) - goto error_whc_init; - - wusbhc->dev = dev; - wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent); - if (!wusbhc->uwb_rc) { - ret = -ENODEV; - dev_err(dev, "cannot get radio controller\n"); - goto error_uwb_rc; - } - - if (whc->n_devices > USB_MAXCHILDREN) { - dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n", - whc->n_devices); - wusbhc->ports_max = USB_MAXCHILDREN; - } else - wusbhc->ports_max = whc->n_devices; - wusbhc->mmcies_max = whc->n_mmc_ies; - wusbhc->start = whc_wusbhc_start; - wusbhc->stop = whc_wusbhc_stop; - wusbhc->mmcie_add = whc_mmcie_add; - wusbhc->mmcie_rm = whc_mmcie_rm; - wusbhc->dev_info_set = whc_dev_info_set; - wusbhc->bwa_set = whc_bwa_set; - wusbhc->set_num_dnts = whc_set_num_dnts; - wusbhc->set_ptk = whc_set_ptk; - wusbhc->set_gtk = whc_set_gtk; - - ret = wusbhc_create(wusbhc); - if (ret) - goto error_wusbhc_create; - - ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED); - if (ret) { - dev_err(dev, "cannot add HCD: %d\n", ret); - goto error_usb_add_hcd; - } - device_wakeup_enable(usb_hcd->self.controller); - - ret = wusbhc_b_create(wusbhc); - if (ret) { - dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret); - goto error_wusbhc_b_create; - } - - whc_dbg_init(whc); - - return 0; - -error_wusbhc_b_create: - usb_remove_hcd(usb_hcd); -error_usb_add_hcd: - 
wusbhc_destroy(wusbhc); -error_wusbhc_create: - uwb_rc_put(wusbhc->uwb_rc); -error_uwb_rc: - whc_clean_up(whc); -error_whc_init: - usb_put_hcd(usb_hcd); - return ret; -} - - -static void whc_remove(struct umc_dev *umc) -{ - struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev); - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - struct whc *whc = wusbhc_to_whc(wusbhc); - - if (usb_hcd) { - whc_dbg_clean_up(whc); - wusbhc_b_destroy(wusbhc); - usb_remove_hcd(usb_hcd); - wusbhc_destroy(wusbhc); - uwb_rc_put(wusbhc->uwb_rc); - whc_clean_up(whc); - usb_put_hcd(usb_hcd); - } -} - -static struct umc_driver whci_hc_driver = { - .name = "whci-hcd", - .cap_id = UMC_CAP_ID_WHCI_WUSB_HC, - .probe = whc_probe, - .remove = whc_remove, -}; - -static int __init whci_hc_driver_init(void) -{ - return umc_driver_register(&whci_hc_driver); -} -module_init(whci_hc_driver_init); - -static void __exit whci_hc_driver_exit(void) -{ - umc_driver_unregister(&whci_hc_driver); -} -module_exit(whci_hc_driver_exit); - -/* PCI device ID's that we handle (so it gets loaded) */ -static struct pci_device_id __used whci_hcd_id_table[] = { - { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, - { /* empty last entry */ } -}; -MODULE_DEVICE_TABLE(pci, whci_hcd_id_table); - -MODULE_DESCRIPTION("WHCI Wireless USB host controller driver"); -MODULE_AUTHOR("Cambridge Silicon Radio Ltd."); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/wusbcore/host/whci/hw.c b/drivers/staging/wusbcore/host/whci/hw.c deleted file mode 100644 index e4e8914abf42..000000000000 --- a/drivers/staging/wusbcore/host/whci/hw.c +++ /dev/null @@ -1,93 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless Host Controller (WHC) hardware access helpers. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - */ -#include <linux/kernel.h> -#include <linux/dma-mapping.h> - -#include "../../../uwb/include/umc.h" -#include "../../wusbhc.h" - -#include "whcd.h" - -void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val) -{ - unsigned long flags; - u32 cmd; - - spin_lock_irqsave(&whc->lock, flags); - - cmd = le_readl(whc->base + WUSBCMD); - cmd = (cmd & ~mask) | val; - le_writel(cmd, whc->base + WUSBCMD); - - spin_unlock_irqrestore(&whc->lock, flags); -} - -/** - * whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register - * @whc: the WHCI HC - * @cmd: command to start. - * @params: parameters for the command (the WUSBGENCMDPARAMS register value). - * @addr: pointer to any data for the command (may be NULL). - * @len: length of the data (if any). - */ -int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len) -{ - unsigned long flags; - dma_addr_t dma_addr; - int t; - int ret = 0; - - mutex_lock(&whc->mutex); - - /* Wait for previous command to complete. */ - t = wait_event_timeout(whc->cmd_wq, - (le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0, - WHC_GENCMD_TIMEOUT_MS); - if (t == 0) { - dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n", - le_readl(whc->base + WUSBGENCMDSTS), - le_readl(whc->base + WUSBGENCMDPARAMS)); - ret = -ETIMEDOUT; - goto out; - } - - if (addr) { - memcpy(whc->gen_cmd_buf, addr, len); - dma_addr = whc->gen_cmd_buf_dma; - } else - dma_addr = 0; - - /* Poke registers to start cmd. 
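whc_write_wusbcmd() above is a masked read-modify-write of a shared register, serialized by whc->lock so concurrent callers cannot lose each other's bits. The register semantics reduce to the one line shown in this standalone sketch (the register is simulated here; the real code uses le_readl()/le_writel() on MMIO under the spinlock):

#include <stdio.h>
#include <stdint.h>

#define CMD_RUN        0x01
#define CMD_ASYNC_EN   0x04

static uint32_t reg;   /* stand-in for the WUSBCMD register */

/* Clear the bits in @mask, then set the bits in @val (val is expected
 * to be a subset of mask), leaving all other bits untouched. */
static void write_cmd(uint32_t mask, uint32_t val)
{
	reg = (reg & ~mask) | val;
}

int main(void)
{
	write_cmd(CMD_RUN, CMD_RUN);            /* start the HC   */
	write_cmd(CMD_ASYNC_EN, CMD_ASYNC_EN);  /* enable the ASL */
	write_cmd(CMD_ASYNC_EN, 0);             /* disable it     */
	printf("WUSBCMD = 0x%02x\n", reg);      /* prints 0x01    */
	return 0;
}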
*/ - spin_lock_irqsave(&whc->lock, flags); - - le_writel(params, whc->base + WUSBGENCMDPARAMS); - le_writeq(dma_addr, whc->base + WUSBGENADDR); - - le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd, - whc->base + WUSBGENCMDSTS); - - spin_unlock_irqrestore(&whc->lock, flags); -out: - mutex_unlock(&whc->mutex); - - return ret; -} - -/** - * whc_hw_error - recover from a hardware error - * @whc: the WHCI HC that broke. - * @reason: a description of the failure. - * - * Recover from broken hardware with a full reset. - */ -void whc_hw_error(struct whc *whc, const char *reason) -{ - struct wusbhc *wusbhc = &whc->wusbhc; - - dev_err(&whc->umc->dev, "hardware error: %s\n", reason); - wusbhc_reset_all(wusbhc); -} diff --git a/drivers/staging/wusbcore/host/whci/init.c b/drivers/staging/wusbcore/host/whci/init.c deleted file mode 100644 index 55fd458a8f30..000000000000 --- a/drivers/staging/wusbcore/host/whci/init.c +++ /dev/null @@ -1,177 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless Host Controller (WHC) initialization. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - */ -#include <linux/kernel.h> -#include <linux/gfp.h> -#include <linux/dma-mapping.h> - -#include "../../../uwb/include/umc.h" -#include "../../wusbhc.h" - -#include "whcd.h" - -/* - * Reset the host controller. - */ -static void whc_hw_reset(struct whc *whc) -{ - le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD); - whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0, - 100, "reset"); -} - -static void whc_hw_init_di_buf(struct whc *whc) -{ - int d; - - /* Disable all entries in the Device Information buffer. */ - for (d = 0; d < whc->n_devices; d++) - whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE; - - le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR); -} - -static void whc_hw_init_dn_buf(struct whc *whc) -{ - /* Clear the Device Notification buffer to ensure the V (valid) - * bits are clear. */ - memset(whc->dn_buf, 0, 4096); - - le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR); -} - -int whc_init(struct whc *whc) -{ - u32 whcsparams; - int ret, i; - resource_size_t start, len; - - spin_lock_init(&whc->lock); - mutex_init(&whc->mutex); - init_waitqueue_head(&whc->cmd_wq); - init_waitqueue_head(&whc->async_list_wq); - init_waitqueue_head(&whc->periodic_list_wq); - whc->workqueue = alloc_ordered_workqueue(dev_name(&whc->umc->dev), 0); - if (whc->workqueue == NULL) { - ret = -ENOMEM; - goto error; - } - INIT_WORK(&whc->dn_work, whc_dn_work); - - INIT_WORK(&whc->async_work, scan_async_work); - INIT_LIST_HEAD(&whc->async_list); - INIT_LIST_HEAD(&whc->async_removed_list); - - INIT_WORK(&whc->periodic_work, scan_periodic_work); - for (i = 0; i < 5; i++) - INIT_LIST_HEAD(&whc->periodic_list[i]); - INIT_LIST_HEAD(&whc->periodic_removed_list); - - /* Map HC registers. */ - start = whc->umc->resource.start; - len = whc->umc->resource.end - start + 1; - if (!request_mem_region(start, len, "whci-hc")) { - dev_err(&whc->umc->dev, "can't request HC region\n"); - ret = -EBUSY; - goto error; - } - whc->base_phys = start; - whc->base = ioremap(start, len); - if (!whc->base) { - dev_err(&whc->umc->dev, "ioremap\n"); - ret = -ENOMEM; - goto error; - } - - whc_hw_reset(whc); - - /* Read maximum number of devices, keys and MMC IEs. 
*/ - whcsparams = le_readl(whc->base + WHCSPARAMS); - whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams); - whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams); - whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams); - - dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n", - whc->n_devices, whc->n_keys, whc->n_mmc_ies); - - whc->qset_pool = dma_pool_create("qset", &whc->umc->dev, - sizeof(struct whc_qset), 64, 0); - if (whc->qset_pool == NULL) { - ret = -ENOMEM; - goto error; - } - - ret = asl_init(whc); - if (ret < 0) - goto error; - ret = pzl_init(whc); - if (ret < 0) - goto error; - - /* Allocate and initialize a buffer for generic commands, the - Device Information buffer, and the Device Notification - buffer. */ - - whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN, - &whc->gen_cmd_buf_dma, GFP_KERNEL); - if (whc->gen_cmd_buf == NULL) { - ret = -ENOMEM; - goto error; - } - - whc->dn_buf = dma_alloc_coherent(&whc->umc->dev, - sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES, - &whc->dn_buf_dma, GFP_KERNEL); - if (!whc->dn_buf) { - ret = -ENOMEM; - goto error; - } - whc_hw_init_dn_buf(whc); - - whc->di_buf = dma_alloc_coherent(&whc->umc->dev, - sizeof(struct di_buf_entry) * whc->n_devices, - &whc->di_buf_dma, GFP_KERNEL); - if (!whc->di_buf) { - ret = -ENOMEM; - goto error; - } - whc_hw_init_di_buf(whc); - - return 0; - -error: - whc_clean_up(whc); - return ret; -} - -void whc_clean_up(struct whc *whc) -{ - resource_size_t len; - - if (whc->di_buf) - dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices, - whc->di_buf, whc->di_buf_dma); - if (whc->dn_buf) - dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES, - whc->dn_buf, whc->dn_buf_dma); - if (whc->gen_cmd_buf) - dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN, - whc->gen_cmd_buf, whc->gen_cmd_buf_dma); - - pzl_clean_up(whc); - asl_clean_up(whc); - - dma_pool_destroy(whc->qset_pool); - - len = resource_size(&whc->umc->resource); - if (whc->base) - iounmap(whc->base); - if (whc->base_phys) - release_mem_region(whc->base_phys, len); - - if (whc->workqueue) - destroy_workqueue(whc->workqueue); -} diff --git a/drivers/staging/wusbcore/host/whci/int.c b/drivers/staging/wusbcore/host/whci/int.c deleted file mode 100644 index bdbe35e9366f..000000000000 --- a/drivers/staging/wusbcore/host/whci/int.c +++ /dev/null @@ -1,82 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless Host Controller (WHC) interrupt handling. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. 
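whc_init()/whc_clean_up() above use the usual kernel error-path idiom: every allocation failure jumps to one label that calls a cleanup routine safe to run on a partially initialized structure, because each teardown step checks whether its resource was ever obtained. A userspace sketch of the same shape, with plain heap allocations standing in for dma_alloc_coherent():

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	void *cmd_buf;
	void *dn_buf;
	void *di_buf;
};

/* Safe to call with any subset of the buffers allocated; each free is
 * guarded by a check so the same routine serves both the error path
 * and normal teardown. */
static void ctx_clean_up(struct ctx *c)
{
	if (c->di_buf)
		free(c->di_buf);
	if (c->dn_buf)
		free(c->dn_buf);
	if (c->cmd_buf)
		free(c->cmd_buf);
}

static int ctx_init(struct ctx *c)
{
	c->cmd_buf = calloc(1, 64);
	if (!c->cmd_buf)
		goto error;
	c->dn_buf = calloc(1, 4096);
	if (!c->dn_buf)
		goto error;
	c->di_buf = calloc(1, 256);
	if (!c->di_buf)
		goto error;
	return 0;

error:
	ctx_clean_up(c);
	return -1;
}

int main(void)
{
	struct ctx c = { 0 };

	if (ctx_init(&c) == 0) {
		puts("initialized");
		ctx_clean_up(&c);
	}
	return 0;
}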
- */ -#include <linux/kernel.h> - -#include "../../../uwb/include/umc.h" -#include "../../wusbhc.h" - -#include "whcd.h" - -static void transfer_done(struct whc *whc) -{ - queue_work(whc->workqueue, &whc->async_work); - queue_work(whc->workqueue, &whc->periodic_work); -} - -irqreturn_t whc_int_handler(struct usb_hcd *hcd) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd); - struct whc *whc = wusbhc_to_whc(wusbhc); - u32 sts; - - sts = le_readl(whc->base + WUSBSTS); - if (!(sts & WUSBSTS_INT_MASK)) - return IRQ_NONE; - le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS); - - if (sts & WUSBSTS_GEN_CMD_DONE) - wake_up(&whc->cmd_wq); - - if (sts & WUSBSTS_HOST_ERR) - dev_err(&whc->umc->dev, "FIXME: host system error\n"); - - if (sts & WUSBSTS_ASYNC_SCHED_SYNCED) - wake_up(&whc->async_list_wq); - - if (sts & WUSBSTS_PERIODIC_SCHED_SYNCED) - wake_up(&whc->periodic_list_wq); - - if (sts & WUSBSTS_DNTS_INT) - queue_work(whc->workqueue, &whc->dn_work); - - /* - * A transfer completed (see [WHCI] section 4.7.1.2 for when - * this occurs). - */ - if (sts & (WUSBSTS_INT | WUSBSTS_ERR_INT)) - transfer_done(whc); - - return IRQ_HANDLED; -} - -static int process_dn_buf(struct whc *whc) -{ - struct wusbhc *wusbhc = &whc->wusbhc; - struct dn_buf_entry *dn; - int processed = 0; - - for (dn = whc->dn_buf; dn < whc->dn_buf + WHC_N_DN_ENTRIES; dn++) { - if (dn->status & WHC_DN_STATUS_VALID) { - wusbhc_handle_dn(wusbhc, dn->src_addr, - (struct wusb_dn_hdr *)dn->dn_data, - dn->msg_size); - dn->status &= ~WHC_DN_STATUS_VALID; - processed++; - } - } - return processed; -} - -void whc_dn_work(struct work_struct *work) -{ - struct whc *whc = container_of(work, struct whc, dn_work); - int processed; - - do { - processed = process_dn_buf(whc); - } while (processed); -} diff --git a/drivers/staging/wusbcore/host/whci/pzl.c b/drivers/staging/wusbcore/host/whci/pzl.c deleted file mode 100644 index 6dfc075f5798..000000000000 --- a/drivers/staging/wusbcore/host/whci/pzl.c +++ /dev/null @@ -1,404 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless Host Controller (WHC) periodic schedule management. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - */ -#include <linux/kernel.h> -#include <linux/gfp.h> -#include <linux/dma-mapping.h> -#include <linux/usb.h> - -#include "../../../uwb/include/umc.h" -#include "../../wusbhc.h" - -#include "whcd.h" - -static void update_pzl_pointers(struct whc *whc, int period, u64 addr) -{ - switch (period) { - case 0: - whc_qset_set_link_ptr(&whc->pz_list[0], addr); - whc_qset_set_link_ptr(&whc->pz_list[2], addr); - whc_qset_set_link_ptr(&whc->pz_list[4], addr); - whc_qset_set_link_ptr(&whc->pz_list[6], addr); - whc_qset_set_link_ptr(&whc->pz_list[8], addr); - whc_qset_set_link_ptr(&whc->pz_list[10], addr); - whc_qset_set_link_ptr(&whc->pz_list[12], addr); - whc_qset_set_link_ptr(&whc->pz_list[14], addr); - break; - case 1: - whc_qset_set_link_ptr(&whc->pz_list[1], addr); - whc_qset_set_link_ptr(&whc->pz_list[5], addr); - whc_qset_set_link_ptr(&whc->pz_list[9], addr); - whc_qset_set_link_ptr(&whc->pz_list[13], addr); - break; - case 2: - whc_qset_set_link_ptr(&whc->pz_list[3], addr); - whc_qset_set_link_ptr(&whc->pz_list[11], addr); - break; - case 3: - whc_qset_set_link_ptr(&whc->pz_list[7], addr); - break; - case 4: - whc_qset_set_link_ptr(&whc->pz_list[15], addr); - break; - } -} - -/* - * Return the 'period' to use for this qset. The minimum interval for - * the endpoint is used so whatever urbs are submitted the device is - * polled often enough. 
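update_pzl_pointers() above fans each period's qset list out across the 16-entry periodic zone list, so period-0 qsets are reachable from every other slot, period-1 from every fourth, and so on, while qset_get_period() (just below) clamps bInterval into the 6..10 range to pick one of five periods. The standalone sketch below reproduces the hard-coded slot table from a simple bit pattern; the formula is an observation about the table, not something stated in the code:

#include <stdio.h>

/* Same clamping as qset_get_period(): bInterval 6..10 -> period 0..4. */
static int period_from_binterval(int bInterval)
{
	if (bInterval < 6)
		bInterval = 6;
	if (bInterval > 10)
		bInterval = 10;
	return bInterval - 6;
}

int main(void)
{
	int period, slot;

	for (period = 0; period < 5; period++) {
		printf("period %d (bInterval >= %d) -> slots:",
		       period, period + 6);
		for (slot = 0; slot < 16; slot++) {
			/* A period-p head hangs off every 2^(p+1)-th slot,
			 * offset by 2^p - 1; this reproduces the table in
			 * update_pzl_pointers(). */
			if ((slot & ((2 << period) - 1)) == ((1 << period) - 1))
				printf(" %d", slot);
		}
		printf("\n");
	}
	return 0;
}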
- */ -static int qset_get_period(struct whc *whc, struct whc_qset *qset) -{ - uint8_t bInterval = qset->ep->desc.bInterval; - - if (bInterval < 6) - bInterval = 6; - if (bInterval > 10) - bInterval = 10; - return bInterval - 6; -} - -static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset) -{ - int period; - - period = qset_get_period(whc, qset); - - qset_clear(whc, qset); - list_move(&qset->list_node, &whc->periodic_list[period]); - qset->in_sw_list = true; -} - -static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset) -{ - list_move(&qset->list_node, &whc->periodic_removed_list); - qset->in_hw_list = false; - qset->in_sw_list = false; -} - -/** - * pzl_process_qset - process any recently inactivated or halted qTDs - * in a qset. - * - * After inactive qTDs are removed, new qTDs can be added if the - * urb queue still contains URBs. - * - * Returns the schedule updates required. - */ -static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset) -{ - enum whc_update update = 0; - uint32_t status = 0; - - while (qset->ntds) { - struct whc_qtd *td; - - td = &qset->qtd[qset->td_start]; - status = le32_to_cpu(td->status); - - /* - * Nothing to do with a still active qTD. - */ - if (status & QTD_STS_ACTIVE) - break; - - if (status & QTD_STS_HALTED) { - /* Ug, an error. */ - process_halted_qtd(whc, qset, td); - /* A halted qTD always triggers an update - because the qset was either removed or - reactivated. */ - update |= WHC_UPDATE_UPDATED; - goto done; - } - - /* Mmm, a completed qTD. */ - process_inactive_qtd(whc, qset, td); - } - - if (!qset->remove) - update |= qset_add_qtds(whc, qset); - -done: - /* - * If there are no qTDs in this qset, remove it from the PZL. - */ - if (qset->remove && qset->ntds == 0) { - pzl_qset_remove(whc, qset); - update |= WHC_UPDATE_REMOVED; - } - - return update; -} - -/** - * pzl_start - start the periodic schedule - * @whc: the WHCI host controller - * - * The PZL must be valid (e.g., all entries in the list should have - * the T bit set). - */ -void pzl_start(struct whc *whc) -{ - le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE); - - whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN); - whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, - WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED, - 1000, "start PZL"); -} - -/** - * pzl_stop - stop the periodic schedule - * @whc: the WHCI host controller - */ -void pzl_stop(struct whc *whc) -{ - whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0); - whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, - WUSBSTS_PERIODIC_SCHED, 0, - 1000, "stop PZL"); -} - -/** - * pzl_update - request a PZL update and wait for the hardware to be synced - * @whc: the WHCI HC - * @wusbcmd: WUSBCMD value to start the update. - * - * If the WUSB HC is inactive (i.e., the PZL is stopped) then the - * update must be skipped as the hardware may not respond to update - * requests. 
- */ -void pzl_update(struct whc *whc, uint32_t wusbcmd) -{ - struct wusbhc *wusbhc = &whc->wusbhc; - long t; - - mutex_lock(&wusbhc->mutex); - if (wusbhc->active) { - whc_write_wusbcmd(whc, wusbcmd, wusbcmd); - t = wait_event_timeout( - whc->periodic_list_wq, - (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0, - msecs_to_jiffies(1000)); - if (t == 0) - whc_hw_error(whc, "PZL update timeout"); - } - mutex_unlock(&wusbhc->mutex); -} - -static void update_pzl_hw_view(struct whc *whc) -{ - struct whc_qset *qset, *t; - int period; - u64 tmp_qh = 0; - - for (period = 0; period < 5; period++) { - list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) { - whc_qset_set_link_ptr(&qset->qh.link, tmp_qh); - tmp_qh = qset->qset_dma; - qset->in_hw_list = true; - } - update_pzl_pointers(whc, period, tmp_qh); - } -} - -/** - * scan_periodic_work - scan the PZL for qsets to process. - * - * Process each qset in the PZL in turn and then signal the WHC that - * the PZL has been updated. - * - * Then start, stop or update the periodic schedule as required. - */ -void scan_periodic_work(struct work_struct *work) -{ - struct whc *whc = container_of(work, struct whc, periodic_work); - struct whc_qset *qset, *t; - enum whc_update update = 0; - int period; - - spin_lock_irq(&whc->lock); - - for (period = 4; period >= 0; period--) { - list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) { - if (!qset->in_hw_list) - update |= WHC_UPDATE_ADDED; - update |= pzl_process_qset(whc, qset); - } - } - - if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED)) - update_pzl_hw_view(whc); - - spin_unlock_irq(&whc->lock); - - if (update) { - uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB; - if (update & WHC_UPDATE_REMOVED) - wusbcmd |= WUSBCMD_PERIODIC_QSET_RM; - pzl_update(whc, wusbcmd); - } - - /* - * Now that the PZL is updated, complete the removal of any - * removed qsets. - * - * If the qset was to be reset, do so and reinsert it into the - * PZL if it has pending transfers. - */ - spin_lock_irq(&whc->lock); - - list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) { - qset_remove_complete(whc, qset); - if (qset->reset) { - qset_reset(whc, qset); - if (!list_empty(&qset->stds)) { - qset_insert_in_sw_list(whc, qset); - queue_work(whc->workqueue, &whc->periodic_work); - } - } - } - - spin_unlock_irq(&whc->lock); -} - -/** - * pzl_urb_enqueue - queue an URB onto the periodic list (PZL) - * @whc: the WHCI host controller - * @urb: the URB to enqueue - * @mem_flags: flags for any memory allocations - * - * The qset for the endpoint is obtained and the urb queued on to it. - * - * Work is scheduled to update the hardware's view of the PZL. 
- */ -int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags) -{ - struct whc_qset *qset; - int err; - unsigned long flags; - - spin_lock_irqsave(&whc->lock, flags); - - err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb); - if (err < 0) { - spin_unlock_irqrestore(&whc->lock, flags); - return err; - } - - qset = get_qset(whc, urb, GFP_ATOMIC); - if (qset == NULL) - err = -ENOMEM; - else - err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); - if (!err) { - if (!qset->in_sw_list && !qset->remove) - qset_insert_in_sw_list(whc, qset); - } else - usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb); - - spin_unlock_irqrestore(&whc->lock, flags); - - if (!err) - queue_work(whc->workqueue, &whc->periodic_work); - - return err; -} - -/** - * pzl_urb_dequeue - remove an URB (qset) from the periodic list - * @whc: the WHCI host controller - * @urb: the URB to dequeue - * @status: the current status of the URB - * - * URBs that do yet have qTDs can simply be removed from the software - * queue, otherwise the qset must be removed so the qTDs can be safely - * removed. - */ -int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status) -{ - struct whc_urb *wurb = urb->hcpriv; - struct whc_qset *qset = wurb->qset; - struct whc_std *std, *t; - bool has_qtd = false; - int ret; - unsigned long flags; - - spin_lock_irqsave(&whc->lock, flags); - - ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status); - if (ret < 0) - goto out; - - list_for_each_entry_safe(std, t, &qset->stds, list_node) { - if (std->urb == urb) { - if (std->qtd) - has_qtd = true; - qset_free_std(whc, std); - } else - std->qtd = NULL; /* so this std is re-added when the qset is */ - } - - if (has_qtd) { - pzl_qset_remove(whc, qset); - update_pzl_hw_view(whc); - wurb->status = status; - wurb->is_async = false; - queue_work(whc->workqueue, &wurb->dequeue_work); - } else - qset_remove_urb(whc, qset, urb, status); -out: - spin_unlock_irqrestore(&whc->lock, flags); - - return ret; -} - -/** - * pzl_qset_delete - delete a qset from the PZL - */ -void pzl_qset_delete(struct whc *whc, struct whc_qset *qset) -{ - qset->remove = 1; - queue_work(whc->workqueue, &whc->periodic_work); - qset_delete(whc, qset); -} - -/** - * pzl_init - initialize the periodic zone list - * @whc: the WHCI host controller - */ -int pzl_init(struct whc *whc) -{ - int i; - - whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16, - &whc->pz_list_dma, GFP_KERNEL); - if (whc->pz_list == NULL) - return -ENOMEM; - - /* Set T bit on all elements in PZL. */ - for (i = 0; i < 16; i++) - whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T); - - le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE); - - return 0; -} - -/** - * pzl_clean_up - free PZL resources - * @whc: the WHCI host controller - * - * The PZL is stopped and empty. - */ -void pzl_clean_up(struct whc *whc) -{ - if (whc->pz_list) - dma_free_coherent(&whc->umc->dev, sizeof(u64) * 16, whc->pz_list, - whc->pz_list_dma); -} diff --git a/drivers/staging/wusbcore/host/whci/qset.c b/drivers/staging/wusbcore/host/whci/qset.c deleted file mode 100644 index 66459b77dc77..000000000000 --- a/drivers/staging/wusbcore/host/whci/qset.c +++ /dev/null @@ -1,831 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless Host Controller (WHC) qset management. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. 
- */ -#include <linux/kernel.h> -#include <linux/dma-mapping.h> -#include <linux/slab.h> -#include <linux/usb.h> - -#include "../../../uwb/include/umc.h" -#include "../../wusbhc.h" - -#include "whcd.h" - -struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags) -{ - struct whc_qset *qset; - dma_addr_t dma; - - qset = dma_pool_zalloc(whc->qset_pool, mem_flags, &dma); - if (qset == NULL) - return NULL; - - qset->qset_dma = dma; - qset->whc = whc; - - INIT_LIST_HEAD(&qset->list_node); - INIT_LIST_HEAD(&qset->stds); - - return qset; -} - -/** - * qset_fill_qh - fill the static endpoint state in a qset's QHead - * @qset: the qset whose QH needs initializing with static endpoint - * state - * @urb: an urb for a transfer to this endpoint - */ -static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb) -{ - struct usb_device *usb_dev = urb->dev; - struct wusb_dev *wusb_dev = usb_dev->wusb_dev; - struct usb_wireless_ep_comp_descriptor *epcd; - bool is_out; - uint8_t phy_rate; - - is_out = usb_pipeout(urb->pipe); - - qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize); - - epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; - if (epcd) { - qset->max_seq = epcd->bMaxSequence; - qset->max_burst = epcd->bMaxBurst; - } else { - qset->max_seq = 2; - qset->max_burst = 1; - } - - /* - * Initial PHY rate is 53.3 Mbit/s for control endpoints or - * the maximum supported by the device for other endpoints - * (unless limited by the user). - */ - if (usb_pipecontrol(urb->pipe)) - phy_rate = UWB_PHY_RATE_53; - else { - uint16_t phy_rates; - - phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates); - phy_rate = fls(phy_rates) - 1; - if (phy_rate > whc->wusbhc.phy_rate) - phy_rate = whc->wusbhc.phy_rate; - } - - qset->qh.info1 = cpu_to_le32( - QH_INFO1_EP(usb_pipeendpoint(urb->pipe)) - | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN) - | usb_pipe_to_qh_type(urb->pipe) - | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum)) - | QH_INFO1_MAX_PKT_LEN(qset->max_packet) - ); - qset->qh.info2 = cpu_to_le32( - QH_INFO2_BURST(qset->max_burst) - | QH_INFO2_DBP(0) - | QH_INFO2_MAX_COUNT(3) - | QH_INFO2_MAX_RETRY(3) - | QH_INFO2_MAX_SEQ(qset->max_seq - 1) - ); - /* FIXME: where can we obtain these Tx parameters from? Why - * doesn't the chip know what Tx power to use? It knows the Rx - * strength and can presumably guess the Tx power required - * from that? */ - qset->qh.info3 = cpu_to_le32( - QH_INFO3_TX_RATE(phy_rate) - | QH_INFO3_TX_PWR(0) /* 0 == max power */ - ); - - qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); -} - -/** - * qset_clear - clear fields in a qset so it may be reinserted into a - * schedule. - * - * The sequence number and current window are not cleared (see - * qset_reset()). - */ -void qset_clear(struct whc *whc, struct whc_qset *qset) -{ - qset->td_start = qset->td_end = qset->ntds = 0; - - qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T); - qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK; - qset->qh.err_count = 0; - qset->qh.scratch[0] = 0; - qset->qh.scratch[1] = 0; - qset->qh.scratch[2] = 0; - - memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay)); - - init_completion(&qset->remove_complete); -} - -/** - * qset_reset - reset endpoint state in a qset. - * - * Clears the sequence number and current window. This qset must not - * be in the ASL or PZL. 
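qset_fill_qh() above picks the initial PHY rate: control endpoints get the base 53.3 Mbit/s rate, everything else gets the highest rate the device advertises in its wPHYRates bitmask, capped by the host's own limit, via fls(). A standalone sketch of that selection (the rate indices and host cap below are made-up values):

#include <stdio.h>
#include <stdint.h>

/* Highest set bit, 1-based (fls(0) == 0), like the kernel's fls(). */
static int fls32(uint32_t x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Pick the fastest rate index the device advertises in its bitmask of
 * supported PHY rates, but never above what the host allows. */
static int pick_phy_rate(uint16_t dev_rates, int host_max)
{
	int rate = fls32(dev_rates) - 1;

	if (rate > host_max)
		rate = host_max;
	return rate;
}

int main(void)
{
	/* Hypothetical: device supports rate indices 0..7, host capped at 5. */
	printf("rate index %d\n", pick_phy_rate(0x00ff, 5));  /* prints 5 */
	return 0;
}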
- */ -void qset_reset(struct whc *whc, struct whc_qset *qset) -{ - qset->reset = 0; - - qset->qh.status &= ~QH_STATUS_SEQ_MASK; - qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); -} - -/** - * get_qset - get the qset for an async endpoint - * - * A new qset is created if one does not already exist. - */ -struct whc_qset *get_qset(struct whc *whc, struct urb *urb, - gfp_t mem_flags) -{ - struct whc_qset *qset; - - qset = urb->ep->hcpriv; - if (qset == NULL) { - qset = qset_alloc(whc, mem_flags); - if (qset == NULL) - return NULL; - - qset->ep = urb->ep; - urb->ep->hcpriv = qset; - qset_fill_qh(whc, qset, urb); - } - return qset; -} - -void qset_remove_complete(struct whc *whc, struct whc_qset *qset) -{ - qset->remove = 0; - list_del_init(&qset->list_node); - complete(&qset->remove_complete); -} - -/** - * qset_add_qtds - add qTDs for an URB to a qset - * - * Returns true if the list (ASL/PZL) must be updated because (for a - * WHCI 0.95 controller) an activated qTD was pointed to be iCur. - */ -enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset) -{ - struct whc_std *std; - enum whc_update update = 0; - - list_for_each_entry(std, &qset->stds, list_node) { - struct whc_qtd *qtd; - uint32_t status; - - if (qset->ntds >= WHCI_QSET_TD_MAX - || (qset->pause_after_urb && std->urb != qset->pause_after_urb)) - break; - - if (std->qtd) - continue; /* already has a qTD */ - - qtd = std->qtd = &qset->qtd[qset->td_end]; - - /* Fill in setup bytes for control transfers. */ - if (usb_pipecontrol(std->urb->pipe)) - memcpy(qtd->setup, std->urb->setup_packet, 8); - - status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len); - - if (whc_std_last(std) && usb_pipeout(std->urb->pipe)) - status |= QTD_STS_LAST_PKT; - - /* - * For an IN transfer the iAlt field should be set so - * the h/w will automatically advance to the next - * transfer. However, if there are 8 or more TDs - * remaining in this transfer then iAlt cannot be set - * as it could point to somewhere in this transfer. - */ - if (std->ntds_remaining < WHCI_QSET_TD_MAX) { - int ialt; - ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX; - status |= QTD_STS_IALT(ialt); - } else if (usb_pipein(std->urb->pipe)) - qset->pause_after_urb = std->urb; - - if (std->num_pointers) - qtd->options = cpu_to_le32(QTD_OPT_IOC); - else - qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL); - qtd->page_list_ptr = cpu_to_le64(std->dma_addr); - - qtd->status = cpu_to_le32(status); - - if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end) - update = WHC_UPDATE_UPDATED; - - if (++qset->td_end >= WHCI_QSET_TD_MAX) - qset->td_end = 0; - qset->ntds++; - } - - return update; -} - -/** - * qset_remove_qtd - remove the first qTD from a qset. - * - * The qTD might be still active (if it's part of a IN URB that - * resulted in a short read) so ensure it's deactivated. 
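qset_add_qtds() above fills a small ring of hardware qTD slots, advancing td_end modulo WHCI_QSET_TD_MAX, and programs iAlt to the slot just past the current URB's remaining TDs so the controller can advance to the next transfer on its own (for example after a short IN); iAlt is left unset when the URB still needs a full ring's worth of TDs, since the computed slot would land inside the same transfer. A simplified standalone model of just the index arithmetic (no hardware fields, no pause_after_urb handling):

#include <stdio.h>

#define TD_MAX 8   /* WHCI_QSET_TD_MAX in the driver */

struct ring {
	int td_start;   /* oldest occupied slot */
	int td_end;     /* next free slot       */
	int ntds;       /* occupied slot count  */
};

/* Claim the next TD slot and return the iAlt slot to program, or -1
 * when iAlt cannot be used, or -2 when the ring is full. */
static int ring_add(struct ring *r, int ntds_remaining)
{
	int ialt = -1;

	if (r->ntds >= TD_MAX)
		return -2;

	if (ntds_remaining < TD_MAX)
		ialt = (r->td_end + ntds_remaining) % TD_MAX;

	if (++r->td_end >= TD_MAX)
		r->td_end = 0;
	r->ntds++;
	return ialt;
}

int main(void)
{
	struct ring r = { 0 };
	int i;

	for (i = 3; i >= 1; i--)                 /* a 3-TD transfer */
		printf("slot used, iAlt = %d\n", ring_add(&r, i));
	return 0;
}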
- */ -static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset) -{ - qset->qtd[qset->td_start].status = 0; - - if (++qset->td_start >= WHCI_QSET_TD_MAX) - qset->td_start = 0; - qset->ntds--; -} - -static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std) -{ - struct scatterlist *sg; - void *bounce; - size_t remaining, offset; - - bounce = std->bounce_buf; - remaining = std->len; - - sg = std->bounce_sg; - offset = std->bounce_offset; - - while (remaining) { - size_t len; - - len = min(sg->length - offset, remaining); - memcpy(sg_virt(sg) + offset, bounce, len); - - bounce += len; - remaining -= len; - - offset += len; - if (offset >= sg->length) { - sg = sg_next(sg); - offset = 0; - } - } - -} - -/** - * qset_free_std - remove an sTD and free it. - * @whc: the WHCI host controller - * @std: the sTD to remove and free. - */ -void qset_free_std(struct whc *whc, struct whc_std *std) -{ - list_del(&std->list_node); - if (std->bounce_buf) { - bool is_out = usb_pipeout(std->urb->pipe); - dma_addr_t dma_addr; - - if (std->num_pointers) - dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr); - else - dma_addr = std->dma_addr; - - dma_unmap_single(whc->wusbhc.dev, dma_addr, - std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - if (!is_out) - qset_copy_bounce_to_sg(whc, std); - kfree(std->bounce_buf); - } - if (std->pl_virt) { - if (!dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) - dma_unmap_single(whc->wusbhc.dev, std->dma_addr, - std->num_pointers * sizeof(struct whc_page_list_entry), - DMA_TO_DEVICE); - kfree(std->pl_virt); - std->pl_virt = NULL; - } - kfree(std); -} - -/** - * qset_remove_qtds - remove an URB's qTDs (and sTDs). - */ -static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset, - struct urb *urb) -{ - struct whc_std *std, *t; - - list_for_each_entry_safe(std, t, &qset->stds, list_node) { - if (std->urb != urb) - break; - if (std->qtd != NULL) - qset_remove_qtd(whc, qset); - qset_free_std(whc, std); - } -} - -/** - * qset_free_stds - free any remaining sTDs for an URB. - */ -static void qset_free_stds(struct whc_qset *qset, struct urb *urb) -{ - struct whc_std *std, *t; - - list_for_each_entry_safe(std, t, &qset->stds, list_node) { - if (std->urb == urb) - qset_free_std(qset->whc, std); - } -} - -static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags) -{ - dma_addr_t dma_addr = std->dma_addr; - dma_addr_t sp, ep; - size_t pl_len; - int p; - - /* Short buffers don't need a page list. */ - if (std->len <= WHCI_PAGE_SIZE) { - std->num_pointers = 0; - return 0; - } - - sp = dma_addr & ~(WHCI_PAGE_SIZE-1); - ep = dma_addr + std->len; - std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); - - pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); - std->pl_virt = kmalloc(pl_len, mem_flags); - if (std->pl_virt == NULL) - return -ENOMEM; - std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE); - if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) { - kfree(std->pl_virt); - return -EFAULT; - } - - for (p = 0; p < std->num_pointers; p++) { - std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); - dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1); - } - - return 0; -} - -/** - * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system. 
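qset_fill_page_list() above sizes the page list by aligning the buffer's start address down to a page boundary and rounding the covered span up to whole pages, skipping the list entirely for buffers of one page or less. The same arithmetic as a standalone function:

#include <stdio.h>
#include <stdint.h>

#define WHCI_PAGE_SIZE 4096u

/* Number of page-list entries needed for a buffer of @len bytes at bus
 * address @addr: align the start down to a page, then round the span
 * up to whole pages.  One page or less needs no list at all. */
static unsigned int page_list_entries(uint64_t addr, size_t len)
{
	uint64_t sp, ep;

	if (len <= WHCI_PAGE_SIZE)
		return 0;

	sp = addr & ~(uint64_t)(WHCI_PAGE_SIZE - 1);
	ep = addr + len;
	return (ep - sp + WHCI_PAGE_SIZE - 1) / WHCI_PAGE_SIZE;
}

int main(void)
{
	/* 6000 bytes starting 100 bytes into a page touch 2 pages,
	 * 9000 bytes starting at the same offset touch 3. */
	printf("%u %u\n",
	       page_list_entries(0x1000 + 100, 6000),
	       page_list_entries(0x1000 + 100, 9000));
	return 0;
}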
- */ -static void urb_dequeue_work(struct work_struct *work) -{ - struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work); - struct whc_qset *qset = wurb->qset; - struct whc *whc = qset->whc; - unsigned long flags; - - if (wurb->is_async) - asl_update(whc, WUSBCMD_ASYNC_UPDATED - | WUSBCMD_ASYNC_SYNCED_DB - | WUSBCMD_ASYNC_QSET_RM); - else - pzl_update(whc, WUSBCMD_PERIODIC_UPDATED - | WUSBCMD_PERIODIC_SYNCED_DB - | WUSBCMD_PERIODIC_QSET_RM); - - spin_lock_irqsave(&whc->lock, flags); - qset_remove_urb(whc, qset, wurb->urb, wurb->status); - spin_unlock_irqrestore(&whc->lock, flags); -} - -static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset, - struct urb *urb, gfp_t mem_flags) -{ - struct whc_std *std; - - std = kzalloc(sizeof(struct whc_std), mem_flags); - if (std == NULL) - return NULL; - - std->urb = urb; - std->qtd = NULL; - - INIT_LIST_HEAD(&std->list_node); - list_add_tail(&std->list_node, &qset->stds); - - return std; -} - -static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb, - gfp_t mem_flags) -{ - size_t remaining; - struct scatterlist *sg; - int i; - int ntds = 0; - struct whc_std *std = NULL; - struct whc_page_list_entry *new_pl_virt; - dma_addr_t prev_end = 0; - size_t pl_len; - int p = 0; - - remaining = urb->transfer_buffer_length; - - for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { - dma_addr_t dma_addr; - size_t dma_remaining; - dma_addr_t sp, ep; - int num_pointers; - - if (remaining == 0) { - break; - } - - dma_addr = sg_dma_address(sg); - dma_remaining = min_t(size_t, sg_dma_len(sg), remaining); - - while (dma_remaining) { - size_t dma_len; - - /* - * We can use the previous std (if it exists) provided that: - * - the previous one ended on a page boundary. - * - the current one begins on a page boundary. - * - the previous one isn't full. - * - * If a new std is needed but the previous one - * was not a whole number of packets then this - * sg list cannot be mapped onto multiple - * qTDs. Return an error and let the caller - * sort it out. - */ - if (!std - || (prev_end & (WHCI_PAGE_SIZE-1)) - || (dma_addr & (WHCI_PAGE_SIZE-1)) - || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) { - if (std && std->len % qset->max_packet != 0) - return -EINVAL; - std = qset_new_std(whc, qset, urb, mem_flags); - if (std == NULL) { - return -ENOMEM; - } - ntds++; - p = 0; - } - - dma_len = dma_remaining; - - /* - * If the remainder of this element doesn't - * fit in a single qTD, limit the qTD to a - * whole number of packets. This allows the - * remainder to go into the next qTD. 
- */ - if (std->len + dma_len > QTD_MAX_XFER_SIZE) { - dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet) - * qset->max_packet - std->len; - } - - std->len += dma_len; - std->ntds_remaining = -1; /* filled in later */ - - sp = dma_addr & ~(WHCI_PAGE_SIZE-1); - ep = dma_addr + dma_len; - num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); - std->num_pointers += num_pointers; - - pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); - - new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags); - if (new_pl_virt == NULL) { - kfree(std->pl_virt); - std->pl_virt = NULL; - return -ENOMEM; - } - std->pl_virt = new_pl_virt; - - for (;p < std->num_pointers; p++) { - std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); - dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1); - } - - prev_end = dma_addr = ep; - dma_remaining -= dma_len; - remaining -= dma_len; - } - } - - /* Now the number of stds is know, go back and fill in - std->ntds_remaining. */ - list_for_each_entry(std, &qset->stds, list_node) { - if (std->ntds_remaining == -1) { - pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); - std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, - pl_len, DMA_TO_DEVICE); - if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) - return -EFAULT; - std->ntds_remaining = ntds--; - } - } - return 0; -} - -/** - * qset_add_urb_sg_linearize - add an urb with sg list, copying the data - * - * If the URB contains an sg list whose elements cannot be directly - * mapped to qTDs then the data must be transferred via bounce - * buffers. - */ -static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset, - struct urb *urb, gfp_t mem_flags) -{ - bool is_out = usb_pipeout(urb->pipe); - size_t max_std_len; - size_t remaining; - int ntds = 0; - struct whc_std *std = NULL; - void *bounce = NULL; - struct scatterlist *sg; - int i; - - /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */ - max_std_len = qset->max_burst * qset->max_packet; - - remaining = urb->transfer_buffer_length; - - for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { - size_t len; - size_t sg_remaining; - void *orig; - - if (remaining == 0) { - break; - } - - sg_remaining = min_t(size_t, remaining, sg->length); - orig = sg_virt(sg); - - while (sg_remaining) { - if (!std || std->len == max_std_len) { - std = qset_new_std(whc, qset, urb, mem_flags); - if (std == NULL) - return -ENOMEM; - std->bounce_buf = kmalloc(max_std_len, mem_flags); - if (std->bounce_buf == NULL) - return -ENOMEM; - std->bounce_sg = sg; - std->bounce_offset = orig - sg_virt(sg); - bounce = std->bounce_buf; - ntds++; - } - - len = min(sg_remaining, max_std_len - std->len); - - if (is_out) - memcpy(bounce, orig, len); - - std->len += len; - std->ntds_remaining = -1; /* filled in later */ - - bounce += len; - orig += len; - sg_remaining -= len; - remaining -= len; - } - } - - /* - * For each of the new sTDs, map the bounce buffers, create - * page lists (if necessary), and fill in std->ntds_remaining. - */ - list_for_each_entry(std, &qset->stds, list_node) { - if (std->ntds_remaining != -1) - continue; - - std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len, - is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - if (dma_mapping_error(&whc->umc->dev, std->dma_addr)) - return -EFAULT; - - if (qset_fill_page_list(whc, std, mem_flags) < 0) - return -ENOMEM; - - std->ntds_remaining = ntds--; - } - - return 0; -} - -/** - * qset_add_urb - add an urb to the qset's queue. 
- * - * The URB is chopped into sTDs, one for each qTD that will required. - * At least one qTD (and sTD) is required even if the transfer has no - * data (e.g., for some control transfers). - */ -int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, - gfp_t mem_flags) -{ - struct whc_urb *wurb; - int remaining = urb->transfer_buffer_length; - u64 transfer_dma = urb->transfer_dma; - int ntds_remaining; - int ret; - - wurb = kzalloc(sizeof(struct whc_urb), mem_flags); - if (wurb == NULL) - goto err_no_mem; - urb->hcpriv = wurb; - wurb->qset = qset; - wurb->urb = urb; - INIT_WORK(&wurb->dequeue_work, urb_dequeue_work); - - if (urb->num_sgs) { - ret = qset_add_urb_sg(whc, qset, urb, mem_flags); - if (ret == -EINVAL) { - qset_free_stds(qset, urb); - ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags); - } - if (ret < 0) - goto err_no_mem; - return 0; - } - - ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE); - if (ntds_remaining == 0) - ntds_remaining = 1; - - while (ntds_remaining) { - struct whc_std *std; - size_t std_len; - - std_len = remaining; - if (std_len > QTD_MAX_XFER_SIZE) - std_len = QTD_MAX_XFER_SIZE; - - std = qset_new_std(whc, qset, urb, mem_flags); - if (std == NULL) - goto err_no_mem; - - std->dma_addr = transfer_dma; - std->len = std_len; - std->ntds_remaining = ntds_remaining; - - if (qset_fill_page_list(whc, std, mem_flags) < 0) - goto err_no_mem; - - ntds_remaining--; - remaining -= std_len; - transfer_dma += std_len; - } - - return 0; - -err_no_mem: - qset_free_stds(qset, urb); - return -ENOMEM; -} - -/** - * qset_remove_urb - remove an URB from the urb queue. - * - * The URB is returned to the USB subsystem. - */ -void qset_remove_urb(struct whc *whc, struct whc_qset *qset, - struct urb *urb, int status) -{ - struct wusbhc *wusbhc = &whc->wusbhc; - struct whc_urb *wurb = urb->hcpriv; - - usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb); - /* Drop the lock as urb->complete() may enqueue another urb. */ - spin_unlock(&whc->lock); - wusbhc_giveback_urb(wusbhc, urb, status); - spin_lock(&whc->lock); - - kfree(wurb); -} - -/** - * get_urb_status_from_qtd - get the completed urb status from qTD status - * @urb: completed urb - * @status: qTD status - */ -static int get_urb_status_from_qtd(struct urb *urb, u32 status) -{ - if (status & QTD_STS_HALTED) { - if (status & QTD_STS_DBE) - return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM; - else if (status & QTD_STS_BABBLE) - return -EOVERFLOW; - else if (status & QTD_STS_RCE) - return -ETIME; - return -EPIPE; - } - if (usb_pipein(urb->pipe) - && (urb->transfer_flags & URB_SHORT_NOT_OK) - && urb->actual_length < urb->transfer_buffer_length) - return -EREMOTEIO; - return 0; -} - -/** - * process_inactive_qtd - process an inactive (but not halted) qTD. - * - * Update the urb with the transfer bytes from the qTD, if the urb is - * completely transferred or (in the case of an IN only) the LPF is - * set, then the transfer is complete and the urb should be returned - * to the system. 
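[Editor's sketch] For the linear (non-scatter-gather) path in qset_add_urb() above, the chunking rule is one sTD per QTD_MAX_XFER_SIZE of data, with at least one sTD even for a zero-length transfer. A hedged restatement of just that arithmetic; the helper name is made up for illustration.

/* Illustrative only: how many sTDs a linear transfer of 'len' bytes needs. */
static int ntds_for_transfer(size_t len)
{
        int ntds = DIV_ROUND_UP(len, QTD_MAX_XFER_SIZE);

        /* control transfers with no data stage still need one TD */
        return ntds ? ntds : 1;
}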
- */ -void process_inactive_qtd(struct whc *whc, struct whc_qset *qset, - struct whc_qtd *qtd) -{ - struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node); - struct urb *urb = std->urb; - uint32_t status; - bool complete; - - status = le32_to_cpu(qtd->status); - - urb->actual_length += std->len - QTD_STS_TO_LEN(status); - - if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT)) - complete = true; - else - complete = whc_std_last(std); - - qset_remove_qtd(whc, qset); - qset_free_std(whc, std); - - /* - * Transfers for this URB are complete? Then return it to the - * USB subsystem. - */ - if (complete) { - qset_remove_qtds(whc, qset, urb); - qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status)); - - /* - * If iAlt isn't valid then the hardware didn't - * advance iCur. Adjust the start and end pointers to - * match iCur. - */ - if (!(status & QTD_STS_IALT_VALID)) - qset->td_start = qset->td_end - = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status)); - qset->pause_after_urb = NULL; - } -} - -/** - * process_halted_qtd - process a qset with a halted qtd - * - * Remove all the qTDs for the failed URB and return the failed URB to - * the USB subsystem. Then remove all other qTDs so the qset can be - * removed. - * - * FIXME: this is the point where rate adaptation can be done. If a - * transfer failed because it exceeded the maximum number of retries - * then it could be reactivated with a slower rate without having to - * remove the qset. - */ -void process_halted_qtd(struct whc *whc, struct whc_qset *qset, - struct whc_qtd *qtd) -{ - struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node); - struct urb *urb = std->urb; - int urb_status; - - urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status)); - - qset_remove_qtds(whc, qset, urb); - qset_remove_urb(whc, qset, urb, urb_status); - - list_for_each_entry(std, &qset->stds, list_node) { - if (qset->ntds == 0) - break; - qset_remove_qtd(whc, qset); - std->qtd = NULL; - } - - qset->remove = 1; -} - -void qset_free(struct whc *whc, struct whc_qset *qset) -{ - dma_pool_free(whc->qset_pool, qset, qset->qset_dma); -} - -/** - * qset_delete - wait for a qset to be unused, then free it. - */ -void qset_delete(struct whc *whc, struct whc_qset *qset) -{ - wait_for_completion(&qset->remove_complete); - qset_free(whc, qset); -} diff --git a/drivers/staging/wusbcore/host/whci/whcd.h b/drivers/staging/wusbcore/host/whci/whcd.h deleted file mode 100644 index a442a2589e83..000000000000 --- a/drivers/staging/wusbcore/host/whci/whcd.h +++ /dev/null @@ -1,202 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless Host Controller (WHC) private header. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - */ -#ifndef __WHCD_H -#define __WHCD_H - -#include <linux/workqueue.h> - -#include "../../../uwb/include/whci.h" -#include "../../../uwb/include/umc.h" -#include "whci-hc.h" - -/* Generic command timeout. 
*/ -#define WHC_GENCMD_TIMEOUT_MS 100 - -struct whc_dbg; - -struct whc { - struct wusbhc wusbhc; - struct umc_dev *umc; - - resource_size_t base_phys; - void __iomem *base; - int irq; - - u8 n_devices; - u8 n_keys; - u8 n_mmc_ies; - - u64 *pz_list; - struct dn_buf_entry *dn_buf; - struct di_buf_entry *di_buf; - dma_addr_t pz_list_dma; - dma_addr_t dn_buf_dma; - dma_addr_t di_buf_dma; - - spinlock_t lock; - struct mutex mutex; - - void * gen_cmd_buf; - dma_addr_t gen_cmd_buf_dma; - wait_queue_head_t cmd_wq; - - struct workqueue_struct *workqueue; - struct work_struct dn_work; - - struct dma_pool *qset_pool; - - struct list_head async_list; - struct list_head async_removed_list; - wait_queue_head_t async_list_wq; - struct work_struct async_work; - - struct list_head periodic_list[5]; - struct list_head periodic_removed_list; - wait_queue_head_t periodic_list_wq; - struct work_struct periodic_work; - - struct whc_dbg *dbg; -}; - -#define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc)) - -/** - * struct whc_std - a software TD. - * @urb: the URB this sTD is for. - * @offset: start of the URB's data for this TD. - * @len: the length of data in the associated TD. - * @ntds_remaining: number of TDs (starting from this one) in this transfer. - * - * @bounce_buf: a bounce buffer if the std was from an urb with a sg - * list that could not be mapped to qTDs directly. - * @bounce_sg: the first scatterlist element bounce_buf is for. - * @bounce_offset: the offset into bounce_sg for the start of bounce_buf. - * - * Queued URBs may require more TDs than are available in a qset so we - * use a list of these "software TDs" (sTDs) to hold per-TD data. - */ -struct whc_std { - struct urb *urb; - size_t len; - int ntds_remaining; - struct whc_qtd *qtd; - - struct list_head list_node; - int num_pointers; - dma_addr_t dma_addr; - struct whc_page_list_entry *pl_virt; - - void *bounce_buf; - struct scatterlist *bounce_sg; - unsigned bounce_offset; -}; - -/** - * struct whc_urb - per URB host controller structure. - * @urb: the URB this struct is for. - * @qset: the qset associated to the URB. - * @dequeue_work: the work to remove the URB when dequeued. - * @is_async: the URB belongs to async sheduler or not. - * @status: the status to be returned when calling wusbhc_giveback_urb. - */ -struct whc_urb { - struct urb *urb; - struct whc_qset *qset; - struct work_struct dequeue_work; - bool is_async; - int status; -}; - -/** - * whc_std_last - is this sTD the URB's last? - * @std: the sTD to check. 
- */ -static inline bool whc_std_last(struct whc_std *std) -{ - return std->ntds_remaining <= 1; -} - -enum whc_update { - WHC_UPDATE_ADDED = 0x01, - WHC_UPDATE_REMOVED = 0x02, - WHC_UPDATE_UPDATED = 0x04, -}; - -/* init.c */ -int whc_init(struct whc *whc); -void whc_clean_up(struct whc *whc); - -/* hw.c */ -void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val); -int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len); -void whc_hw_error(struct whc *whc, const char *reason); - -/* wusb.c */ -int whc_wusbhc_start(struct wusbhc *wusbhc); -void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay); -int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, - u8 handle, struct wuie_hdr *wuie); -int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle); -int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm); -int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev); -int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots); -int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, - const void *ptk, size_t key_size); -int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid, - const void *gtk, size_t key_size); -int whc_set_cluster_id(struct whc *whc, u8 bcid); - -/* int.c */ -irqreturn_t whc_int_handler(struct usb_hcd *hcd); -void whc_dn_work(struct work_struct *work); - -/* asl.c */ -void asl_start(struct whc *whc); -void asl_stop(struct whc *whc); -int asl_init(struct whc *whc); -void asl_clean_up(struct whc *whc); -int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags); -int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status); -void asl_qset_delete(struct whc *whc, struct whc_qset *qset); -void scan_async_work(struct work_struct *work); - -/* pzl.c */ -int pzl_init(struct whc *whc); -void pzl_clean_up(struct whc *whc); -void pzl_start(struct whc *whc); -void pzl_stop(struct whc *whc); -int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags); -int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status); -void pzl_qset_delete(struct whc *whc, struct whc_qset *qset); -void scan_periodic_work(struct work_struct *work); - -/* qset.c */ -struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags); -void qset_free(struct whc *whc, struct whc_qset *qset); -struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags); -void qset_delete(struct whc *whc, struct whc_qset *qset); -void qset_clear(struct whc *whc, struct whc_qset *qset); -void qset_reset(struct whc *whc, struct whc_qset *qset); -int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, - gfp_t mem_flags); -void qset_free_std(struct whc *whc, struct whc_std *std); -void qset_remove_urb(struct whc *whc, struct whc_qset *qset, - struct urb *urb, int status); -void process_halted_qtd(struct whc *whc, struct whc_qset *qset, - struct whc_qtd *qtd); -void process_inactive_qtd(struct whc *whc, struct whc_qset *qset, - struct whc_qtd *qtd); -enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset); -void qset_remove_complete(struct whc *whc, struct whc_qset *qset); -void pzl_update(struct whc *whc, uint32_t wusbcmd); -void asl_update(struct whc *whc, uint32_t wusbcmd); - -/* debug.c */ -void whc_dbg_init(struct whc *whc); -void whc_dbg_clean_up(struct whc *whc); - -#endif /* #ifndef __WHCD_H */ diff --git a/drivers/staging/wusbcore/host/whci/whci-hc.h b/drivers/staging/wusbcore/host/whci/whci-hc.h deleted file mode 100644 index 5a86a57a80cc..000000000000 --- 
a/drivers/staging/wusbcore/host/whci/whci-hc.h +++ /dev/null @@ -1,401 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless Host Controller (WHC) data structures. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - */ -#ifndef _WHCI_WHCI_HC_H -#define _WHCI_WHCI_HC_H - -#include <linux/list.h> - -/** - * WHCI_PAGE_SIZE - page size use by WHCI - * - * WHCI assumes that host system uses pages of 4096 octets. - */ -#define WHCI_PAGE_SIZE 4096 - - -/** - * QTD_MAX_TXFER_SIZE - max number of bytes to transfer with a single - * qtd. - * - * This is 2^20 - 1. - */ -#define QTD_MAX_XFER_SIZE 1048575 - - -/** - * struct whc_qtd - Queue Element Transfer Descriptors (qTD) - * - * This describes the data for a bulk, control or interrupt transfer. - * - * [WHCI] section 3.2.4 - */ -struct whc_qtd { - __le32 status; /*< remaining transfer len and transfer status */ - __le32 options; - __le64 page_list_ptr; /*< physical pointer to data buffer page list*/ - __u8 setup[8]; /*< setup data for control transfers */ -} __attribute__((packed)); - -#define QTD_STS_ACTIVE (1 << 31) /* enable execution of transaction */ -#define QTD_STS_HALTED (1 << 30) /* transfer halted */ -#define QTD_STS_DBE (1 << 29) /* data buffer error */ -#define QTD_STS_BABBLE (1 << 28) /* babble detected */ -#define QTD_STS_RCE (1 << 27) /* retry count exceeded */ -#define QTD_STS_LAST_PKT (1 << 26) /* set Last Packet Flag in WUSB header */ -#define QTD_STS_INACTIVE (1 << 25) /* queue set is marked inactive */ -#define QTD_STS_IALT_VALID (1 << 23) /* iAlt field is valid */ -#define QTD_STS_IALT(i) (QTD_STS_IALT_VALID | ((i) << 20)) /* iAlt field */ -#define QTD_STS_LEN(l) ((l) << 0) /* transfer length */ -#define QTD_STS_TO_LEN(s) ((s) & 0x000fffff) - -#define QTD_OPT_IOC (1 << 1) /* page_list_ptr points to buffer directly */ -#define QTD_OPT_SMALL (1 << 0) /* interrupt on complete */ - -/** - * struct whc_itd - Isochronous Queue Element Transfer Descriptors (iTD) - * - * This describes the data and other parameters for an isochronous - * transfer. - * - * [WHCI] section 3.2.5 - */ -struct whc_itd { - __le16 presentation_time; /*< presentation time for OUT transfers */ - __u8 num_segments; /*< number of data segments in segment list */ - __u8 status; /*< command execution status */ - __le32 options; /*< misc transfer options */ - __le64 page_list_ptr; /*< physical pointer to data buffer page list */ - __le64 seg_list_ptr; /*< physical pointer to segment list */ -} __attribute__((packed)); - -#define ITD_STS_ACTIVE (1 << 7) /* enable execution of transaction */ -#define ITD_STS_DBE (1 << 5) /* data buffer error */ -#define ITD_STS_BABBLE (1 << 4) /* babble detected */ -#define ITD_STS_INACTIVE (1 << 1) /* queue set is marked inactive */ - -#define ITD_OPT_IOC (1 << 1) /* interrupt on complete */ -#define ITD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */ - -/** - * Page list entry. - * - * A TD's page list must contain sufficient page list entries for the - * total data length in the TD. - * - * [WHCI] section 3.2.4.3 - */ -struct whc_page_list_entry { - __le64 buf_ptr; /*< physical pointer to buffer */ -} __attribute__((packed)); - -/** - * struct whc_seg_list_entry - Segment list entry. - * - * Describes a portion of the data buffer described in the containing - * qTD's page list. - * - * seg_ptr = qtd->page_list_ptr[qtd->seg_list_ptr[seg].idx].buf_ptr - * + qtd->seg_list_ptr[seg].offset; - * - * Segments can't cross page boundries. 
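[Editor's sketch] The page-list sizing rule above follows the same arithmetic used when the sTDs are built in qset.c: count pages from the start of the page containing the buffer's first byte. This helper is illustrative only and is not part of the header.

/* Illustrative only: number of page list entries needed to cover a DMA
 * buffer of 'len' bytes starting at 'dma_addr'. */
static int whci_page_list_entries(dma_addr_t dma_addr, size_t len)
{
        dma_addr_t sp = dma_addr & ~(dma_addr_t)(WHCI_PAGE_SIZE - 1);
        dma_addr_t ep = dma_addr + len;

        return DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
}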
- * - * [WHCI] section 3.2.5.5 - */ -struct whc_seg_list_entry { - __le16 len; /*< segment length */ - __u8 idx; /*< index into page list */ - __u8 status; /*< segment status */ - __le16 offset; /*< 12 bit offset into page */ -} __attribute__((packed)); - -/** - * struct whc_qhead - endpoint and status information for a qset. - * - * [WHCI] section 3.2.6 - */ -struct whc_qhead { - __le64 link; /*< next qset in list */ - __le32 info1; - __le32 info2; - __le32 info3; - __le16 status; - __le16 err_count; /*< transaction error count */ - __le32 cur_window; - __le32 scratch[3]; /*< h/w scratch area */ - union { - struct whc_qtd qtd; - struct whc_itd itd; - } overlay; -} __attribute__((packed)); - -#define QH_LINK_PTR_MASK (~0x03Full) -#define QH_LINK_PTR(ptr) ((ptr) & QH_LINK_PTR_MASK) -#define QH_LINK_IQS (1 << 4) /* isochronous queue set */ -#define QH_LINK_NTDS(n) (((n) - 1) << 1) /* number of TDs in queue set */ -#define QH_LINK_T (1 << 0) /* last queue set in periodic schedule list */ - -#define QH_INFO1_EP(e) ((e) << 0) /* endpoint number */ -#define QH_INFO1_DIR_IN (1 << 4) /* IN transfer */ -#define QH_INFO1_DIR_OUT (0 << 4) /* OUT transfer */ -#define QH_INFO1_TR_TYPE_CTRL (0x0 << 5) /* control transfer */ -#define QH_INFO1_TR_TYPE_ISOC (0x1 << 5) /* isochronous transfer */ -#define QH_INFO1_TR_TYPE_BULK (0x2 << 5) /* bulk transfer */ -#define QH_INFO1_TR_TYPE_INT (0x3 << 5) /* interrupt */ -#define QH_INFO1_TR_TYPE_LP_INT (0x7 << 5) /* low power interrupt */ -#define QH_INFO1_DEV_INFO_IDX(i) ((i) << 8) /* index into device info buffer */ -#define QH_INFO1_SET_INACTIVE (1 << 15) /* set inactive after transfer */ -#define QH_INFO1_MAX_PKT_LEN(l) ((l) << 16) /* maximum packet length */ - -#define QH_INFO2_BURST(b) ((b) << 0) /* maximum burst length */ -#define QH_INFO2_DBP(p) ((p) << 5) /* data burst policy (see [WUSB] table 5-7) */ -#define QH_INFO2_MAX_COUNT(c) ((c) << 8) /* max isoc/int pkts per zone */ -#define QH_INFO2_RQS (1 << 15) /* reactivate queue set */ -#define QH_INFO2_MAX_RETRY(r) ((r) << 16) /* maximum transaction retries */ -#define QH_INFO2_MAX_SEQ(s) ((s) << 20) /* maximum sequence number */ -#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */ -#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */ - -#define QH_INFO3_TX_RATE(r) ((r) << 24) /* PHY rate (see [ECMA-368] section 10.3.1.1) */ -#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */ - -#define QH_STATUS_FLOW_CTRL (1 << 15) -#define QH_STATUS_ICUR(i) ((i) << 5) -#define QH_STATUS_TO_ICUR(s) (((s) >> 5) & 0x7) -#define QH_STATUS_SEQ_MASK 0x1f - -/** - * usb_pipe_to_qh_type - USB core pipe type to QH transfer type - * - * Returns the QH type field for a USB core pipe type. - */ -static inline unsigned usb_pipe_to_qh_type(unsigned pipe) -{ - static const unsigned type[] = { - [PIPE_ISOCHRONOUS] = QH_INFO1_TR_TYPE_ISOC, - [PIPE_INTERRUPT] = QH_INFO1_TR_TYPE_INT, - [PIPE_CONTROL] = QH_INFO1_TR_TYPE_CTRL, - [PIPE_BULK] = QH_INFO1_TR_TYPE_BULK, - }; - return type[usb_pipetype(pipe)]; -} - -/** - * Maxiumum number of TDs in a qset. 
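[Editor's sketch] How the QH_INFO1_* macros above compose into the qhead's first info word, shown for a hypothetical bulk IN endpoint; the endpoint number, device info index and max packet size are made-up example values, and the real initialization lives in qset.c (removed elsewhere in this patch).

/* Illustrative only: info1 for a bulk IN endpoint 2, device info slot 3,
 * 512-byte max packet (CPU byte order, before any cpu_to_le32()). */
static u32 example_bulk_in_info1(void)
{
        return QH_INFO1_EP(2)
                | QH_INFO1_DIR_IN
                | QH_INFO1_TR_TYPE_BULK
                | QH_INFO1_DEV_INFO_IDX(3)
                | QH_INFO1_MAX_PKT_LEN(512);
}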
- */ -#define WHCI_QSET_TD_MAX 8 - -/** - * struct whc_qset - WUSB data transfers to a specific endpoint - * @qh: the QHead of this qset - * @qtd: up to 8 qTDs (for qsets for control, bulk and interrupt - * transfers) - * @itd: up to 8 iTDs (for qsets for isochronous transfers) - * @qset_dma: DMA address for this qset - * @whc: WHCI HC this qset is for - * @ep: endpoint - * @stds: list of sTDs queued to this qset - * @ntds: number of qTDs queued (not necessarily the same as nTDs - * field in the QH) - * @td_start: index of the first qTD in the list - * @td_end: index of next free qTD in the list (provided - * ntds < WHCI_QSET_TD_MAX) - * - * Queue Sets (qsets) are added to the asynchronous schedule list - * (ASL) or the periodic zone list (PZL). - * - * qsets may contain up to 8 TDs (either qTDs or iTDs as appropriate). - * Each TD may refer to at most 1 MiB of data. If a single transfer - * has > 8MiB of data, TDs can be reused as they are completed since - * the TD list is used as a circular buffer. Similarly, several - * (smaller) transfers may be queued in a qset. - * - * WHCI controllers may cache portions of the qsets in the ASL and - * PZL, requiring the WHCD to inform the WHC that the lists have been - * updated (fields changed or qsets inserted or removed). For safe - * insertion and removal of qsets from the lists the schedule must be - * stopped to avoid races in updating the QH link pointers. - * - * Since the HC is free to execute qsets in any order, all transfers - * to an endpoint should use the same qset to ensure transfers are - * executed in the order they're submitted. - * - * [WHCI] section 3.2.3 - */ -struct whc_qset { - struct whc_qhead qh; - union { - struct whc_qtd qtd[WHCI_QSET_TD_MAX]; - struct whc_itd itd[WHCI_QSET_TD_MAX]; - }; - - /* private data for WHCD */ - dma_addr_t qset_dma; - struct whc *whc; - struct usb_host_endpoint *ep; - struct list_head stds; - int ntds; - int td_start; - int td_end; - struct list_head list_node; - unsigned in_sw_list:1; - unsigned in_hw_list:1; - unsigned remove:1; - unsigned reset:1; - struct urb *pause_after_urb; - struct completion remove_complete; - uint16_t max_packet; - uint8_t max_burst; - uint8_t max_seq; -}; - -static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target) -{ - if (target) - *ptr = (*ptr & ~(QH_LINK_PTR_MASK | QH_LINK_T)) | QH_LINK_PTR(target); - else - *ptr = QH_LINK_T; -} - -/** - * struct di_buf_entry - Device Information (DI) buffer entry. - * - * There's one of these per connected device. - */ -struct di_buf_entry { - __le32 availability_info[8]; /*< MAS availability information, one MAS per bit */ - __le32 addr_sec_info; /*< addressing and security info */ - __le32 reserved[7]; -} __attribute__((packed)); - -#define WHC_DI_SECURE (1 << 31) -#define WHC_DI_DISABLE (1 << 30) -#define WHC_DI_KEY_IDX(k) ((k) << 8) -#define WHC_DI_KEY_IDX_MASK 0x0000ff00 -#define WHC_DI_DEV_ADDR(a) ((a) << 0) -#define WHC_DI_DEV_ADDR_MASK 0x000000ff - -/** - * struct dn_buf_entry - Device Notification (DN) buffer entry. 
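[Editor's sketch] The td_start/td_end bookkeeping described above treats the 8-entry TD array as a ring, so index arithmetic wraps at WHCI_QSET_TD_MAX. A hedged sketch of the wrap-around step; the in-tree version sits in qset.c, removed elsewhere in this patch.

/* Illustrative only: advance a TD index around the WHCI_QSET_TD_MAX ring. */
static int qset_next_td_index(int idx)
{
        return (idx + 1) % WHCI_QSET_TD_MAX;
}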
- * - * [WHCI] section 3.2.8 - */ -struct dn_buf_entry { - __u8 msg_size; /*< number of octets of valid DN data */ - __u8 reserved1; - __u8 src_addr; /*< source address */ - __u8 status; /*< buffer entry status */ - __le32 tkid; /*< TKID for source device, valid if secure bit is set */ - __u8 dn_data[56]; /*< up to 56 octets of DN data */ -} __attribute__((packed)); - -#define WHC_DN_STATUS_VALID (1 << 7) /* buffer entry is valid */ -#define WHC_DN_STATUS_SECURE (1 << 6) /* notification received using secure frame */ - -#define WHC_N_DN_ENTRIES (4096 / sizeof(struct dn_buf_entry)) - -/* The Add MMC IE WUSB Generic Command may take up to 256 bytes of - data. [WHCI] section 2.4.7. */ -#define WHC_GEN_CMD_DATA_LEN 256 - -/* - * HC registers. - * - * [WHCI] section 2.4 - */ - -#define WHCIVERSION 0x00 - -#define WHCSPARAMS 0x04 -# define WHCSPARAMS_TO_N_MMC_IES(p) (((p) >> 16) & 0xff) -# define WHCSPARAMS_TO_N_KEYS(p) (((p) >> 8) & 0xff) -# define WHCSPARAMS_TO_N_DEVICES(p) (((p) >> 0) & 0x7f) - -#define WUSBCMD 0x08 -# define WUSBCMD_BCID(b) ((b) << 16) -# define WUSBCMD_BCID_MASK (0xff << 16) -# define WUSBCMD_ASYNC_QSET_RM (1 << 12) -# define WUSBCMD_PERIODIC_QSET_RM (1 << 11) -# define WUSBCMD_WUSBSI(s) ((s) << 8) -# define WUSBCMD_WUSBSI_MASK (0x7 << 8) -# define WUSBCMD_ASYNC_SYNCED_DB (1 << 7) -# define WUSBCMD_PERIODIC_SYNCED_DB (1 << 6) -# define WUSBCMD_ASYNC_UPDATED (1 << 5) -# define WUSBCMD_PERIODIC_UPDATED (1 << 4) -# define WUSBCMD_ASYNC_EN (1 << 3) -# define WUSBCMD_PERIODIC_EN (1 << 2) -# define WUSBCMD_WHCRESET (1 << 1) -# define WUSBCMD_RUN (1 << 0) - -#define WUSBSTS 0x0c -# define WUSBSTS_ASYNC_SCHED (1 << 15) -# define WUSBSTS_PERIODIC_SCHED (1 << 14) -# define WUSBSTS_DNTS_SCHED (1 << 13) -# define WUSBSTS_HCHALTED (1 << 12) -# define WUSBSTS_GEN_CMD_DONE (1 << 9) -# define WUSBSTS_CHAN_TIME_ROLLOVER (1 << 8) -# define WUSBSTS_DNTS_OVERFLOW (1 << 7) -# define WUSBSTS_BPST_ADJUSTMENT_CHANGED (1 << 6) -# define WUSBSTS_HOST_ERR (1 << 5) -# define WUSBSTS_ASYNC_SCHED_SYNCED (1 << 4) -# define WUSBSTS_PERIODIC_SCHED_SYNCED (1 << 3) -# define WUSBSTS_DNTS_INT (1 << 2) -# define WUSBSTS_ERR_INT (1 << 1) -# define WUSBSTS_INT (1 << 0) -# define WUSBSTS_INT_MASK 0x3ff - -#define WUSBINTR 0x10 -# define WUSBINTR_GEN_CMD_DONE (1 << 9) -# define WUSBINTR_CHAN_TIME_ROLLOVER (1 << 8) -# define WUSBINTR_DNTS_OVERFLOW (1 << 7) -# define WUSBINTR_BPST_ADJUSTMENT_CHANGED (1 << 6) -# define WUSBINTR_HOST_ERR (1 << 5) -# define WUSBINTR_ASYNC_SCHED_SYNCED (1 << 4) -# define WUSBINTR_PERIODIC_SCHED_SYNCED (1 << 3) -# define WUSBINTR_DNTS_INT (1 << 2) -# define WUSBINTR_ERR_INT (1 << 1) -# define WUSBINTR_INT (1 << 0) -# define WUSBINTR_ALL 0x3ff - -#define WUSBGENCMDSTS 0x14 -# define WUSBGENCMDSTS_ACTIVE (1 << 31) -# define WUSBGENCMDSTS_ERROR (1 << 24) -# define WUSBGENCMDSTS_IOC (1 << 23) -# define WUSBGENCMDSTS_MMCIE_ADD 0x01 -# define WUSBGENCMDSTS_MMCIE_RM 0x02 -# define WUSBGENCMDSTS_SET_MAS 0x03 -# define WUSBGENCMDSTS_CHAN_STOP 0x04 -# define WUSBGENCMDSTS_RWP_EN 0x05 - -#define WUSBGENCMDPARAMS 0x18 -#define WUSBGENADDR 0x20 -#define WUSBASYNCLISTADDR 0x28 -#define WUSBDNTSBUFADDR 0x30 -#define WUSBDEVICEINFOADDR 0x38 - -#define WUSBSETSECKEYCMD 0x40 -# define WUSBSETSECKEYCMD_SET (1 << 31) -# define WUSBSETSECKEYCMD_ERASE (1 << 30) -# define WUSBSETSECKEYCMD_GTK (1 << 8) -# define WUSBSETSECKEYCMD_IDX(i) ((i) << 0) - -#define WUSBTKID 0x44 -#define WUSBSECKEY 0x48 -#define WUSBPERIODICLISTBASE 0x58 -#define WUSBMASINDEX 0x60 - -#define WUSBDNTSCTRL 0x64 -# define 
WUSBDNTSCTRL_ACTIVE (1 << 31) -# define WUSBDNTSCTRL_INTERVAL(i) ((i) << 8) -# define WUSBDNTSCTRL_SLOTS(s) ((s) << 0) - -#define WUSBTIME 0x68 -# define WUSBTIME_CHANNEL_TIME_MASK 0x00ffffff - -#define WUSBBPST 0x6c -#define WUSBDIBUPDATED 0x70 - -#endif /* #ifndef _WHCI_WHCI_HC_H */ diff --git a/drivers/staging/wusbcore/host/whci/wusb.c b/drivers/staging/wusbcore/host/whci/wusb.c deleted file mode 100644 index 6d0068ab35e4..000000000000 --- a/drivers/staging/wusbcore/host/whci/wusb.c +++ /dev/null @@ -1,210 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless Host Controller (WHC) WUSB operations. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - */ -#include <linux/kernel.h> - -#include "../../../uwb/include/umc.h" -#include "../../wusbhc.h" - -#include "whcd.h" - -static int whc_update_di(struct whc *whc, int idx) -{ - int offset = idx / 32; - u32 bit = 1 << (idx % 32); - - le_writel(bit, whc->base + WUSBDIBUPDATED + offset); - - return whci_wait_for(&whc->umc->dev, - whc->base + WUSBDIBUPDATED + offset, bit, 0, - 100, "DI update"); -} - -/* - * WHCI starts MMCs based on there being a valid GTK so these need - * only start/stop the asynchronous and periodic schedules and send a - * channel stop command. - */ - -int whc_wusbhc_start(struct wusbhc *wusbhc) -{ - struct whc *whc = wusbhc_to_whc(wusbhc); - - asl_start(whc); - pzl_start(whc); - - return 0; -} - -void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay) -{ - struct whc *whc = wusbhc_to_whc(wusbhc); - u32 stop_time, now_time; - int ret; - - pzl_stop(whc); - asl_stop(whc); - - now_time = le_readl(whc->base + WUSBTIME) & WUSBTIME_CHANNEL_TIME_MASK; - stop_time = (now_time + ((delay * 8) << 7)) & 0x00ffffff; - ret = whc_do_gencmd(whc, WUSBGENCMDSTS_CHAN_STOP, stop_time, NULL, 0); - if (ret == 0) - msleep(delay); -} - -int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, - u8 handle, struct wuie_hdr *wuie) -{ - struct whc *whc = wusbhc_to_whc(wusbhc); - u32 params; - - params = (interval << 24) - | (repeat_cnt << 16) - | (wuie->bLength << 8) - | handle; - - return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_ADD, params, wuie, wuie->bLength); -} - -int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle) -{ - struct whc *whc = wusbhc_to_whc(wusbhc); - u32 params; - - params = handle; - - return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_RM, params, NULL, 0); -} - -int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm) -{ - struct whc *whc = wusbhc_to_whc(wusbhc); - - if (stream_index >= 0) - whc_write_wusbcmd(whc, WUSBCMD_WUSBSI_MASK, WUSBCMD_WUSBSI(stream_index)); - - return whc_do_gencmd(whc, WUSBGENCMDSTS_SET_MAS, 0, (void *)mas_bm, sizeof(*mas_bm)); -} - -int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) -{ - struct whc *whc = wusbhc_to_whc(wusbhc); - int idx = wusb_dev->port_idx; - struct di_buf_entry *di = &whc->di_buf[idx]; - int ret; - - mutex_lock(&whc->mutex); - - uwb_mas_bm_copy_le(di->availability_info, &wusb_dev->availability); - di->addr_sec_info &= ~(WHC_DI_DISABLE | WHC_DI_DEV_ADDR_MASK); - di->addr_sec_info |= WHC_DI_DEV_ADDR(wusb_dev->addr); - - ret = whc_update_di(whc, idx); - - mutex_unlock(&whc->mutex); - - return ret; -} - -/* - * Set the number of Device Notification Time Slots (DNTS) and enable - * device notifications. 
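[Editor's sketch] The generic-command parameter word built by whc_mmcie_add() above packs four byte-wide fields into 32 bits; this helper restates that packing for illustration only.

/* Illustrative only: parameter word for the WUSBGENCMDSTS_MMCIE_ADD command,
 * exactly as packed by whc_mmcie_add() above. */
static u32 mmcie_add_params(u8 interval, u8 repeat_cnt, u8 ie_len, u8 handle)
{
        return (interval << 24) | (repeat_cnt << 16) | (ie_len << 8) | handle;
}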
- */ -int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots) -{ - struct whc *whc = wusbhc_to_whc(wusbhc); - u32 dntsctrl; - - dntsctrl = WUSBDNTSCTRL_ACTIVE - | WUSBDNTSCTRL_INTERVAL(interval) - | WUSBDNTSCTRL_SLOTS(slots); - - le_writel(dntsctrl, whc->base + WUSBDNTSCTRL); - - return 0; -} - -static int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid, - const void *key, size_t key_size, bool is_gtk) -{ - uint32_t setkeycmd; - uint32_t seckey[4]; - int i; - int ret; - - memcpy(seckey, key, key_size); - setkeycmd = WUSBSETSECKEYCMD_SET | WUSBSETSECKEYCMD_IDX(key_index); - if (is_gtk) - setkeycmd |= WUSBSETSECKEYCMD_GTK; - - le_writel(tkid, whc->base + WUSBTKID); - for (i = 0; i < 4; i++) - le_writel(seckey[i], whc->base + WUSBSECKEY + 4*i); - le_writel(setkeycmd, whc->base + WUSBSETSECKEYCMD); - - ret = whci_wait_for(&whc->umc->dev, whc->base + WUSBSETSECKEYCMD, - WUSBSETSECKEYCMD_SET, 0, 100, "set key"); - - return ret; -} - -/** - * whc_set_ptk - set the PTK to use for a device. - * - * The index into the key table for this PTK is the same as the - * device's port index. - */ -int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, - const void *ptk, size_t key_size) -{ - struct whc *whc = wusbhc_to_whc(wusbhc); - struct di_buf_entry *di = &whc->di_buf[port_idx]; - int ret; - - mutex_lock(&whc->mutex); - - if (ptk) { - ret = whc_set_key(whc, port_idx, tkid, ptk, key_size, false); - if (ret) - goto out; - - di->addr_sec_info &= ~WHC_DI_KEY_IDX_MASK; - di->addr_sec_info |= WHC_DI_SECURE | WHC_DI_KEY_IDX(port_idx); - } else - di->addr_sec_info &= ~WHC_DI_SECURE; - - ret = whc_update_di(whc, port_idx); -out: - mutex_unlock(&whc->mutex); - return ret; -} - -/** - * whc_set_gtk - set the GTK for subsequent broadcast packets - * - * The GTK is stored in the last entry in the key table (the previous - * N_DEVICES entries are for the per-device PTKs). - */ -int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid, - const void *gtk, size_t key_size) -{ - struct whc *whc = wusbhc_to_whc(wusbhc); - int ret; - - mutex_lock(&whc->mutex); - - ret = whc_set_key(whc, whc->n_devices, tkid, gtk, key_size, true); - - mutex_unlock(&whc->mutex); - - return ret; -} - -int whc_set_cluster_id(struct whc *whc, u8 bcid) -{ - whc_write_wusbcmd(whc, WUSBCMD_BCID_MASK, WUSBCMD_BCID(bcid)); - return 0; -} diff --git a/drivers/staging/wusbcore/include/association.h b/drivers/staging/wusbcore/include/association.h deleted file mode 100644 index d7f3cb9b9db5..000000000000 --- a/drivers/staging/wusbcore/include/association.h +++ /dev/null @@ -1,151 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB - Cable Based Association - * - * Copyright (C) 2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - */ -#ifndef __LINUX_USB_ASSOCIATION_H -#define __LINUX_USB_ASSOCIATION_H - - -/* - * Association attributes - * - * Association Models Supplement to WUSB 1.0 T[3-1] - * - * Each field in the structures has it's ID, it's length and then the - * value. This is the actual definition of the field's ID and its - * length. 
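[Editor's sketch] To make the ID/length/value layout concrete, a hedged sketch of one attribute being filled in, using the CHID attribute (ID 0x1000, 16-byte value) and the wusb_cbaf_attr_hdr defined below; the struct and function names here are invented for the example.

/* Illustrative only: one ID/length/value attribute as it appears on the
 * wire, here the CHID attribute. */
struct example_chid_attr {
        struct wusb_cbaf_attr_hdr hdr;
        struct wusb_ckhdid chid;
} __attribute__((packed));

static void example_fill_chid(struct example_chid_attr *attr,
                              const struct wusb_ckhdid *chid)
{
        attr->hdr.id = cpu_to_le16(0x1000);              /* WUSB_AR_CHID */
        attr->hdr.len = cpu_to_le16(sizeof(attr->chid)); /* 16 octets */
        attr->chid = *chid;
}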
- */ -struct wusb_am_attr { - __u8 id; - __u8 len; -}; - -/* Different fields defined by the spec */ -#define WUSB_AR_AssociationTypeId { .id = cpu_to_le16(0x0000), .len = cpu_to_le16(2) } -#define WUSB_AR_AssociationSubTypeId { .id = cpu_to_le16(0x0001), .len = cpu_to_le16(2) } -#define WUSB_AR_Length { .id = cpu_to_le16(0x0002), .len = cpu_to_le16(4) } -#define WUSB_AR_AssociationStatus { .id = cpu_to_le16(0x0004), .len = cpu_to_le16(4) } -#define WUSB_AR_LangID { .id = cpu_to_le16(0x0008), .len = cpu_to_le16(2) } -#define WUSB_AR_DeviceFriendlyName { .id = cpu_to_le16(0x000b), .len = cpu_to_le16(64) } /* max */ -#define WUSB_AR_HostFriendlyName { .id = cpu_to_le16(0x000c), .len = cpu_to_le16(64) } /* max */ -#define WUSB_AR_CHID { .id = cpu_to_le16(0x1000), .len = cpu_to_le16(16) } -#define WUSB_AR_CDID { .id = cpu_to_le16(0x1001), .len = cpu_to_le16(16) } -#define WUSB_AR_ConnectionContext { .id = cpu_to_le16(0x1002), .len = cpu_to_le16(48) } -#define WUSB_AR_BandGroups { .id = cpu_to_le16(0x1004), .len = cpu_to_le16(2) } - -/* CBAF Control Requests (AMS1.0[T4-1] */ -enum { - CBAF_REQ_GET_ASSOCIATION_INFORMATION = 0x01, - CBAF_REQ_GET_ASSOCIATION_REQUEST, - CBAF_REQ_SET_ASSOCIATION_RESPONSE -}; - -/* - * CBAF USB-interface defitions - * - * No altsettings, one optional interrupt endpoint. - */ -enum { - CBAF_IFACECLASS = 0xef, - CBAF_IFACESUBCLASS = 0x03, - CBAF_IFACEPROTOCOL = 0x01, -}; - -/* Association Information (AMS1.0[T4-3]) */ -struct wusb_cbaf_assoc_info { - __le16 Length; - __u8 NumAssociationRequests; - __le16 Flags; - __u8 AssociationRequestsArray[]; -} __attribute__((packed)); - -/* Association Request (AMS1.0[T4-4]) */ -struct wusb_cbaf_assoc_request { - __u8 AssociationDataIndex; - __u8 Reserved; - __le16 AssociationTypeId; - __le16 AssociationSubTypeId; - __le32 AssociationTypeInfoSize; -} __attribute__((packed)); - -enum { - AR_TYPE_WUSB = 0x0001, - AR_TYPE_WUSB_RETRIEVE_HOST_INFO = 0x0000, - AR_TYPE_WUSB_ASSOCIATE = 0x0001, -}; - -/* Association Attribute header (AMS1.0[3.8]) */ -struct wusb_cbaf_attr_hdr { - __le16 id; - __le16 len; -} __attribute__((packed)); - -/* Host Info (AMS1.0[T4-7]) (yeah, more headers and fields...) */ -struct wusb_cbaf_host_info { - struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; - __le16 AssociationTypeId; - struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; - __le16 AssociationSubTypeId; - struct wusb_cbaf_attr_hdr CHID_hdr; - struct wusb_ckhdid CHID; - struct wusb_cbaf_attr_hdr LangID_hdr; - __le16 LangID; - struct wusb_cbaf_attr_hdr HostFriendlyName_hdr; - __u8 HostFriendlyName[]; -} __attribute__((packed)); - -/* Device Info (AMS1.0[T4-8]) - * - * I still don't get this tag'n'header stuff for each goddamn - * field... 
- */ -struct wusb_cbaf_device_info { - struct wusb_cbaf_attr_hdr Length_hdr; - __le32 Length; - struct wusb_cbaf_attr_hdr CDID_hdr; - struct wusb_ckhdid CDID; - struct wusb_cbaf_attr_hdr BandGroups_hdr; - __le16 BandGroups; - struct wusb_cbaf_attr_hdr LangID_hdr; - __le16 LangID; - struct wusb_cbaf_attr_hdr DeviceFriendlyName_hdr; - __u8 DeviceFriendlyName[]; -} __attribute__((packed)); - -/* Connection Context; CC_DATA - Success case (AMS1.0[T4-9]) */ -struct wusb_cbaf_cc_data { - struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; - __le16 AssociationTypeId; - struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; - __le16 AssociationSubTypeId; - struct wusb_cbaf_attr_hdr Length_hdr; - __le32 Length; - struct wusb_cbaf_attr_hdr ConnectionContext_hdr; - struct wusb_ckhdid CHID; - struct wusb_ckhdid CDID; - struct wusb_ckhdid CK; - struct wusb_cbaf_attr_hdr BandGroups_hdr; - __le16 BandGroups; -} __attribute__((packed)); - -/* CC_DATA - Failure case (AMS1.0[T4-10]) */ -struct wusb_cbaf_cc_data_fail { - struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; - __le16 AssociationTypeId; - struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; - __le16 AssociationSubTypeId; - struct wusb_cbaf_attr_hdr Length_hdr; - __le16 Length; - struct wusb_cbaf_attr_hdr AssociationStatus_hdr; - __u32 AssociationStatus; -} __attribute__((packed)); - -#endif /* __LINUX_USB_ASSOCIATION_H */ diff --git a/drivers/staging/wusbcore/include/wusb-wa.h b/drivers/staging/wusbcore/include/wusb-wa.h deleted file mode 100644 index 64a840b5106e..000000000000 --- a/drivers/staging/wusbcore/include/wusb-wa.h +++ /dev/null @@ -1,304 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB Wire Adapter constants and structures. - * - * Copyright (C) 2005-2006 Intel Corporation. - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * - * FIXME: docs - * FIXME: organize properly, group logically - * - * All the event structures are defined in uwb/spec.h, as they are - * common to the WHCI and WUSB radio control interfaces. 
- * - * References: - * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8 - */ -#ifndef __LINUX_USB_WUSB_WA_H -#define __LINUX_USB_WUSB_WA_H - -/** - * Radio Command Request for the Radio Control Interface - * - * Radio Control Interface command and event codes are the same as - * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_* - */ -enum { - WA_EXEC_RC_CMD = 40, /* Radio Control command Request */ -}; - -/* Wireless Adapter Requests ([WUSB] table 8-51) */ -enum { - WUSB_REQ_ADD_MMC_IE = 20, - WUSB_REQ_REMOVE_MMC_IE = 21, - WUSB_REQ_SET_NUM_DNTS = 22, - WUSB_REQ_SET_CLUSTER_ID = 23, - WUSB_REQ_SET_DEV_INFO = 24, - WUSB_REQ_GET_TIME = 25, - WUSB_REQ_SET_STREAM_IDX = 26, - WUSB_REQ_SET_WUSB_MAS = 27, - WUSB_REQ_CHAN_STOP = 28, -}; - - -/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */ -enum { - WUSB_TIME_ADJ = 0, - WUSB_TIME_BPST = 1, - WUSB_TIME_WUSB = 2, -}; - -enum { - WA_ENABLE = 0x01, - WA_RESET = 0x02, - RPIPE_PAUSE = 0x1, - RPIPE_STALL = 0x2, -}; - -/* Responses from Get Status request ([WUSB] section 8.3.1.6) */ -enum { - WA_STATUS_ENABLED = 0x01, - WA_STATUS_RESETTING = 0x02 -}; - -enum rpipe_crs { - RPIPE_CRS_CTL = 0x01, - RPIPE_CRS_ISO = 0x02, - RPIPE_CRS_BULK = 0x04, - RPIPE_CRS_INTR = 0x08 -}; - -/** - * RPipe descriptor ([WUSB] section 8.5.2.11) - * - * FIXME: explain rpipes - */ -struct usb_rpipe_descriptor { - u8 bLength; - u8 bDescriptorType; - __le16 wRPipeIndex; - __le16 wRequests; - __le16 wBlocks; /* rw if 0 */ - __le16 wMaxPacketSize; /* rw */ - union { - u8 dwa_bHSHubAddress; /* rw: DWA. */ - u8 hwa_bMaxBurst; /* rw: HWA. */ - }; - union { - u8 dwa_bHSHubPort; /* rw: DWA. */ - u8 hwa_bDeviceInfoIndex; /* rw: HWA. */ - }; - u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */ - union { - u8 dwa_bDeviceAddress; /* rw: DWA Target device address. */ - u8 hwa_reserved; /* rw: HWA. */ - }; - u8 bEndpointAddress; /* rw: Target EP address */ - u8 bDataSequence; /* ro: Current Data sequence */ - __le32 dwCurrentWindow; /* ro */ - u8 bMaxDataSequence; /* ro?: max supported seq */ - u8 bInterval; /* rw: */ - u8 bOverTheAirInterval; /* rw: */ - u8 bmAttribute; /* ro? */ - u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */ - u8 bmRetryOptions; /* rw? */ - __le16 wNumTransactionErrors; /* rw */ -} __attribute__ ((packed)); - -/** - * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4) - * - * These are the notifications coming on the notification endpoint of - * an HWA and a DWA. - */ -enum wa_notif_type { - DWA_NOTIF_RWAKE = 0x91, - DWA_NOTIF_PORTSTATUS = 0x92, - WA_NOTIF_TRANSFER = 0x93, - HWA_NOTIF_BPST_ADJ = 0x94, - HWA_NOTIF_DN = 0x95, -}; - -/** - * Wire Adapter notification header - * - * Notifications coming from a wire adapter use a common header - * defined in [WUSB] sections 8.4.5 & 8.5.4. - */ -struct wa_notif_hdr { - u8 bLength; - u8 bNotifyType; /* enum wa_notif_type */ -} __packed; - -/** - * HWA DN Received notification [(WUSB] section 8.5.4.2) - * - * The DNData is specified in WUSB1.0[7.6]. For each device - * notification we received, we just need to dispatch it. - * - * @dndata: this is really an array of notifications, but all start - * with the same header. 
- */ -struct hwa_notif_dn { - struct wa_notif_hdr hdr; - u8 bSourceDeviceAddr; /* from errata 2005/07 */ - u8 bmAttributes; - struct wusb_dn_hdr dndata[]; -} __packed; - -/* [WUSB] section 8.3.3 */ -enum wa_xfer_type { - WA_XFER_TYPE_CTL = 0x80, - WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */ - WA_XFER_TYPE_ISO = 0x82, - WA_XFER_RESULT = 0x83, - WA_XFER_ABORT = 0x84, - WA_XFER_ISO_PACKET_INFO = 0xA0, - WA_XFER_ISO_PACKET_STATUS = 0xA1, -}; - -/* [WUSB] section 8.3.3 */ -struct wa_xfer_hdr { - u8 bLength; /* 0x18 */ - u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */ - __le16 wRPipe; /* RPipe index */ - __le32 dwTransferID; /* Host-assigned ID */ - __le32 dwTransferLength; /* Length of data to xfer */ - u8 bTransferSegment; -} __packed; - -struct wa_xfer_ctl { - struct wa_xfer_hdr hdr; - u8 bmAttribute; - __le16 wReserved; - struct usb_ctrlrequest baSetupData; -} __packed; - -struct wa_xfer_bi { - struct wa_xfer_hdr hdr; - u8 bReserved; - __le16 wReserved; -} __packed; - -/* [WUSB] section 8.5.5 */ -struct wa_xfer_hwaiso { - struct wa_xfer_hdr hdr; - u8 bReserved; - __le16 wPresentationTime; - __le32 dwNumOfPackets; -} __packed; - -struct wa_xfer_packet_info_hwaiso { - __le16 wLength; - u8 bPacketType; - u8 bReserved; - __le16 PacketLength[0]; -} __packed; - -struct wa_xfer_packet_status_len_hwaiso { - __le16 PacketLength; - __le16 PacketStatus; -} __packed; - -struct wa_xfer_packet_status_hwaiso { - __le16 wLength; - u8 bPacketType; - u8 bReserved; - struct wa_xfer_packet_status_len_hwaiso PacketStatus[0]; -} __packed; - -/* [WUSB] section 8.3.3.5 */ -struct wa_xfer_abort { - u8 bLength; - u8 bRequestType; - __le16 wRPipe; /* RPipe index */ - __le32 dwTransferID; /* Host-assigned ID */ -} __packed; - -/** - * WA Transfer Complete notification ([WUSB] section 8.3.3.3) - * - */ -struct wa_notif_xfer { - struct wa_notif_hdr hdr; - u8 bEndpoint; - u8 Reserved; -} __packed; - -/** Transfer result basic codes [WUSB] table 8-15 */ -enum { - WA_XFER_STATUS_SUCCESS, - WA_XFER_STATUS_HALTED, - WA_XFER_STATUS_DATA_BUFFER_ERROR, - WA_XFER_STATUS_BABBLE, - WA_XFER_RESERVED, - WA_XFER_STATUS_NOT_FOUND, - WA_XFER_STATUS_INSUFFICIENT_RESOURCE, - WA_XFER_STATUS_TRANSACTION_ERROR, - WA_XFER_STATUS_ABORTED, - WA_XFER_STATUS_RPIPE_NOT_READY, - WA_XFER_INVALID_FORMAT, - WA_XFER_UNEXPECTED_SEGMENT_NUMBER, - WA_XFER_STATUS_RPIPE_TYPE_MISMATCH, -}; - -/** [WUSB] section 8.3.3.4 */ -struct wa_xfer_result { - struct wa_notif_hdr hdr; - __le32 dwTransferID; - __le32 dwTransferLength; - u8 bTransferSegment; - u8 bTransferStatus; - __le32 dwNumOfPackets; -} __packed; - -/** - * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7). - * - * NOTE: u16 fields are read Little Endian from the hardware. - * - * @bNumPorts is the original max number of devices that the host can - * connect; we might chop this so the stack can handle - * it. In case you need to access it, use wusbhc->ports_max - * if it is a Wireless USB WA. - */ -struct usb_wa_descriptor { - u8 bLength; - u8 bDescriptorType; - __le16 bcdWAVersion; - u8 bNumPorts; /* don't use!! 
*/ - u8 bmAttributes; /* Reserved == 0 */ - __le16 wNumRPipes; - __le16 wRPipeMaxBlock; - u8 bRPipeBlockSize; - u8 bPwrOn2PwrGood; - u8 bNumMMCIEs; - u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */ -} __packed; - -/** - * HWA Device Information Buffer (WUSB1.0[T8.54]) - */ -struct hwa_dev_info { - u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */ - u8 bDeviceAddress; - __le16 wPHYRates; - u8 bmDeviceAttribute; -} __packed; - -#endif /* #ifndef __LINUX_USB_WUSB_WA_H */ diff --git a/drivers/staging/wusbcore/include/wusb.h b/drivers/staging/wusbcore/include/wusb.h deleted file mode 100644 index 09771d1da7bc..000000000000 --- a/drivers/staging/wusbcore/include/wusb.h +++ /dev/null @@ -1,362 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB Standard Definitions - * Event Size Tables - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * - * FIXME: docs - * FIXME: organize properly, group logically - * - * All the event structures are defined in uwb/spec.h, as they are - * common to the WHCI and WUSB radio control interfaces. - */ - -#ifndef __WUSB_H__ -#define __WUSB_H__ - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/usb/ch9.h> -#include <linux/param.h> -#include "../../uwb/include/spec.h" - -/** - * WUSB Information Element header - * - * I don't know why, they decided to make it different to the MBOA MAC - * IE Header; beats me. - */ -struct wuie_hdr { - u8 bLength; - u8 bIEIdentifier; -} __attribute__((packed)); - -enum { - WUIE_ID_WCTA = 0x80, - WUIE_ID_CONNECTACK, - WUIE_ID_HOST_INFO, - WUIE_ID_CHANGE_ANNOUNCE, - WUIE_ID_DEVICE_DISCONNECT, - WUIE_ID_HOST_DISCONNECT, - WUIE_ID_KEEP_ALIVE = 0x89, - WUIE_ID_ISOCH_DISCARD, - WUIE_ID_RESET_DEVICE, -}; - -/** - * Maximum number of array elements in a WUSB IE. - * - * WUSB1.0[7.5 before table 7-38] says that in WUSB IEs that - * are "arrays" have to limited to 4 elements. So we define it - * like that to ease up and submit only the neeed size. - */ -#define WUIE_ELT_MAX 4 - -/** - * Wrapper for the data that defines a CHID, a CDID or a CK - * - * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of - * data. In order to avoid confusion and enforce types, we wrap it. - * - * Make it packed, as we use it in some hw definitions. - */ -struct wusb_ckhdid { - u8 data[16]; -} __attribute__((packed)); - -static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; - -#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) - -/** - * WUSB IE: Host Information (WUSB1.0[7.5.2]) - * - * Used to provide information about the host to the Wireless USB - * devices in range (CHID can be used as an ASCII string). 
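[Editor's sketch] Why WUSB_CKHDID_STRSIZE above is three characters per octet plus a terminating NUL: each of the 16 bytes formats as two hex digits and a separator. The helper name is illustrative only.

/* Illustrative only: render a CHID/CDID/CK as "xx xx ... xx". */
static void example_ckhdid_to_str(char *buf, size_t size,
                                  const struct wusb_ckhdid *id)
{
        size_t i, off = 0;

        for (i = 0; i < sizeof(id->data) && off < size; i++)
                off += scnprintf(buf + off, size - off, "%02x ", id->data[i]);
}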
- */ -struct wuie_host_info { - struct wuie_hdr hdr; - __le16 attributes; - struct wusb_ckhdid CHID; -} __attribute__((packed)); - -/** - * WUSB IE: Connect Ack (WUSB1.0[7.5.1]) - * - * Used to acknowledge device connect requests. See note for - * WUIE_ELT_MAX. - */ -struct wuie_connect_ack { - struct wuie_hdr hdr; - struct { - struct wusb_ckhdid CDID; - u8 bDeviceAddress; /* 0 means unused */ - u8 bReserved; - } blk[WUIE_ELT_MAX]; -} __attribute__((packed)); - -/** - * WUSB IE Host Information Element, Connect Availability - * - * WUSB1.0[7.5.2], bmAttributes description - */ -enum { - WUIE_HI_CAP_RECONNECT = 0, - WUIE_HI_CAP_LIMITED, - WUIE_HI_CAP_RESERVED, - WUIE_HI_CAP_ALL, -}; - -/** - * WUSB IE: Channel Stop (WUSB1.0[7.5.8]) - * - * Tells devices the host is going to stop sending MMCs and will disappear. - */ -struct wuie_channel_stop { - struct wuie_hdr hdr; - u8 attributes; - u8 timestamp[3]; -} __attribute__((packed)); - -/** - * WUSB IE: Keepalive (WUSB1.0[7.5.9]) - * - * Ask device(s) to send keepalives. - */ -struct wuie_keep_alive { - struct wuie_hdr hdr; - u8 bDeviceAddress[WUIE_ELT_MAX]; -} __attribute__((packed)); - -/** - * WUSB IE: Reset device (WUSB1.0[7.5.11]) - * - * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only - * use it for one at the time... - * - * In any case, this request is a wee bit silly: why don't they target - * by address?? - */ -struct wuie_reset { - struct wuie_hdr hdr; - struct wusb_ckhdid CDID; -} __attribute__((packed)); - -/** - * WUSB IE: Disconnect device (WUSB1.0[7.5.11]) - * - * Tell device to disconnect; we can fit 4 addresses, but we only use - * it for one at the time... - */ -struct wuie_disconnect { - struct wuie_hdr hdr; - u8 bDeviceAddress; - u8 padding; -} __attribute__((packed)); - -/** - * WUSB IE: Host disconnect ([WUSB] section 7.5.5) - * - * Tells all connected devices to disconnect. - */ -struct wuie_host_disconnect { - struct wuie_hdr hdr; -} __attribute__((packed)); - -/** - * WUSB Device Notification header (WUSB1.0[7.6]) - */ -struct wusb_dn_hdr { - u8 bType; - u8 notifdata[]; -} __attribute__((packed)); - -/** Device Notification codes (WUSB1.0[Table 7-54]) */ -enum WUSB_DN { - WUSB_DN_CONNECT = 0x01, - WUSB_DN_DISCONNECT = 0x02, - WUSB_DN_EPRDY = 0x03, - WUSB_DN_MASAVAILCHANGED = 0x04, - WUSB_DN_RWAKE = 0x05, - WUSB_DN_SLEEP = 0x06, - WUSB_DN_ALIVE = 0x07, -}; - -/** WUSB Device Notification Connect */ -struct wusb_dn_connect { - struct wusb_dn_hdr hdr; - __le16 attributes; - struct wusb_ckhdid CDID; -} __attribute__((packed)); - -static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn) -{ - return le16_to_cpu(dn->attributes) & 0xff; -} - -static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn) -{ - return (le16_to_cpu(dn->attributes) >> 8) & 0x1; -} - -static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn) -{ - return (le16_to_cpu(dn->attributes) >> 9) & 0x03; -} - -/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */ -struct wusb_dn_alive { - struct wusb_dn_hdr hdr; -} __attribute__((packed)); - -/** Device is disconnecting (WUSB1.0[7.6.2]) */ -struct wusb_dn_disconnect { - struct wusb_dn_hdr hdr; -} __attribute__((packed)); - -/* General constants */ -enum { - WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */ -}; - -/* - * WUSB Crypto stuff (WUSB1.0[6]) - */ - -extern const char *wusb_et_name(u8); - -/** - * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for - * the host or the device. 
- */ -static inline u8 wusb_key_index(int index, int type, int originator) -{ - return (originator << 6) | (type << 4) | index; -} - -#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */ -#define WUSB_KEY_INDEX_TYPE_ASSOC 1 -#define WUSB_KEY_INDEX_TYPE_GTK 2 -#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0 -#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1 -/* bits 0-3 used for the key index. */ -#define WUSB_KEY_INDEX_MAX 15 - -/* A CCM Nonce, defined in WUSB1.0[6.4.1] */ -struct aes_ccm_nonce { - u8 sfn[6]; /* Little Endian */ - u8 tkid[3]; /* LE */ - struct uwb_dev_addr dest_addr; - struct uwb_dev_addr src_addr; -} __attribute__((packed)); - -/* A CCM operation label, defined on WUSB1.0[6.5.x] */ -struct aes_ccm_label { - u8 data[14]; -} __attribute__((packed)); - -/* - * Input to the key derivation sequence defined in - * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the - * PRF function. - */ -struct wusb_keydvt_in { - u8 hnonce[16]; - u8 dnonce[16]; -} __attribute__((packed)); - -/* - * Output from the key derivation sequence defined in - * WUSB1.0[6.5.1]. - */ -struct wusb_keydvt_out { - u8 kck[16]; - u8 ptk[16]; -} __attribute__((packed)); - -/* Pseudo Random Function WUSB1.0[6.5] */ -extern int wusb_crypto_init(void); -extern void wusb_crypto_exit(void); -extern ssize_t wusb_prf(void *out, size_t out_size, - const u8 key[16], const struct aes_ccm_nonce *_n, - const struct aes_ccm_label *a, - const void *b, size_t blen, size_t len); - -static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16], - const struct aes_ccm_nonce *n, - const struct aes_ccm_label *a, - const void *b, size_t blen) -{ - return wusb_prf(out, out_size, key, n, a, b, blen, 64); -} - -static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16], - const struct aes_ccm_nonce *n, - const struct aes_ccm_label *a, - const void *b, size_t blen) -{ - return wusb_prf(out, out_size, key, n, a, b, blen, 128); -} - -static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16], - const struct aes_ccm_nonce *n, - const struct aes_ccm_label *a, - const void *b, size_t blen) -{ - return wusb_prf(out, out_size, key, n, a, b, blen, 256); -} - -/* Key derivation WUSB1.0[6.5.1] */ -static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out, - const u8 key[16], - const struct aes_ccm_nonce *n, - const struct wusb_keydvt_in *keydvt_in) -{ - const struct aes_ccm_label a = { .data = "Pair-wise keys" }; - return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a, - keydvt_in, sizeof(*keydvt_in)); -} - -/* - * Out-of-band MIC Generation WUSB1.0[6.5.2] - * - * Compute the MIC over @key, @n and @hs and place it in @mic_out. - * - * @mic_out: Where to place the 8 byte MIC tag - * @key: KCK from the derivation process - * @n: CCM nonce, n->sfn == 0, TKID as established in the - * process. - * @hs: Handshake struct for phase 2 of the 4-way. - * hs->bStatus and hs->bReserved are zero. - * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2] - * hs->dest_addr is the device's USB address padded with 0 - * hs->src_addr is the hosts's UWB device address - * hs->mic is ignored (as we compute that value). 
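[Editor's sketch] A hedged usage sketch of the key-derivation helpers above: during the 4-way handshake the connection key, the CCM nonce and the two 16-byte nonces feed wusb_key_derive() to produce the KCK and PTK. The wrapper name is invented for the example.

/* Illustrative only: derive the KCK/PTK pair from the connection key and
 * the host/device nonces exchanged in the handshake. */
static int example_derive_keys(struct wusb_keydvt_out *out, const u8 ck[16],
                               const struct aes_ccm_nonce *n,
                               const u8 hnonce[16], const u8 dnonce[16])
{
        struct wusb_keydvt_in in;

        memcpy(in.hnonce, hnonce, sizeof(in.hnonce));
        memcpy(in.dnonce, dnonce, sizeof(in.dnonce));
        return wusb_key_derive(out, ck, n, &in);
}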
- */ -static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16], - const struct aes_ccm_nonce *n, - const struct usb_handshake *hs) -{ - const struct aes_ccm_label a = { .data = "out-of-bandMIC" }; - return wusb_prf_64(mic_out, 8, key, n, &a, - hs, sizeof(*hs) - sizeof(hs->MIC)); -} - -#endif /* #ifndef __WUSB_H__ */ diff --git a/drivers/staging/wusbcore/mmc.c b/drivers/staging/wusbcore/mmc.c deleted file mode 100644 index 881e1f20d718..000000000000 --- a/drivers/staging/wusbcore/mmc.c +++ /dev/null @@ -1,303 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8]) - * MMC (Microscheduled Management Command) handling - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * WUIEs and MMC IEs...well, they are almost the same at the end. MMC - * IEs are Wireless USB IEs that go into the MMC period...[what is - * that? look in Design-overview.txt]. - * - * - * This is a simple subsystem to keep track of which IEs are being - * sent by the host in the MMC period. - * - * For each WUIE we ask to send, we keep it in an array, so we can - * request its removal later, or replace the content. They are tracked - * by pointer, so be sure to use the same pointer if you want to - * remove it or update the contents. - * - * FIXME: - * - add timers that autoremove intervalled IEs? - */ -#include <linux/slab.h> -#include <linux/export.h> -#include "include/wusb.h" -#include "wusbhc.h" - -/* Initialize the MMCIEs handling mechanism */ -int wusbhc_mmcie_create(struct wusbhc *wusbhc) -{ - u8 mmcies = wusbhc->mmcies_max; - wusbhc->mmcie = kcalloc(mmcies, sizeof(wusbhc->mmcie[0]), GFP_KERNEL); - if (wusbhc->mmcie == NULL) - return -ENOMEM; - mutex_init(&wusbhc->mmcie_mutex); - return 0; -} - -/* Release resources used by the MMCIEs handling mechanism */ -void wusbhc_mmcie_destroy(struct wusbhc *wusbhc) -{ - kfree(wusbhc->mmcie); -} - -/* - * Add or replace an MMC Wireless USB IE. - * - * @interval: See WUSB1.0[8.5.3.1] - * @repeat_cnt: See WUSB1.0[8.5.3.1] - * @handle: See WUSB1.0[8.5.3.1] - * @wuie: Pointer to the header of the WUSB IE data to add. - * MUST BE allocated in a kmalloc buffer (no stack or - * vmalloc). - * THE CALLER ALWAYS OWNS THE POINTER (we don't free it - * on remove, we just forget about it). - * @returns: 0 if ok, < 0 errno code on error. - * - * Goes over the *whole* @wusbhc->mmcie array looking for (a) the - * first free spot and (b) if @wuie is already in the array (aka: - * transmitted in the MMCs) the spot were it is. - * - * If present, we "overwrite it" (update). - * - * - * NOTE: Need special ordering rules -- see below WUSB1.0 Table 7-38. - * The host uses the handle as the 'sort' index. We - * allocate the last one always for the WUIE_ID_HOST_INFO, and - * the rest, first come first serve in inverse order. - * - * Host software must make sure that it adds the other IEs in - * the right order... the host hardware is responsible for - * placing the WCTA IEs in the right place with the other IEs - * set by host software. - * - * NOTE: we can access wusbhc->wa_descr without locking because it is - * read only. 
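[Editor's sketch] A hedged usage example of the ownership rules described above for wusbhc_mmcie_set() (defined just below): the caller allocates the IE with kmalloc, keeps the pointer, and later passes the same pointer to update or remove it. The interval/repeat values and the helper name are placeholders for illustration.

/* Illustrative only: add a keep-alive IE to the MMCs. */
static int example_add_keep_alive(struct wusbhc *wusbhc, u8 dev_addr)
{
        struct wuie_keep_alive *ka;
        int result;

        ka = kzalloc(sizeof(*ka), GFP_KERNEL);
        if (ka == NULL)
                return -ENOMEM;
        ka->hdr.bLength = sizeof(*ka);
        ka->hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE;
        ka->bDeviceAddress[0] = dev_addr;

        result = wusbhc_mmcie_set(wusbhc, 0, 0, &ka->hdr);
        if (result < 0)
                kfree(ka);              /* caller owns the buffer */
        return result;
}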
- */ -int wusbhc_mmcie_set(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, - struct wuie_hdr *wuie) -{ - int result = -ENOBUFS; - unsigned handle, itr; - - /* Search a handle, taking into account the ordering */ - mutex_lock(&wusbhc->mmcie_mutex); - switch (wuie->bIEIdentifier) { - case WUIE_ID_HOST_INFO: - /* Always last */ - handle = wusbhc->mmcies_max - 1; - break; - case WUIE_ID_ISOCH_DISCARD: - dev_err(wusbhc->dev, "Special ordering case for WUIE ID 0x%x " - "unimplemented\n", wuie->bIEIdentifier); - result = -ENOSYS; - goto error_unlock; - default: - /* search for it or find the last empty slot */ - handle = ~0; - for (itr = 0; itr < wusbhc->mmcies_max - 1; itr++) { - if (wusbhc->mmcie[itr] == wuie) { - handle = itr; - break; - } - if (wusbhc->mmcie[itr] == NULL) - handle = itr; - } - if (handle == ~0) - goto error_unlock; - } - result = (wusbhc->mmcie_add)(wusbhc, interval, repeat_cnt, handle, - wuie); - if (result >= 0) - wusbhc->mmcie[handle] = wuie; -error_unlock: - mutex_unlock(&wusbhc->mmcie_mutex); - return result; -} -EXPORT_SYMBOL_GPL(wusbhc_mmcie_set); - -/* - * Remove an MMC IE previously added with wusbhc_mmcie_set() - * - * @wuie Pointer used to add the WUIE - */ -void wusbhc_mmcie_rm(struct wusbhc *wusbhc, struct wuie_hdr *wuie) -{ - int result; - unsigned handle, itr; - - mutex_lock(&wusbhc->mmcie_mutex); - for (itr = 0; itr < wusbhc->mmcies_max; itr++) { - if (wusbhc->mmcie[itr] == wuie) { - handle = itr; - goto found; - } - } - mutex_unlock(&wusbhc->mmcie_mutex); - return; - -found: - result = (wusbhc->mmcie_rm)(wusbhc, handle); - if (result == 0) - wusbhc->mmcie[itr] = NULL; - mutex_unlock(&wusbhc->mmcie_mutex); -} -EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm); - -static int wusbhc_mmc_start(struct wusbhc *wusbhc) -{ - int ret; - - mutex_lock(&wusbhc->mutex); - ret = wusbhc->start(wusbhc); - if (ret >= 0) - wusbhc->active = 1; - mutex_unlock(&wusbhc->mutex); - - return ret; -} - -static void wusbhc_mmc_stop(struct wusbhc *wusbhc) -{ - mutex_lock(&wusbhc->mutex); - wusbhc->active = 0; - wusbhc->stop(wusbhc, WUSB_CHANNEL_STOP_DELAY_MS); - mutex_unlock(&wusbhc->mutex); -} - -/* - * wusbhc_start - start transmitting MMCs and accepting connections - * @wusbhc: the HC to start - * - * Establishes a cluster reservation, enables device connections, and - * starts MMCs with appropriate DNTS parameters. 
- */ -int wusbhc_start(struct wusbhc *wusbhc) -{ - int result; - struct device *dev = wusbhc->dev; - - WARN_ON(wusbhc->wuie_host_info != NULL); - BUG_ON(wusbhc->uwb_rc == NULL); - - result = wusbhc_rsv_establish(wusbhc); - if (result < 0) { - dev_err(dev, "cannot establish cluster reservation: %d\n", - result); - goto error_rsv_establish; - } - - result = wusbhc_devconnect_start(wusbhc); - if (result < 0) { - dev_err(dev, "error enabling device connections: %d\n", - result); - goto error_devconnect_start; - } - - result = wusbhc_sec_start(wusbhc); - if (result < 0) { - dev_err(dev, "error starting security in the HC: %d\n", - result); - goto error_sec_start; - } - - result = wusbhc->set_num_dnts(wusbhc, wusbhc->dnts_interval, - wusbhc->dnts_num_slots); - if (result < 0) { - dev_err(dev, "Cannot set DNTS parameters: %d\n", result); - goto error_set_num_dnts; - } - result = wusbhc_mmc_start(wusbhc); - if (result < 0) { - dev_err(dev, "error starting wusbch: %d\n", result); - goto error_wusbhc_start; - } - - return 0; - -error_wusbhc_start: - wusbhc_sec_stop(wusbhc); -error_set_num_dnts: -error_sec_start: - wusbhc_devconnect_stop(wusbhc); -error_devconnect_start: - wusbhc_rsv_terminate(wusbhc); -error_rsv_establish: - return result; -} - -/* - * wusbhc_stop - stop transmitting MMCs - * @wusbhc: the HC to stop - * - * Stops the WUSB channel and removes the cluster reservation. - */ -void wusbhc_stop(struct wusbhc *wusbhc) -{ - wusbhc_mmc_stop(wusbhc); - wusbhc_sec_stop(wusbhc); - wusbhc_devconnect_stop(wusbhc); - wusbhc_rsv_terminate(wusbhc); -} - -/* - * Set/reset/update a new CHID - * - * Depending on the previous state of the MMCs, start, stop or change - * the sent MMC. This effectively switches the host controller on and - * off (radio wise). - */ -int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid) -{ - int result = 0; - - if (memcmp(chid, &wusb_ckhdid_zero, sizeof(*chid)) == 0) - chid = NULL; - - mutex_lock(&wusbhc->mutex); - if (chid) { - if (wusbhc->active) { - mutex_unlock(&wusbhc->mutex); - return -EBUSY; - } - wusbhc->chid = *chid; - } - - /* register with UWB if we haven't already since we are about to start - the radio. */ - if ((chid) && (wusbhc->uwb_rc == NULL)) { - wusbhc->uwb_rc = uwb_rc_get_by_grandpa(wusbhc->dev->parent); - if (wusbhc->uwb_rc == NULL) { - result = -ENODEV; - dev_err(wusbhc->dev, - "Cannot get associated UWB Host Controller\n"); - goto error_rc_get; - } - - result = wusbhc_pal_register(wusbhc); - if (result < 0) { - dev_err(wusbhc->dev, "Cannot register as a UWB PAL\n"); - goto error_pal_register; - } - } - mutex_unlock(&wusbhc->mutex); - - if (chid) - result = uwb_radio_start(&wusbhc->pal); - else if (wusbhc->uwb_rc) - uwb_radio_stop(&wusbhc->pal); - - return result; - -error_pal_register: - uwb_rc_put(wusbhc->uwb_rc); - wusbhc->uwb_rc = NULL; -error_rc_get: - mutex_unlock(&wusbhc->mutex); - - return result; -} -EXPORT_SYMBOL_GPL(wusbhc_chid_set); diff --git a/drivers/staging/wusbcore/pal.c b/drivers/staging/wusbcore/pal.c deleted file mode 100644 index 30f569131471..000000000000 --- a/drivers/staging/wusbcore/pal.c +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB Host Controller - * UWB Protocol Adaptation Layer (PAL) glue. - * - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 
- */ -#include "wusbhc.h" - -static void wusbhc_channel_changed(struct uwb_pal *pal, int channel) -{ - struct wusbhc *wusbhc = container_of(pal, struct wusbhc, pal); - - dev_dbg(wusbhc->dev, "%s: channel = %d\n", __func__, channel); - if (channel < 0) - wusbhc_stop(wusbhc); - else - wusbhc_start(wusbhc); -} - -/** - * wusbhc_pal_register - register the WUSB HC as a UWB PAL - * @wusbhc: the WUSB HC - */ -int wusbhc_pal_register(struct wusbhc *wusbhc) -{ - uwb_pal_init(&wusbhc->pal); - - wusbhc->pal.name = "wusbhc"; - wusbhc->pal.device = wusbhc->usb_hcd.self.controller; - wusbhc->pal.rc = wusbhc->uwb_rc; - wusbhc->pal.channel_changed = wusbhc_channel_changed; - - return uwb_pal_register(&wusbhc->pal); -} - -/** - * wusbhc_pal_unregister - unregister the WUSB HC as a UWB PAL - * @wusbhc: the WUSB HC - */ -void wusbhc_pal_unregister(struct wusbhc *wusbhc) -{ - if (wusbhc->uwb_rc) - uwb_pal_unregister(&wusbhc->pal); -} diff --git a/drivers/staging/wusbcore/reservation.c b/drivers/staging/wusbcore/reservation.c deleted file mode 100644 index b921faac698b..000000000000 --- a/drivers/staging/wusbcore/reservation.c +++ /dev/null @@ -1,110 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * WUSB cluster reservation management - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - */ -#include <linux/kernel.h> - -#include "../uwb/uwb.h" -#include "wusbhc.h" - -/* - * WUSB cluster reservations are multicast reservations with the - * broadcast cluster ID (BCID) as the target DevAddr. - * - * FIXME: consider adjusting the reservation depending on what devices - * are attached. - */ - -static int wusbhc_bwa_set(struct wusbhc *wusbhc, u8 stream, - const struct uwb_mas_bm *mas) -{ - if (mas == NULL) - mas = &uwb_mas_bm_zero; - return wusbhc->bwa_set(wusbhc, stream, mas); -} - -/** - * wusbhc_rsv_complete_cb - WUSB HC reservation complete callback - * @rsv: the reservation - * - * Either set or clear the HC's view of the reservation. - * - * FIXME: when a reservation is denied the HC should be stopped. 
- */ -static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv) -{ - struct wusbhc *wusbhc = rsv->pal_priv; - struct device *dev = wusbhc->dev; - struct uwb_mas_bm mas; - - dev_dbg(dev, "%s: state = %d\n", __func__, rsv->state); - switch (rsv->state) { - case UWB_RSV_STATE_O_ESTABLISHED: - uwb_rsv_get_usable_mas(rsv, &mas); - dev_dbg(dev, "established reservation: %*pb\n", - UWB_NUM_MAS, mas.bm); - wusbhc_bwa_set(wusbhc, rsv->stream, &mas); - break; - case UWB_RSV_STATE_NONE: - dev_dbg(dev, "removed reservation\n"); - wusbhc_bwa_set(wusbhc, 0, NULL); - break; - default: - dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state); - break; - } -} - - -/** - * wusbhc_rsv_establish - establish a reservation for the cluster - * @wusbhc: the WUSB HC requesting a bandwidth reservation - */ -int wusbhc_rsv_establish(struct wusbhc *wusbhc) -{ - struct uwb_rc *rc = wusbhc->uwb_rc; - struct uwb_rsv *rsv; - struct uwb_dev_addr bcid; - int ret; - - if (rc == NULL) - return -ENODEV; - - rsv = uwb_rsv_create(rc, wusbhc_rsv_complete_cb, wusbhc); - if (rsv == NULL) - return -ENOMEM; - - bcid.data[0] = wusbhc->cluster_id; - bcid.data[1] = 0; - - rsv->target.type = UWB_RSV_TARGET_DEVADDR; - rsv->target.devaddr = bcid; - rsv->type = UWB_DRP_TYPE_PRIVATE; - rsv->max_mas = 256; /* try to get as much as possible */ - rsv->min_mas = 15; /* one MAS per zone */ - rsv->max_interval = 1; /* max latency is one zone */ - rsv->is_multicast = true; - - ret = uwb_rsv_establish(rsv); - if (ret == 0) - wusbhc->rsv = rsv; - else - uwb_rsv_destroy(rsv); - return ret; -} - - -/** - * wusbhc_rsv_terminate - terminate the cluster reservation - * @wusbhc: the WUSB host whose reservation is to be terminated - */ -void wusbhc_rsv_terminate(struct wusbhc *wusbhc) -{ - if (wusbhc->rsv) { - uwb_rsv_terminate(wusbhc->rsv); - uwb_rsv_destroy(wusbhc->rsv); - wusbhc->rsv = NULL; - } -} diff --git a/drivers/staging/wusbcore/rh.c b/drivers/staging/wusbcore/rh.c deleted file mode 100644 index 20c08cd9dcbf..000000000000 --- a/drivers/staging/wusbcore/rh.c +++ /dev/null @@ -1,426 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB Host Controller - * Root Hub operations - * - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * We fake a root hub that has fake ports (as many as simultaneous - * devices the Wireless USB Host Controller can deal with). For each - * port we keep an state in @wusbhc->port[index] identical to the one - * specified in the USB2.0[ch11] spec and some extra device - * information that complements the one in 'struct usb_device' (as - * this lacs a hcpriv pointer). - * - * Note this is common to WHCI and HWA host controllers. - * - * Through here we enable most of the state changes that the USB stack - * will use to connect or disconnect devices. We need to do some - * forced adaptation of Wireless USB device states vs. wired: - * - * USB: WUSB: - * - * Port Powered-off port slot n/a - * Powered-on port slot available - * Disconnected port slot available - * Connected port slot assigned device - * device sent DN_Connect - * device was authenticated - * Enabled device is authenticated, transitioned - * from unauth -> auth -> default address - * -> enabled - * Reset disconnect - * Disable disconnect - * - * This maps the standard USB port states with the WUSB device states - * so we can fake ports without having to modify the USB stack. 
- * - * FIXME: this process will change in the future - * - * - * ENTRY POINTS - * - * Our entry points into here are, as in hcd.c, the USB stack root hub - * ops defined in the usb_hcd struct: - * - * wusbhc_rh_status_data() Provide hub and port status data bitmap - * - * wusbhc_rh_control() Execution of all the major requests - * you can do to a hub (Set|Clear - * features, get descriptors, status, etc). - * - * wusbhc_rh_[suspend|resume]() That - * - * wusbhc_rh_start_port_reset() ??? unimplemented - */ -#include <linux/slab.h> -#include <linux/export.h> -#include "wusbhc.h" - -/* - * Reset a fake port - * - * Using a Reset Device IE is too heavyweight as it causes the device - * to enter the UnConnected state and leave the cluster, this can mean - * that when the device reconnects it is connected to a different fake - * port. - * - * Instead, reset authenticated devices with a SetAddress(0), followed - * by a SetAddresss(AuthAddr). - * - * For unauthenticated devices just pretend to reset but do nothing. - * If the device initialization continues to fail it will eventually - * time out after TrustTimeout and enter the UnConnected state. - * - * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. - * - * Supposedly we are the only thread accesing @wusbhc->port; in any - * case, maybe we should move the mutex locking from - * wusbhc_devconnect_auth() to here. - * - * @port_idx refers to the wusbhc's port index, not the USB port number - */ -static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx) -{ - int result = 0; - struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx); - struct wusb_dev *wusb_dev = port->wusb_dev; - - if (wusb_dev == NULL) - return -ENOTCONN; - - port->status |= USB_PORT_STAT_RESET; - port->change |= USB_PORT_STAT_C_RESET; - - if (wusb_dev->addr & WUSB_DEV_ADDR_UNAUTH) - result = 0; - else - result = wusb_dev_update_address(wusbhc, wusb_dev); - - port->status &= ~USB_PORT_STAT_RESET; - port->status |= USB_PORT_STAT_ENABLE; - port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE; - - return result; -} - -/* - * Return the hub change status bitmap - * - * The bits in the change status bitmap are cleared when a - * ClearPortFeature request is issued (USB2.0[11.12.3,11.12.4]. - * - * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. - * - * WARNING!! This gets called from atomic context; we cannot get the - * mutex--the only race condition we can find is some bit - * changing just after we copy it, which shouldn't be too - * big of a problem [and we can't make it an spinlock - * because other parts need to take it and sleep] . - * - * @usb_hcd is refcounted, so it won't disappear under us - * and before killing a host, the polling of the root hub - * would be stopped anyway. - */ -int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - size_t cnt, size, bits_set = 0; - - /* WE DON'T LOCK, see comment */ - /* round up to bytes. Hub bit is bit 0 so add 1. */ - size = DIV_ROUND_UP(wusbhc->ports_max + 1, 8); - - /* clear the output buffer. */ - memset(_buf, 0, size); - /* set the bit for each changed port. */ - for (cnt = 0; cnt < wusbhc->ports_max; cnt++) { - - if (wusb_port_by_idx(wusbhc, cnt)->change) { - const int bitpos = cnt+1; - - _buf[bitpos/8] |= (1 << (bitpos % 8)); - bits_set++; - } - } - - return bits_set ? 
size : 0; -} -EXPORT_SYMBOL_GPL(wusbhc_rh_status_data); - -/* - * Return the hub's descriptor - * - * NOTE: almost cut and paste from ehci-hub.c - * - * @wusbhc is assumed referenced and @wusbhc->mutex unlocked - */ -static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue, - u16 wIndex, - struct usb_hub_descriptor *descr, - u16 wLength) -{ - u16 temp = 1 + (wusbhc->ports_max / 8); - u8 length = 7 + 2 * temp; - - if (wLength < length) - return -ENOSPC; - descr->bDescLength = 7 + 2 * temp; - descr->bDescriptorType = USB_DT_HUB; /* HUB type */ - descr->bNbrPorts = wusbhc->ports_max; - descr->wHubCharacteristics = cpu_to_le16( - HUB_CHAR_COMMON_LPSM /* All ports power at once */ - | 0x00 /* not part of compound device */ - | HUB_CHAR_NO_OCPM /* No overcurrent protection */ - | 0x00 /* 8 FS think time FIXME ?? */ - | 0x00); /* No port indicators */ - descr->bPwrOn2PwrGood = 0; - descr->bHubContrCurrent = 0; - /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */ - memset(&descr->u.hs.DeviceRemovable[0], 0, temp); - memset(&descr->u.hs.DeviceRemovable[temp], 0xff, temp); - return 0; -} - -/* - * Clear a hub feature - * - * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. - * - * Nothing to do, so no locking needed ;) - */ -static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature) -{ - int result; - - switch (feature) { - case C_HUB_LOCAL_POWER: - /* FIXME: maybe plug bit 0 to the power input status, - * if any? - * see wusbhc_rh_get_hub_status() */ - case C_HUB_OVER_CURRENT: - result = 0; - break; - default: - result = -EPIPE; - } - return result; -} - -/* - * Return hub status (it is always zero...) - * - * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. - * - * Nothing to do, so no locking needed ;) - */ -static int wusbhc_rh_get_hub_status(struct wusbhc *wusbhc, u32 *buf, - u16 wLength) -{ - /* FIXME: maybe plug bit 0 to the power input status (if any)? */ - *buf = 0; - return 0; -} - -/* - * Set a port feature - * - * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. - */ -static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature, - u8 selector, u8 port_idx) -{ - struct device *dev = wusbhc->dev; - - if (port_idx > wusbhc->ports_max) - return -EINVAL; - - switch (feature) { - /* According to USB2.0[11.24.2.13]p2, these features - * are not required to be implemented. */ - case USB_PORT_FEAT_C_OVER_CURRENT: - case USB_PORT_FEAT_C_ENABLE: - case USB_PORT_FEAT_C_SUSPEND: - case USB_PORT_FEAT_C_CONNECTION: - case USB_PORT_FEAT_C_RESET: - return 0; - case USB_PORT_FEAT_POWER: - /* No such thing, but we fake it works */ - mutex_lock(&wusbhc->mutex); - wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER; - mutex_unlock(&wusbhc->mutex); - return 0; - case USB_PORT_FEAT_RESET: - return wusbhc_rh_port_reset(wusbhc, port_idx); - case USB_PORT_FEAT_ENABLE: - case USB_PORT_FEAT_SUSPEND: - dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n", - port_idx, feature, selector); - return -ENOSYS; - default: - dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n", - port_idx, feature, selector); - return -EPIPE; - } - - return 0; -} - -/* - * Clear a port feature... - * - * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. 
- */ -static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature, - u8 selector, u8 port_idx) -{ - int result = 0; - struct device *dev = wusbhc->dev; - - if (port_idx > wusbhc->ports_max) - return -EINVAL; - - mutex_lock(&wusbhc->mutex); - switch (feature) { - case USB_PORT_FEAT_POWER: /* fake port always on */ - /* According to USB2.0[11.24.2.7.1.4], no need to implement? */ - case USB_PORT_FEAT_C_OVER_CURRENT: - break; - case USB_PORT_FEAT_C_RESET: - wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_RESET; - break; - case USB_PORT_FEAT_C_CONNECTION: - wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_CONNECTION; - break; - case USB_PORT_FEAT_ENABLE: - __wusbhc_dev_disable(wusbhc, port_idx); - break; - case USB_PORT_FEAT_C_ENABLE: - wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_ENABLE; - break; - case USB_PORT_FEAT_SUSPEND: - case USB_PORT_FEAT_C_SUSPEND: - dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n", - port_idx, feature, selector); - result = -ENOSYS; - break; - default: - dev_err(dev, "(port_idx %d) Clear feat %d/%d UNKNOWN\n", - port_idx, feature, selector); - result = -EPIPE; - break; - } - mutex_unlock(&wusbhc->mutex); - - return result; -} - -/* - * Return the port's status - * - * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. - */ -static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx, - u32 *_buf, u16 wLength) -{ - __le16 *buf = (__le16 *)_buf; - - if (port_idx > wusbhc->ports_max) - return -EINVAL; - - mutex_lock(&wusbhc->mutex); - buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status); - buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change); - mutex_unlock(&wusbhc->mutex); - - return 0; -} - -/* - * Entry point for Root Hub operations - * - * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. - */ -int wusbhc_rh_control(struct usb_hcd *usb_hcd, u16 reqntype, u16 wValue, - u16 wIndex, char *buf, u16 wLength) -{ - int result = -ENOSYS; - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - - switch (reqntype) { - case GetHubDescriptor: - result = wusbhc_rh_get_hub_descr( - wusbhc, wValue, wIndex, - (struct usb_hub_descriptor *) buf, wLength); - break; - case ClearHubFeature: - result = wusbhc_rh_clear_hub_feat(wusbhc, wValue); - break; - case GetHubStatus: - result = wusbhc_rh_get_hub_status(wusbhc, (u32 *)buf, wLength); - break; - - case SetPortFeature: - result = wusbhc_rh_set_port_feat(wusbhc, wValue, wIndex >> 8, - (wIndex & 0xff) - 1); - break; - case ClearPortFeature: - result = wusbhc_rh_clear_port_feat(wusbhc, wValue, wIndex >> 8, - (wIndex & 0xff) - 1); - break; - case GetPortStatus: - result = wusbhc_rh_get_port_status(wusbhc, wIndex - 1, - (u32 *)buf, wLength); - break; - - case SetHubFeature: - default: - dev_err(wusbhc->dev, "%s (%p [%p], %x, %x, %x, %p, %x) " - "UNIMPLEMENTED\n", __func__, usb_hcd, wusbhc, reqntype, - wValue, wIndex, buf, wLength); - /* dump_stack(); */ - result = -ENOSYS; - } - return result; -} -EXPORT_SYMBOL_GPL(wusbhc_rh_control); - -int wusbhc_rh_start_port_reset(struct usb_hcd *usb_hcd, unsigned port_idx) -{ - struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); - dev_err(wusbhc->dev, "%s (%p [%p], port_idx %u) UNIMPLEMENTED\n", - __func__, usb_hcd, wusbhc, port_idx); - WARN_ON(1); - return -ENOSYS; -} -EXPORT_SYMBOL_GPL(wusbhc_rh_start_port_reset); - -static void wusb_port_init(struct wusb_port *port) -{ - port->status |= USB_PORT_STAT_HIGH_SPEED; -} - -/* - * Alloc fake port specific fields and status. 
- */ -int wusbhc_rh_create(struct wusbhc *wusbhc) -{ - int result = -ENOMEM; - size_t port_size, itr; - port_size = wusbhc->ports_max * sizeof(wusbhc->port[0]); - wusbhc->port = kzalloc(port_size, GFP_KERNEL); - if (wusbhc->port == NULL) - goto error_port_alloc; - for (itr = 0; itr < wusbhc->ports_max; itr++) - wusb_port_init(&wusbhc->port[itr]); - result = 0; -error_port_alloc: - return result; -} - -void wusbhc_rh_destroy(struct wusbhc *wusbhc) -{ - kfree(wusbhc->port); -} diff --git a/drivers/staging/wusbcore/security.c b/drivers/staging/wusbcore/security.c deleted file mode 100644 index 14ac8c98ac9e..000000000000 --- a/drivers/staging/wusbcore/security.c +++ /dev/null @@ -1,599 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB Host Controller - * Security support: encryption enablement, etc - * - * Copyright (C) 2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: docs - */ -#include <linux/types.h> -#include <linux/slab.h> -#include <linux/usb/ch9.h> -#include <linux/random.h> -#include <linux/export.h> -#include "wusbhc.h" -#include <asm/unaligned.h> - -static void wusbhc_gtk_rekey_work(struct work_struct *work); - -int wusbhc_sec_create(struct wusbhc *wusbhc) -{ - /* - * WQ is singlethread because we need to serialize rekey operations. - * Use a separate workqueue for security operations instead of the - * wusbd workqueue because security operations may need to communicate - * directly with downstream wireless devices using synchronous URBs. - * If a device is not responding, this could block other host - * controller operations. - */ - wusbhc->wq_security = create_singlethread_workqueue("wusbd_security"); - if (wusbhc->wq_security == NULL) { - pr_err("WUSB-core: Cannot create wusbd_security workqueue\n"); - return -ENOMEM; - } - - wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + - sizeof(wusbhc->gtk.data); - wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY; - wusbhc->gtk.descr.bReserved = 0; - wusbhc->gtk_index = 0; - - INIT_WORK(&wusbhc->gtk_rekey_work, wusbhc_gtk_rekey_work); - - return 0; -} - - -/* Called when the HC is destroyed */ -void wusbhc_sec_destroy(struct wusbhc *wusbhc) -{ - destroy_workqueue(wusbhc->wq_security); -} - - -/** - * wusbhc_next_tkid - generate a new, currently unused, TKID - * @wusbhc: the WUSB host controller - * @wusb_dev: the device whose PTK the TKID is for - * (or NULL for a TKID for a GTK) - * - * The generated TKID consists of two parts: the device's authenticated - * address (or 0 or a GTK); and an incrementing number. This ensures - * that TKIDs cannot be shared between devices and by the time the - * incrementing number wraps around the older TKIDs will no longer be - * in use (a maximum of two keys may be active at any one time). 
- */ -static u32 wusbhc_next_tkid(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) -{ - u32 *tkid; - u32 addr; - - if (wusb_dev == NULL) { - tkid = &wusbhc->gtk_tkid; - addr = 0; - } else { - tkid = &wusb_port_by_idx(wusbhc, wusb_dev->port_idx)->ptk_tkid; - addr = wusb_dev->addr & 0x7f; - } - - *tkid = (addr << 8) | ((*tkid + 1) & 0xff); - - return *tkid; -} - -static void wusbhc_generate_gtk(struct wusbhc *wusbhc) -{ - const size_t key_size = sizeof(wusbhc->gtk.data); - u32 tkid; - - tkid = wusbhc_next_tkid(wusbhc, NULL); - - wusbhc->gtk.descr.tTKID[0] = (tkid >> 0) & 0xff; - wusbhc->gtk.descr.tTKID[1] = (tkid >> 8) & 0xff; - wusbhc->gtk.descr.tTKID[2] = (tkid >> 16) & 0xff; - - get_random_bytes(wusbhc->gtk.descr.bKeyData, key_size); -} - -/** - * wusbhc_sec_start - start the security management process - * @wusbhc: the WUSB host controller - * - * Generate and set an initial GTK on the host controller. - * - * Called when the HC is started. - */ -int wusbhc_sec_start(struct wusbhc *wusbhc) -{ - const size_t key_size = sizeof(wusbhc->gtk.data); - int result; - - wusbhc_generate_gtk(wusbhc); - - result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, - &wusbhc->gtk.descr.bKeyData, key_size); - if (result < 0) - dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n", - result); - - return result; -} - -/** - * wusbhc_sec_stop - stop the security management process - * @wusbhc: the WUSB host controller - * - * Wait for any pending GTK rekeys to stop. - */ -void wusbhc_sec_stop(struct wusbhc *wusbhc) -{ - cancel_work_sync(&wusbhc->gtk_rekey_work); -} - - -/** @returns encryption type name */ -const char *wusb_et_name(u8 x) -{ - switch (x) { - case USB_ENC_TYPE_UNSECURE: return "unsecure"; - case USB_ENC_TYPE_WIRED: return "wired"; - case USB_ENC_TYPE_CCM_1: return "CCM-1"; - case USB_ENC_TYPE_RSA_1: return "RSA-1"; - default: return "unknown"; - } -} -EXPORT_SYMBOL_GPL(wusb_et_name); - -/* - * Set the device encryption method - * - * We tell the device which encryption method to use; we do this when - * setting up the device's security. - */ -static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value) -{ - int result; - struct device *dev = &usb_dev->dev; - struct wusb_dev *wusb_dev = usb_dev->wusb_dev; - - if (value) { - value = wusb_dev->ccm1_etd.bEncryptionValue; - } else { - /* FIXME: should be wusb_dev->etd[UNSECURE].bEncryptionValue */ - value = 0; - } - /* Set device's */ - result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), - USB_REQ_SET_ENCRYPTION, - USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - value, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); - if (result < 0) - dev_err(dev, "Can't set device's WUSB encryption to " - "%s (value %d): %d\n", - wusb_et_name(wusb_dev->ccm1_etd.bEncryptionType), - wusb_dev->ccm1_etd.bEncryptionValue, result); - return result; -} - -/* - * Set the GTK to be used by a device. - * - * The device must be authenticated. 
- */ -static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) -{ - struct usb_device *usb_dev = wusb_dev->usb_dev; - u8 key_index = wusb_key_index(wusbhc->gtk_index, - WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST); - - return usb_control_msg( - usb_dev, usb_sndctrlpipe(usb_dev, 0), - USB_REQ_SET_DESCRIPTOR, - USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - USB_DT_KEY << 8 | key_index, 0, - &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, - USB_CTRL_SET_TIMEOUT); -} - - -/* FIXME: prototype for adding security */ -int wusb_dev_sec_add(struct wusbhc *wusbhc, - struct usb_device *usb_dev, struct wusb_dev *wusb_dev) -{ - int result, bytes, secd_size; - struct device *dev = &usb_dev->dev; - struct usb_security_descriptor *secd, *new_secd; - const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL; - const void *itr, *top; - char buf[64]; - - secd = kmalloc(sizeof(*secd), GFP_KERNEL); - if (secd == NULL) { - result = -ENOMEM; - goto out; - } - - result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, - 0, secd, sizeof(*secd)); - if (result < (int)sizeof(*secd)) { - dev_err(dev, "Can't read security descriptor or " - "not enough data: %d\n", result); - goto out; - } - secd_size = le16_to_cpu(secd->wTotalLength); - new_secd = krealloc(secd, secd_size, GFP_KERNEL); - if (new_secd == NULL) { - dev_err(dev, - "Can't allocate space for security descriptors\n"); - result = -ENOMEM; - goto out; - } - secd = new_secd; - result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, - 0, secd, secd_size); - if (result < secd_size) { - dev_err(dev, "Can't read security descriptor or " - "not enough data: %d\n", result); - goto out; - } - bytes = 0; - itr = &secd[1]; - top = (void *)secd + result; - while (itr < top) { - etd = itr; - if (top - itr < sizeof(*etd)) { - dev_err(dev, "BUG: bad device security descriptor; " - "not enough data (%zu vs %zu bytes left)\n", - top - itr, sizeof(*etd)); - break; - } - if (etd->bLength < sizeof(*etd)) { - dev_err(dev, "BUG: bad device encryption descriptor; " - "descriptor is too short " - "(%u vs %zu needed)\n", - etd->bLength, sizeof(*etd)); - break; - } - itr += etd->bLength; - bytes += snprintf(buf + bytes, sizeof(buf) - bytes, - "%s (0x%02x/%02x) ", - wusb_et_name(etd->bEncryptionType), - etd->bEncryptionValue, etd->bAuthKeyIndex); - if (etd->bEncryptionType == USB_ENC_TYPE_CCM_1) - ccm1_etd = etd; - } - /* This code only supports CCM1 as of now. */ - /* FIXME: user has to choose which sec mode to use? - * In theory we want CCM */ - if (ccm1_etd == NULL) { - dev_err(dev, "WUSB device doesn't support CCM1 encryption, " - "can't use!\n"); - result = -EINVAL; - goto out; - } - wusb_dev->ccm1_etd = *ccm1_etd; - dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", - buf, wusb_et_name(ccm1_etd->bEncryptionType), - ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); - result = 0; -out: - kfree(secd); - return result; -} - -void wusb_dev_sec_rm(struct wusb_dev *wusb_dev) -{ - /* Nothing so far */ -} - -/** - * Update the address of an unauthenticated WUSB device - * - * Once we have successfully authenticated, we take it to addr0 state - * and then to a normal address. - * - * Before the device's address (as known by it) was usb_dev->devnum | - * 0x80 (unauthenticated address). With this we update it to usb_dev->devnum. 
- */ -int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) -{ - int result = -ENOMEM; - struct usb_device *usb_dev = wusb_dev->usb_dev; - struct device *dev = &usb_dev->dev; - u8 new_address = wusb_dev->addr & 0x7F; - - /* Set address 0 */ - result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), - USB_REQ_SET_ADDRESS, - USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); - if (result < 0) { - dev_err(dev, "auth failed: can't set address 0: %d\n", - result); - goto error_addr0; - } - result = wusb_set_dev_addr(wusbhc, wusb_dev, 0); - if (result < 0) - goto error_addr0; - usb_set_device_state(usb_dev, USB_STATE_DEFAULT); - usb_ep0_reinit(usb_dev); - - /* Set new (authenticated) address. */ - result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), - USB_REQ_SET_ADDRESS, - USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - new_address, 0, NULL, 0, - USB_CTRL_SET_TIMEOUT); - if (result < 0) { - dev_err(dev, "auth failed: can't set address %u: %d\n", - new_address, result); - goto error_addr; - } - result = wusb_set_dev_addr(wusbhc, wusb_dev, new_address); - if (result < 0) - goto error_addr; - usb_set_device_state(usb_dev, USB_STATE_ADDRESS); - usb_ep0_reinit(usb_dev); - usb_dev->authenticated = 1; -error_addr: -error_addr0: - return result; -} - -/* - * - * - */ -/* FIXME: split and cleanup */ -int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, - struct wusb_ckhdid *ck) -{ - int result = -ENOMEM; - struct usb_device *usb_dev = wusb_dev->usb_dev; - struct device *dev = &usb_dev->dev; - u32 tkid; - struct usb_handshake *hs; - struct aes_ccm_nonce ccm_n; - u8 mic[8]; - struct wusb_keydvt_in keydvt_in; - struct wusb_keydvt_out keydvt_out; - - hs = kcalloc(3, sizeof(hs[0]), GFP_KERNEL); - if (!hs) - goto error_kzalloc; - - /* We need to turn encryption before beginning the 4way - * hshake (WUSB1.0[.3.2.2]) */ - result = wusb_dev_set_encryption(usb_dev, 1); - if (result < 0) - goto error_dev_set_encryption; - - tkid = wusbhc_next_tkid(wusbhc, wusb_dev); - - hs[0].bMessageNumber = 1; - hs[0].bStatus = 0; - put_unaligned_le32(tkid, hs[0].tTKID); - hs[0].bReserved = 0; - memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID)); - get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce)); - memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */ - - result = usb_control_msg( - usb_dev, usb_sndctrlpipe(usb_dev, 0), - USB_REQ_SET_HANDSHAKE, - USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - 1, 0, &hs[0], sizeof(hs[0]), USB_CTRL_SET_TIMEOUT); - if (result < 0) { - dev_err(dev, "Handshake1: request failed: %d\n", result); - goto error_hs1; - } - - /* Handshake 2, from the device -- need to verify fields */ - result = usb_control_msg( - usb_dev, usb_rcvctrlpipe(usb_dev, 0), - USB_REQ_GET_HANDSHAKE, - USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - 2, 0, &hs[1], sizeof(hs[1]), USB_CTRL_GET_TIMEOUT); - if (result < 0) { - dev_err(dev, "Handshake2: request failed: %d\n", result); - goto error_hs2; - } - - result = -EINVAL; - if (hs[1].bMessageNumber != 2) { - dev_err(dev, "Handshake2 failed: bad message number %u\n", - hs[1].bMessageNumber); - goto error_hs2; - } - if (hs[1].bStatus != 0) { - dev_err(dev, "Handshake2 failed: bad status %u\n", - hs[1].bStatus); - goto error_hs2; - } - if (memcmp(hs[0].tTKID, hs[1].tTKID, sizeof(hs[0].tTKID))) { - dev_err(dev, "Handshake2 failed: TKID mismatch " - "(#1 0x%02x%02x%02x vs #2 0x%02x%02x%02x)\n", - hs[0].tTKID[0], hs[0].tTKID[1], hs[0].tTKID[2], 
- hs[1].tTKID[0], hs[1].tTKID[1], hs[1].tTKID[2]); - goto error_hs2; - } - if (memcmp(hs[0].CDID, hs[1].CDID, sizeof(hs[0].CDID))) { - dev_err(dev, "Handshake2 failed: CDID mismatch\n"); - goto error_hs2; - } - - /* Setup the CCM nonce */ - memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */ - put_unaligned_le32(tkid, ccm_n.tkid); - ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr; - ccm_n.dest_addr.data[0] = wusb_dev->addr; - ccm_n.dest_addr.data[1] = 0; - - /* Derive the KCK and PTK from CK, the CCM, H and D nonces */ - memcpy(keydvt_in.hnonce, hs[0].nonce, sizeof(keydvt_in.hnonce)); - memcpy(keydvt_in.dnonce, hs[1].nonce, sizeof(keydvt_in.dnonce)); - result = wusb_key_derive(&keydvt_out, ck->data, &ccm_n, &keydvt_in); - if (result < 0) { - dev_err(dev, "Handshake2 failed: cannot derive keys: %d\n", - result); - goto error_hs2; - } - - /* Compute MIC and verify it */ - result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]); - if (result < 0) { - dev_err(dev, "Handshake2 failed: cannot compute MIC: %d\n", - result); - goto error_hs2; - } - - if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) { - dev_err(dev, "Handshake2 failed: MIC mismatch\n"); - goto error_hs2; - } - - /* Send Handshake3 */ - hs[2].bMessageNumber = 3; - hs[2].bStatus = 0; - put_unaligned_le32(tkid, hs[2].tTKID); - hs[2].bReserved = 0; - memcpy(hs[2].CDID, &wusb_dev->cdid, sizeof(hs[2].CDID)); - memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce)); - result = wusb_oob_mic(hs[2].MIC, keydvt_out.kck, &ccm_n, &hs[2]); - if (result < 0) { - dev_err(dev, "Handshake3 failed: cannot compute MIC: %d\n", - result); - goto error_hs2; - } - - result = usb_control_msg( - usb_dev, usb_sndctrlpipe(usb_dev, 0), - USB_REQ_SET_HANDSHAKE, - USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - 3, 0, &hs[2], sizeof(hs[2]), USB_CTRL_SET_TIMEOUT); - if (result < 0) { - dev_err(dev, "Handshake3: request failed: %d\n", result); - goto error_hs3; - } - - result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid, - keydvt_out.ptk, sizeof(keydvt_out.ptk)); - if (result < 0) - goto error_wusbhc_set_ptk; - - result = wusb_dev_set_gtk(wusbhc, wusb_dev); - if (result < 0) { - dev_err(dev, "Set GTK for device: request failed: %d\n", - result); - goto error_wusbhc_set_gtk; - } - - /* Update the device's address from unauth to auth */ - if (usb_dev->authenticated == 0) { - result = wusb_dev_update_address(wusbhc, wusb_dev); - if (result < 0) - goto error_dev_update_address; - } - result = 0; - dev_info(dev, "device authenticated\n"); - -error_dev_update_address: -error_wusbhc_set_gtk: -error_wusbhc_set_ptk: -error_hs3: -error_hs2: -error_hs1: - memset(hs, 0, 3*sizeof(hs[0])); - memzero_explicit(&keydvt_out, sizeof(keydvt_out)); - memzero_explicit(&keydvt_in, sizeof(keydvt_in)); - memzero_explicit(&ccm_n, sizeof(ccm_n)); - memzero_explicit(mic, sizeof(mic)); - if (result < 0) - wusb_dev_set_encryption(usb_dev, 0); -error_dev_set_encryption: - kfree(hs); -error_kzalloc: - return result; -} - -/* - * Once all connected and authenticated devices have received the new - * GTK, switch the host to using it. - */ -static void wusbhc_gtk_rekey_work(struct work_struct *work) -{ - struct wusbhc *wusbhc = container_of(work, - struct wusbhc, gtk_rekey_work); - size_t key_size = sizeof(wusbhc->gtk.data); - int port_idx; - struct wusb_dev *wusb_dev, *wusb_dev_next; - LIST_HEAD(rekey_list); - - mutex_lock(&wusbhc->mutex); - /* generate the new key */ - wusbhc_generate_gtk(wusbhc); - /* roll the gtk index. 
*/ - wusbhc->gtk_index = (wusbhc->gtk_index + 1) % (WUSB_KEY_INDEX_MAX + 1); - /* - * Save all connected devices on a list while holding wusbhc->mutex and - * take a reference to each one. Then submit the set key request to - * them after releasing the lock in order to avoid a deadlock. - */ - for (port_idx = 0; port_idx < wusbhc->ports_max; port_idx++) { - wusb_dev = wusbhc->port[port_idx].wusb_dev; - if (!wusb_dev || !wusb_dev->usb_dev - || !wusb_dev->usb_dev->authenticated) - continue; - - wusb_dev_get(wusb_dev); - list_add_tail(&wusb_dev->rekey_node, &rekey_list); - } - mutex_unlock(&wusbhc->mutex); - - /* Submit the rekey requests without holding wusbhc->mutex. */ - list_for_each_entry_safe(wusb_dev, wusb_dev_next, &rekey_list, - rekey_node) { - list_del_init(&wusb_dev->rekey_node); - dev_dbg(&wusb_dev->usb_dev->dev, - "%s: rekey device at port %d\n", - __func__, wusb_dev->port_idx); - - if (wusb_dev_set_gtk(wusbhc, wusb_dev) < 0) { - dev_err(&wusb_dev->usb_dev->dev, - "%s: rekey device at port %d failed\n", - __func__, wusb_dev->port_idx); - } - wusb_dev_put(wusb_dev); - } - - /* Switch the host controller to use the new GTK. */ - mutex_lock(&wusbhc->mutex); - wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, - &wusbhc->gtk.descr.bKeyData, key_size); - mutex_unlock(&wusbhc->mutex); -} - -/** - * wusbhc_gtk_rekey - generate and distribute a new GTK - * @wusbhc: the WUSB host controller - * - * Generate a new GTK and distribute it to all connected and - * authenticated devices. When all devices have the new GTK, the host - * starts using it. - * - * This must be called after every device disconnect (see [WUSB] - * section 6.2.11.2). - */ -void wusbhc_gtk_rekey(struct wusbhc *wusbhc) -{ - /* - * We need to submit a URB to the downstream WUSB devices in order to - * change the group key. This can't be done while holding the - * wusbhc->mutex since that is also taken in the urb_enqueue routine - * and will cause a deadlock. Instead, queue a work item to do - * it when the lock is not held - */ - queue_work(wusbhc->wq_security, &wusbhc->gtk_rekey_work); -} diff --git a/drivers/staging/wusbcore/wa-hc.c b/drivers/staging/wusbcore/wa-hc.c deleted file mode 100644 index 6827075fb8a1..000000000000 --- a/drivers/staging/wusbcore/wa-hc.c +++ /dev/null @@ -1,88 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wire Adapter Host Controller Driver - * Common items to HWA and DWA based HCDs - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: docs - */ -#include <linux/slab.h> -#include <linux/module.h> -#include "wusbhc.h" -#include "wa-hc.h" - -/** - * Assumes - * - * wa->usb_dev and wa->usb_iface initialized and refcounted, - * wa->wa_descr initialized. 
- */ -int wa_create(struct wahc *wa, struct usb_interface *iface, - kernel_ulong_t quirks) -{ - int result; - struct device *dev = &iface->dev; - - if (iface->cur_altsetting->desc.bNumEndpoints < 3) - return -ENODEV; - - result = wa_rpipes_create(wa); - if (result < 0) - goto error_rpipes_create; - wa->quirks = quirks; - /* Fill up Data Transfer EP pointers */ - wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc; - wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc; - wa->dti_buf_size = usb_endpoint_maxp(wa->dti_epd); - wa->dti_buf = kmalloc(wa->dti_buf_size, GFP_KERNEL); - if (wa->dti_buf == NULL) { - result = -ENOMEM; - goto error_dti_buf_alloc; - } - result = wa_nep_create(wa, iface); - if (result < 0) { - dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n", - result); - goto error_nep_create; - } - return 0; - -error_nep_create: - kfree(wa->dti_buf); -error_dti_buf_alloc: - wa_rpipes_destroy(wa); -error_rpipes_create: - return result; -} -EXPORT_SYMBOL_GPL(wa_create); - - -void __wa_destroy(struct wahc *wa) -{ - if (wa->dti_urb) { - usb_kill_urb(wa->dti_urb); - usb_put_urb(wa->dti_urb); - } - kfree(wa->dti_buf); - wa_nep_destroy(wa); - wa_rpipes_destroy(wa); -} -EXPORT_SYMBOL_GPL(__wa_destroy); - -/** - * wa_reset_all - reset the WA device - * @wa: the WA to be reset - * - * For HWAs the radio controller and all other PALs are also reset. - */ -void wa_reset_all(struct wahc *wa) -{ - /* FIXME: assuming HWA. */ - wusbhc_reset_all(wa->wusb); -} - -MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); -MODULE_DESCRIPTION("Wireless USB Wire Adapter core"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/wusbcore/wa-hc.h b/drivers/staging/wusbcore/wa-hc.h deleted file mode 100644 index 5a38465724c2..000000000000 --- a/drivers/staging/wusbcore/wa-hc.h +++ /dev/null @@ -1,467 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * HWA Host Controller Driver - * Wire Adapter Control/Data Streaming Iface (WUSB1.0[8]) - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This driver implements a USB Host Controller (struct usb_hcd) for a - * Wireless USB Host Controller based on the Wireless USB 1.0 - * Host-Wire-Adapter specification (in layman terms, a USB-dongle that - * implements a Wireless USB host). - * - * Check out the Design-overview.txt file in the source documentation - * for other details on the implementation. - * - * Main blocks: - * - * driver glue with the driver API, workqueue daemon - * - * lc RC instance life cycle management (create, destroy...) - * - * hcd glue with the USB API Host Controller Interface API. - * - * nep Notification EndPoint management: collect notifications - * and queue them with the workqueue daemon. - * - * Handle notifications as coming from the NEP. Sends them - * off others to their respective modules (eg: connect, - * disconnect and reset go to devconnect). - * - * rpipe Remote Pipe management; rpipe is what we use to write - * to an endpoint on a WUSB device that is connected to a - * HWA RC. - * - * xfer Transfer management -- this is all the code that gets a - * buffer and pushes it to a device (or viceversa). * - * - * Some day a lot of this code will be shared between this driver and - * the drivers for DWA (xfer, rpipe). - * - * All starts at driver.c:hwahc_probe(), when one of this guys is - * connected. hwahc_disconnect() stops it. - * - * During operation, the main driver is devices connecting or - * disconnecting. 
They cause the HWA RC to send notifications into - * nep.c:hwahc_nep_cb() that will dispatch them to - * notif.c:wa_notif_dispatch(). From there they will fan to cause - * device connects, disconnects, etc. - * - * Note much of the activity is difficult to follow. For example a - * device connect goes to devconnect, which will cause the "fake" root - * hub port to show a connect and stop there. Then hub_wq will notice - * and call into the rh.c:hwahc_rc_port_reset() code to authenticate - * the device (and this might require user intervention) and enable - * the port. - * - * We also have a timer workqueue going from devconnect.c that - * schedules in hwahc_devconnect_create(). - * - * The rest of the traffic is in the usual entry points of a USB HCD, - * which are hooked up in driver.c:hwahc_rc_driver, and defined in - * hcd.c. - */ - -#ifndef __HWAHC_INTERNAL_H__ -#define __HWAHC_INTERNAL_H__ - -#include <linux/completion.h> -#include <linux/usb.h> -#include <linux/mutex.h> -#include <linux/spinlock.h> -#include "../uwb/uwb.h" -#include "include/wusb.h" -#include "include/wusb-wa.h" - -struct wusbhc; -struct wahc; -extern void wa_urb_enqueue_run(struct work_struct *ws); -extern void wa_process_errored_transfers_run(struct work_struct *ws); - -/** - * RPipe instance - * - * @descr's fields are kept in LE, as we need to send it back and - * forth. - * - * @wa is referenced when set - * - * @segs_available is the number of requests segments that still can - * be submitted to the controller without overloading - * it. It is initialized to descr->wRequests when - * aiming. - * - * A rpipe supports a max of descr->wRequests at the same time; before - * submitting seg_lock has to be taken. If segs_avail > 0, then we can - * submit; if not, we have to queue them. - */ -struct wa_rpipe { - struct kref refcnt; - struct usb_rpipe_descriptor descr; - struct usb_host_endpoint *ep; - struct wahc *wa; - spinlock_t seg_lock; - struct list_head seg_list; - struct list_head list_node; - atomic_t segs_available; - u8 buffer[1]; /* For reads/writes on USB */ -}; - - -enum wa_dti_state { - WA_DTI_TRANSFER_RESULT_PENDING, - WA_DTI_ISOC_PACKET_STATUS_PENDING, - WA_DTI_BUF_IN_DATA_PENDING -}; - -enum wa_quirks { - /* - * The Alereon HWA expects the data frames in isochronous transfer - * requests to be concatenated and not sent as separate packets. - */ - WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC = 0x01, - /* - * The Alereon HWA can be instructed to not send transfer notifications - * as an optimization. - */ - WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS = 0x02, -}; - -enum wa_vendor_specific_requests { - WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS = 0x4C, - WA_REQ_ALEREON_FEATURE_SET = 0x01, - WA_REQ_ALEREON_FEATURE_CLEAR = 0x00, -}; - -#define WA_MAX_BUF_IN_URBS 4 -/** - * Instance of a HWA Host Controller - * - * Except where a more specific lock/mutex applies or atomic, all - * fields protected by @mutex. - * - * @wa_descr Can be accessed without locking because it is in - * the same area where the device descriptors were - * read, so it is guaranteed to exist unmodified while - * the device exists. - * - * Endianess has been converted to CPU's. - * - * @nep_* can be accessed without locking as its processing is - * serialized; we submit a NEP URB and it comes to - * hwahc_nep_cb(), which won't issue another URB until it is - * done processing it. - * - * @xfer_list: - * - * List of active transfers to verify existence from a xfer id - * gotten from the xfer result message. 
Can't use urb->list because - * it goes by endpoint, and we don't know the endpoint at the time - * when we get the xfer result message. We can't really rely on the - * pointer (will have to change for 64 bits) as the xfer id is 32 bits. - * - * @xfer_delayed_list: List of transfers that need to be started - * (with a workqueue, because they were - * submitted from an atomic context). - * - * FIXME: this needs to be layered up: a wusbhc layer (for sharing - * commonalities with WHCI), a wa layer (for sharing - * commonalities with DWA-RC). - */ -struct wahc { - struct usb_device *usb_dev; - struct usb_interface *usb_iface; - - /* HC to deliver notifications */ - union { - struct wusbhc *wusb; - struct dwahc *dwa; - }; - - const struct usb_endpoint_descriptor *dto_epd, *dti_epd; - const struct usb_wa_descriptor *wa_descr; - - struct urb *nep_urb; /* Notification EndPoint [lockless] */ - struct edc nep_edc; - void *nep_buffer; - size_t nep_buffer_size; - - atomic_t notifs_queued; - - u16 rpipes; - unsigned long *rpipe_bm; /* rpipe usage bitmap */ - struct list_head rpipe_delayed_list; /* delayed RPIPES. */ - spinlock_t rpipe_lock; /* protect rpipe_bm and delayed list */ - struct mutex rpipe_mutex; /* assigning resources to endpoints */ - - /* - * dti_state is used to track the state of the dti_urb. When dti_state - * is WA_DTI_ISOC_PACKET_STATUS_PENDING, dti_isoc_xfer_in_progress and - * dti_isoc_xfer_seg identify which xfer the incoming isoc packet - * status refers to. - */ - enum wa_dti_state dti_state; - u32 dti_isoc_xfer_in_progress; - u8 dti_isoc_xfer_seg; - struct urb *dti_urb; /* URB for reading xfer results */ - /* URBs for reading data in */ - struct urb buf_in_urbs[WA_MAX_BUF_IN_URBS]; - int active_buf_in_urbs; /* number of buf_in_urbs active. */ - struct edc dti_edc; /* DTI error density counter */ - void *dti_buf; - size_t dti_buf_size; - - unsigned long dto_in_use; /* protect dto endoint serialization */ - - s32 status; /* For reading status */ - - struct list_head xfer_list; - struct list_head xfer_delayed_list; - struct list_head xfer_errored_list; - /* - * lock for the above xfer lists. Can be taken while a xfer->lock is - * held but not in the reverse order. 
- */ - spinlock_t xfer_list_lock; - struct work_struct xfer_enqueue_work; - struct work_struct xfer_error_work; - atomic_t xfer_id_count; - - kernel_ulong_t quirks; -}; - - -extern int wa_create(struct wahc *wa, struct usb_interface *iface, - kernel_ulong_t); -extern void __wa_destroy(struct wahc *wa); -extern int wa_dti_start(struct wahc *wa); -void wa_reset_all(struct wahc *wa); - - -/* Miscellaneous constants */ -enum { - /** Max number of EPROTO errors we tolerate on the NEP in a - * period of time */ - HWAHC_EPROTO_MAX = 16, - /** Period of time for EPROTO errors (in jiffies) */ - HWAHC_EPROTO_PERIOD = 4 * HZ, -}; - - -/* Notification endpoint handling */ -extern int wa_nep_create(struct wahc *, struct usb_interface *); -extern void wa_nep_destroy(struct wahc *); - -static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask) -{ - struct urb *urb = wa->nep_urb; - urb->transfer_buffer = wa->nep_buffer; - urb->transfer_buffer_length = wa->nep_buffer_size; - return usb_submit_urb(urb, gfp_mask); -} - -static inline void wa_nep_disarm(struct wahc *wa) -{ - usb_kill_urb(wa->nep_urb); -} - - -/* RPipes */ -static inline void wa_rpipe_init(struct wahc *wa) -{ - INIT_LIST_HEAD(&wa->rpipe_delayed_list); - spin_lock_init(&wa->rpipe_lock); - mutex_init(&wa->rpipe_mutex); -} - -static inline void wa_init(struct wahc *wa) -{ - int index; - - edc_init(&wa->nep_edc); - atomic_set(&wa->notifs_queued, 0); - wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING; - wa_rpipe_init(wa); - edc_init(&wa->dti_edc); - INIT_LIST_HEAD(&wa->xfer_list); - INIT_LIST_HEAD(&wa->xfer_delayed_list); - INIT_LIST_HEAD(&wa->xfer_errored_list); - spin_lock_init(&wa->xfer_list_lock); - INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run); - INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run); - wa->dto_in_use = 0; - atomic_set(&wa->xfer_id_count, 1); - /* init the buf in URBs */ - for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) - usb_init_urb(&(wa->buf_in_urbs[index])); - wa->active_buf_in_urbs = 0; -} - -/** - * Destroy a pipe (when refcount drops to zero) - * - * Assumes it has been moved to the "QUIESCING" state. - */ -struct wa_xfer; -extern void rpipe_destroy(struct kref *_rpipe); -static inline -void __rpipe_get(struct wa_rpipe *rpipe) -{ - kref_get(&rpipe->refcnt); -} -extern int rpipe_get_by_ep(struct wahc *, struct usb_host_endpoint *, - struct urb *, gfp_t); -static inline void rpipe_put(struct wa_rpipe *rpipe) -{ - kref_put(&rpipe->refcnt, rpipe_destroy); - -} -extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *); -extern void rpipe_clear_feature_stalled(struct wahc *, - struct usb_host_endpoint *); -extern int wa_rpipes_create(struct wahc *); -extern void wa_rpipes_destroy(struct wahc *); -static inline void rpipe_avail_dec(struct wa_rpipe *rpipe) -{ - atomic_dec(&rpipe->segs_available); -} - -/** - * Returns true if the rpipe is ready to submit more segments. - */ -static inline int rpipe_avail_inc(struct wa_rpipe *rpipe) -{ - return atomic_inc_return(&rpipe->segs_available) > 0 - && !list_empty(&rpipe->seg_list); -} - - -/* Transferring data */ -extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *, - struct urb *, gfp_t); -extern int wa_urb_dequeue(struct wahc *, struct urb *, int); -extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *); - - -/* Misc - * - * FIXME: Refcounting for the actual @hwahc object is not correct; I - * mean, this should be refcounting on the HCD underneath, but - * it is not. 
In any case, the semantics for HCD refcounting - * are *weird*...on refcount reaching zero it just frees - * it...no RC specific function is called...unless I miss - * something. - * - * FIXME: has to go away in favour of a 'struct' hcd based solution - */ -static inline struct wahc *wa_get(struct wahc *wa) -{ - usb_get_intf(wa->usb_iface); - return wa; -} - -static inline void wa_put(struct wahc *wa) -{ - usb_put_intf(wa->usb_iface); -} - - -static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature) -{ - return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - op ? USB_REQ_SET_FEATURE : USB_REQ_CLEAR_FEATURE, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - feature, - wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, - NULL, 0, USB_CTRL_SET_TIMEOUT); -} - - -static inline int __wa_set_feature(struct wahc *wa, u16 feature) -{ - return __wa_feature(wa, 1, feature); -} - - -static inline int __wa_clear_feature(struct wahc *wa, u16 feature) -{ - return __wa_feature(wa, 0, feature); -} - - -/** - * Return the status of a Wire Adapter - * - * @wa: Wire Adapter instance - * @returns < 0 errno code on error, or status bitmap as described - * in WUSB1.0[8.3.1.6]. - * - * NOTE: need malloc, some arches don't take USB from the stack - */ -static inline -s32 __wa_get_status(struct wahc *wa) -{ - s32 result; - result = usb_control_msg( - wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), - USB_REQ_GET_STATUS, - USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, - &wa->status, sizeof(wa->status), USB_CTRL_GET_TIMEOUT); - if (result >= 0) - result = wa->status; - return result; -} - - -/** - * Waits until the Wire Adapter's status matches @mask/@value - * - * @wa: Wire Adapter instance. - * @returns < 0 errno code on error, otherwise status. - * - * Loop until the WAs status matches the mask and value (status & mask - * == value). Timeout if it doesn't happen. - * - * FIXME: is there an official specification on how long status - * changes can take? - */ -static inline s32 __wa_wait_status(struct wahc *wa, u32 mask, u32 value) -{ - s32 result; - unsigned loops = 10; - do { - msleep(50); - result = __wa_get_status(wa); - if ((result & mask) == value) - break; - if (loops-- == 0) { - result = -ETIMEDOUT; - break; - } - } while (result >= 0); - return result; -} - - -/** Command @hwahc to stop, @returns 0 if ok, < 0 errno code on error */ -static inline int __wa_stop(struct wahc *wa) -{ - int result; - struct device *dev = &wa->usb_iface->dev; - - result = __wa_clear_feature(wa, WA_ENABLE); - if (result < 0 && result != -ENODEV) { - dev_err(dev, "error commanding HC to stop: %d\n", result); - goto out; - } - result = __wa_wait_status(wa, WA_ENABLE, 0); - if (result < 0 && result != -ENODEV) - dev_err(dev, "error waiting for HC to stop: %d\n", result); -out: - return 0; -} - - -#endif /* #ifndef __HWAHC_INTERNAL_H__ */ diff --git a/drivers/staging/wusbcore/wa-nep.c b/drivers/staging/wusbcore/wa-nep.c deleted file mode 100644 index 5f0656db5482..000000000000 --- a/drivers/staging/wusbcore/wa-nep.c +++ /dev/null @@ -1,289 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8]) - * Notification EndPoint support - * - * Copyright (C) 2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This part takes care of getting the notification from the hw - * only and dispatching through wusbwad into - * wa_notif_dispatch. 
Handling is done there. - * - * WA notifications are limited in size; most of them are three or - * four bytes long, and the longest is the HWA Device Notification, - * which would not exceed 38 bytes (DNs are limited in payload to 32 - * bytes plus 3 bytes header (WUSB1.0[7.6p2]), plus 3 bytes HWA - * header (WUSB1.0[8.5.4.2]). - * - * It is not clear if more than one Device Notification can be packed - * in a HWA Notification, I assume no because of the wording in - * WUSB1.0[8.5.4.2]. In any case, the bigger any notification could - * get is 256 bytes (as the bLength field is a byte). - * - * So what we do is we have this buffer and read into it; when a - * notification arrives we schedule work to a specific, single thread - * workqueue (so notifications are serialized) and copy the - * notification data. After scheduling the work, we rearm the read from - * the notification endpoint. - * - * Entry points here are: - * - * wa_nep_[create|destroy]() To initialize/release this subsystem - * - * wa_nep_cb() Callback for the notification - * endpoint; when data is ready, this - * does the dispatching. - */ -#include <linux/workqueue.h> -#include <linux/ctype.h> -#include <linux/slab.h> - -#include "wa-hc.h" -#include "wusbhc.h" - -/* Structure for queueing notifications to the workqueue */ -struct wa_notif_work { - struct work_struct work; - struct wahc *wa; - size_t size; - u8 data[]; -}; - -/* - * Process incoming notifications from the WA's Notification EndPoint - * [the wuswad daemon, basically] - * - * @_nw: Pointer to a descriptor which has the pointer to the - * @wa, the size of the buffer and the work queue - * structure (so we can free all when done). - * @returns 0 if ok, < 0 errno code on error. - * - * All notifications follow the same format; they need to start with a - * 'struct wa_notif_hdr' header, so it is easy to parse through - * them. We just break the buffer in individual notifications (the - * standard doesn't say if it can be done or is forbidden, so we are - * cautious) and dispatch each. - * - * So the handling layers are is: - * - * WA specific notification (from NEP) - * Device Notification Received -> wa_handle_notif_dn() - * WUSB Device notification generic handling - * BPST Adjustment -> wa_handle_notif_bpst_adj() - * ... -> ... - * - * @wa has to be referenced - */ -static void wa_notif_dispatch(struct work_struct *ws) -{ - void *itr; - u8 missing = 0; - struct wa_notif_work *nw = container_of(ws, struct wa_notif_work, - work); - struct wahc *wa = nw->wa; - struct wa_notif_hdr *notif_hdr; - size_t size; - - struct device *dev = &wa->usb_iface->dev; - -#if 0 - /* FIXME: need to check for this??? */ - if (usb_hcd->state == HC_STATE_QUIESCING) /* Going down? */ - goto out; /* screw it */ -#endif - atomic_dec(&wa->notifs_queued); /* Throttling ctl */ - size = nw->size; - itr = nw->data; - - while (size) { - if (size < sizeof(*notif_hdr)) { - missing = sizeof(*notif_hdr) - size; - goto exhausted_buffer; - } - notif_hdr = itr; - if (size < notif_hdr->bLength) - goto exhausted_buffer; - itr += notif_hdr->bLength; - size -= notif_hdr->bLength; - /* Dispatch the notification [don't use itr or size!] 
*/ - switch (notif_hdr->bNotifyType) { - case HWA_NOTIF_DN: { - struct hwa_notif_dn *hwa_dn; - hwa_dn = container_of(notif_hdr, struct hwa_notif_dn, - hdr); - wusbhc_handle_dn(wa->wusb, hwa_dn->bSourceDeviceAddr, - hwa_dn->dndata, - notif_hdr->bLength - sizeof(*hwa_dn)); - break; - } - case WA_NOTIF_TRANSFER: - wa_handle_notif_xfer(wa, notif_hdr); - break; - case HWA_NOTIF_BPST_ADJ: - break; /* no action needed for BPST ADJ. */ - case DWA_NOTIF_RWAKE: - case DWA_NOTIF_PORTSTATUS: - /* FIXME: unimplemented WA NOTIFs */ - /* fallthru */ - default: - dev_err(dev, "HWA: unknown notification 0x%x, " - "%zu bytes; discarding\n", - notif_hdr->bNotifyType, - (size_t)notif_hdr->bLength); - break; - } - } -out: - wa_put(wa); - kfree(nw); - return; - - /* THIS SHOULD NOT HAPPEN - * - * Buffer exahusted with partial data remaining; just warn and - * discard the data, as this should not happen. - */ -exhausted_buffer: - dev_warn(dev, "HWA: device sent short notification, " - "%d bytes missing; discarding %d bytes.\n", - missing, (int)size); - goto out; -} - -/* - * Deliver incoming WA notifications to the wusbwa workqueue - * - * @wa: Pointer the Wire Adapter Controller Data Streaming - * instance (part of an 'struct usb_hcd'). - * @size: Size of the received buffer - * @returns 0 if ok, < 0 errno code on error. - * - * The input buffer is @wa->nep_buffer, with @size bytes - * (guaranteed to fit in the allocated space, - * @wa->nep_buffer_size). - */ -static int wa_nep_queue(struct wahc *wa, size_t size) -{ - int result = 0; - struct device *dev = &wa->usb_iface->dev; - struct wa_notif_work *nw; - - /* dev_fnstart(dev, "(wa %p, size %zu)\n", wa, size); */ - BUG_ON(size > wa->nep_buffer_size); - if (size == 0) - goto out; - if (atomic_read(&wa->notifs_queued) > 200) { - if (printk_ratelimit()) - dev_err(dev, "Too many notifications queued, " - "throttling back\n"); - goto out; - } - nw = kzalloc(sizeof(*nw) + size, GFP_ATOMIC); - if (nw == NULL) { - if (printk_ratelimit()) - dev_err(dev, "No memory to queue notification\n"); - result = -ENOMEM; - goto out; - } - INIT_WORK(&nw->work, wa_notif_dispatch); - nw->wa = wa_get(wa); - nw->size = size; - memcpy(nw->data, wa->nep_buffer, size); - atomic_inc(&wa->notifs_queued); /* Throttling ctl */ - queue_work(wusbd, &nw->work); -out: - /* dev_fnend(dev, "(wa %p, size %zu) = result\n", wa, size, result); */ - return result; -} - -/* - * Callback for the notification event endpoint - * - * Check's that everything is fine and then passes the data to be - * queued to the workqueue. 
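/*
 * [Editor's illustrative sketch -- not part of the removed driver.]
 * The notification walk in wa_notif_dispatch() above is the usual loop
 * over length-prefixed records: read a header, trust bLength only after
 * bounds-checking it against the bytes remaining, then advance.  A
 * minimal standalone version of that pattern, with hypothetical names
 * (rec_hdr, walk_records):
 */
struct rec_hdr {
	u8 bLength;	/* total record length, header included */
	u8 bType;
} __packed;

static void walk_records(struct device *dev, const u8 *buf, size_t size)
{
	while (size >= sizeof(struct rec_hdr)) {
		const struct rec_hdr *hdr = (const void *)buf;

		if (hdr->bLength < sizeof(*hdr) || hdr->bLength > size) {
			dev_warn(dev, "truncated record, %zu bytes left\n",
				 size);
			break;
		}
		/* dispatch on hdr->bType here, as the switch above does */
		buf += hdr->bLength;
		size -= hdr->bLength;
	}
}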
- */ -static void wa_nep_cb(struct urb *urb) -{ - int result; - struct wahc *wa = urb->context; - struct device *dev = &wa->usb_iface->dev; - - switch (result = urb->status) { - case 0: - result = wa_nep_queue(wa, urb->actual_length); - if (result < 0) - dev_err(dev, "NEP: unable to process notification(s): " - "%d\n", result); - break; - case -ECONNRESET: /* Not an error, but a controlled situation; */ - case -ENOENT: /* (we killed the URB)...so, no broadcast */ - case -ESHUTDOWN: - dev_dbg(dev, "NEP: going down %d\n", urb->status); - goto out; - default: /* On general errors, we retry unless it gets ugly */ - if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, - EDC_ERROR_TIMEFRAME)) { - dev_err(dev, "NEP: URB max acceptable errors " - "exceeded, resetting device\n"); - wa_reset_all(wa); - goto out; - } - dev_err(dev, "NEP: URB error %d\n", urb->status); - } - result = wa_nep_arm(wa, GFP_ATOMIC); - if (result < 0) { - dev_err(dev, "NEP: cannot submit URB: %d\n", result); - wa_reset_all(wa); - } -out: - return; -} - -/* - * Initialize @wa's notification and event's endpoint stuff - * - * This includes the allocating the read buffer, the context ID - * allocation bitmap, the URB and submitting the URB. - */ -int wa_nep_create(struct wahc *wa, struct usb_interface *iface) -{ - int result; - struct usb_endpoint_descriptor *epd; - struct usb_device *usb_dev = interface_to_usbdev(iface); - struct device *dev = &iface->dev; - - edc_init(&wa->nep_edc); - epd = &iface->cur_altsetting->endpoint[0].desc; - wa->nep_buffer_size = 1024; - wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL); - if (!wa->nep_buffer) - goto error_nep_buffer; - wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL); - if (wa->nep_urb == NULL) - goto error_urb_alloc; - usb_fill_int_urb(wa->nep_urb, usb_dev, - usb_rcvintpipe(usb_dev, epd->bEndpointAddress), - wa->nep_buffer, wa->nep_buffer_size, - wa_nep_cb, wa, epd->bInterval); - result = wa_nep_arm(wa, GFP_KERNEL); - if (result < 0) { - dev_err(dev, "Cannot submit notification URB: %d\n", result); - goto error_nep_arm; - } - return 0; - -error_nep_arm: - usb_free_urb(wa->nep_urb); -error_urb_alloc: - kfree(wa->nep_buffer); -error_nep_buffer: - return -ENOMEM; -} - -void wa_nep_destroy(struct wahc *wa) -{ - wa_nep_disarm(wa); - usb_free_urb(wa->nep_urb); - kfree(wa->nep_buffer); -} diff --git a/drivers/staging/wusbcore/wa-rpipe.c b/drivers/staging/wusbcore/wa-rpipe.c deleted file mode 100644 index a5734cbcd5ad..000000000000 --- a/drivers/staging/wusbcore/wa-rpipe.c +++ /dev/null @@ -1,539 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * WUSB Wire Adapter - * rpipe management - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: docs - * - * RPIPE - * - * Targeted at different downstream endpoints - * - * Descriptor: use to config the remote pipe. - * - * The number of blocks could be dynamic (wBlocks in descriptor is - * 0)--need to schedule them then. - * - * Each bit in wa->rpipe_bm represents if an rpipe is being used or - * not. Rpipes are represented with a 'struct wa_rpipe' that is - * attached to the hcpriv member of a 'struct usb_host_endpoint'. - * - * When you need to xfer data to an endpoint, you get an rpipe for it - * with wa_ep_rpipe_get(), which gives you a reference to the rpipe - * and keeps a single one (the first one) with the endpoint. When you - * are done transferring, you drop that reference. At the end the - * rpipe is always allocated and bound to the endpoint. 
There it might - * be recycled when not used. - * - * Addresses: - * - * We use a 1:1 mapping mechanism between port address (0 based - * index, actually) and the address. The USB stack knows about this. - * - * USB Stack port number 4 (1 based) - * WUSB code port index 3 (0 based) - * USB Address 5 (2 based -- 0 is for default, 1 for root hub) - * - * Now, because we don't use the concept as default address exactly - * like the (wired) USB code does, we need to kind of skip it. So we - * never take addresses from the urb->pipe, but from the - * urb->dev->devnum, to make sure that we always have the right - * destination address. - */ -#include <linux/atomic.h> -#include <linux/bitmap.h> -#include <linux/slab.h> -#include <linux/export.h> - -#include "wusbhc.h" -#include "wa-hc.h" - -static int __rpipe_get_descr(struct wahc *wa, - struct usb_rpipe_descriptor *descr, u16 index) -{ - ssize_t result; - struct device *dev = &wa->usb_iface->dev; - - /* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor() - * function because the arguments are different. - */ - result = usb_control_msg( - wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), - USB_REQ_GET_DESCRIPTOR, - USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE, - USB_DT_RPIPE<<8, index, descr, sizeof(*descr), - USB_CTRL_GET_TIMEOUT); - if (result < 0) { - dev_err(dev, "rpipe %u: get descriptor failed: %d\n", - index, (int)result); - goto error; - } - if (result < sizeof(*descr)) { - dev_err(dev, "rpipe %u: got short descriptor " - "(%zd vs %zd bytes needed)\n", - index, result, sizeof(*descr)); - result = -EINVAL; - goto error; - } - result = 0; - -error: - return result; -} - -/* - * - * The descriptor is assumed to be properly initialized (ie: you got - * it through __rpipe_get_descr()). - */ -static int __rpipe_set_descr(struct wahc *wa, - struct usb_rpipe_descriptor *descr, u16 index) -{ - ssize_t result; - struct device *dev = &wa->usb_iface->dev; - - /* we cannot use the usb_get_descriptor() function because the - * arguments are different. 
- */ - result = usb_control_msg( - wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - USB_REQ_SET_DESCRIPTOR, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, - USB_DT_RPIPE<<8, index, descr, sizeof(*descr), - USB_CTRL_SET_TIMEOUT); - if (result < 0) { - dev_err(dev, "rpipe %u: set descriptor failed: %d\n", - index, (int)result); - goto error; - } - if (result < sizeof(*descr)) { - dev_err(dev, "rpipe %u: sent short descriptor " - "(%zd vs %zd bytes required)\n", - index, result, sizeof(*descr)); - result = -EINVAL; - goto error; - } - result = 0; - -error: - return result; - -} - -static void rpipe_init(struct wa_rpipe *rpipe) -{ - kref_init(&rpipe->refcnt); - spin_lock_init(&rpipe->seg_lock); - INIT_LIST_HEAD(&rpipe->seg_list); - INIT_LIST_HEAD(&rpipe->list_node); -} - -static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx) -{ - unsigned long flags; - - spin_lock_irqsave(&wa->rpipe_lock, flags); - rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx); - if (rpipe_idx < wa->rpipes) - set_bit(rpipe_idx, wa->rpipe_bm); - spin_unlock_irqrestore(&wa->rpipe_lock, flags); - - return rpipe_idx; -} - -static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx) -{ - unsigned long flags; - - spin_lock_irqsave(&wa->rpipe_lock, flags); - clear_bit(rpipe_idx, wa->rpipe_bm); - spin_unlock_irqrestore(&wa->rpipe_lock, flags); -} - -void rpipe_destroy(struct kref *_rpipe) -{ - struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt); - u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex); - - if (rpipe->ep) - rpipe->ep->hcpriv = NULL; - rpipe_put_idx(rpipe->wa, index); - wa_put(rpipe->wa); - kfree(rpipe); -} -EXPORT_SYMBOL_GPL(rpipe_destroy); - -/* - * Locate an idle rpipe, create an structure for it and return it - * - * @wa is referenced and unlocked - * @crs enum rpipe_attr, required endpoint characteristics - * - * The rpipe can be used only sequentially (not in parallel). - * - * The rpipe is moved into the "ready" state. 
- */ -static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs, - gfp_t gfp) -{ - int result; - unsigned rpipe_idx; - struct wa_rpipe *rpipe; - struct device *dev = &wa->usb_iface->dev; - - rpipe = kzalloc(sizeof(*rpipe), gfp); - if (rpipe == NULL) - return -ENOMEM; - rpipe_init(rpipe); - - /* Look for an idle pipe */ - for (rpipe_idx = 0; rpipe_idx < wa->rpipes; rpipe_idx++) { - rpipe_idx = rpipe_get_idx(wa, rpipe_idx); - if (rpipe_idx >= wa->rpipes) /* no more pipes :( */ - break; - result = __rpipe_get_descr(wa, &rpipe->descr, rpipe_idx); - if (result < 0) - dev_err(dev, "Can't get descriptor for rpipe %u: %d\n", - rpipe_idx, result); - else if ((rpipe->descr.bmCharacteristics & crs) != 0) - goto found; - rpipe_put_idx(wa, rpipe_idx); - } - *prpipe = NULL; - kfree(rpipe); - return -ENXIO; - -found: - set_bit(rpipe_idx, wa->rpipe_bm); - rpipe->wa = wa_get(wa); - *prpipe = rpipe; - return 0; -} - -static int __rpipe_reset(struct wahc *wa, unsigned index) -{ - int result; - struct device *dev = &wa->usb_iface->dev; - - result = usb_control_msg( - wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - USB_REQ_RPIPE_RESET, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, - 0, index, NULL, 0, USB_CTRL_SET_TIMEOUT); - if (result < 0) - dev_err(dev, "rpipe %u: reset failed: %d\n", - index, result); - return result; -} - -/* - * Fake companion descriptor for ep0 - * - * See WUSB1.0[7.4.4], most of this is zero for bulk/int/ctl - */ -static struct usb_wireless_ep_comp_descriptor epc0 = { - .bLength = sizeof(epc0), - .bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP, - .bMaxBurst = 1, - .bMaxSequence = 2, -}; - -/* - * Look for EP companion descriptor - * - * Get there, look for Inara in the endpoint's extra descriptors - */ -static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find( - struct device *dev, struct usb_host_endpoint *ep) -{ - void *itr; - size_t itr_size; - struct usb_descriptor_header *hdr; - struct usb_wireless_ep_comp_descriptor *epcd; - - if (ep->desc.bEndpointAddress == 0) { - epcd = &epc0; - goto out; - } - itr = ep->extra; - itr_size = ep->extralen; - epcd = NULL; - while (itr_size > 0) { - if (itr_size < sizeof(*hdr)) { - dev_err(dev, "HW Bug? ep 0x%02x: extra descriptors " - "at offset %zu: only %zu bytes left\n", - ep->desc.bEndpointAddress, - itr - (void *) ep->extra, itr_size); - break; - } - hdr = itr; - if (hdr->bDescriptorType == USB_DT_WIRELESS_ENDPOINT_COMP) { - epcd = itr; - break; - } - if (hdr->bLength > itr_size) { - dev_err(dev, "HW Bug? ep 0x%02x: extra descriptor " - "at offset %zu (type 0x%02x) " - "length %d but only %zu bytes left\n", - ep->desc.bEndpointAddress, - itr - (void *) ep->extra, hdr->bDescriptorType, - hdr->bLength, itr_size); - break; - } - itr += hdr->bLength; - itr_size -= hdr->bLength; - } -out: - return epcd; -} - -/* - * Aim an rpipe to its device & endpoint destination - * - * Make sure we change the address to unauthenticated if the device - * is WUSB and it is not authenticated. - */ -static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa, - struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp) -{ - int result = -ENOMSG; /* better code for lack of companion? 
*/ - struct device *dev = &wa->usb_iface->dev; - struct usb_device *usb_dev = urb->dev; - struct usb_wireless_ep_comp_descriptor *epcd; - u32 ack_window, epcd_max_sequence; - u8 unauth; - - epcd = rpipe_epc_find(dev, ep); - if (epcd == NULL) { - dev_err(dev, "ep 0x%02x: can't find companion descriptor\n", - ep->desc.bEndpointAddress); - goto error; - } - unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0; - __rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex)); - atomic_set(&rpipe->segs_available, - le16_to_cpu(rpipe->descr.wRequests)); - /* FIXME: block allocation system; request with queuing and timeout */ - /* FIXME: compute so seg_size > ep->maxpktsize */ - rpipe->descr.wBlocks = cpu_to_le16(16); /* given */ - /* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */ - if (usb_endpoint_xfer_isoc(&ep->desc)) - rpipe->descr.wMaxPacketSize = epcd->wOverTheAirPacketSize; - else - rpipe->descr.wMaxPacketSize = ep->desc.wMaxPacketSize; - - rpipe->descr.hwa_bMaxBurst = max(min_t(unsigned int, - epcd->bMaxBurst, 16U), 1U); - rpipe->descr.hwa_bDeviceInfoIndex = - wusb_port_no_to_idx(urb->dev->portnum); - /* FIXME: use maximum speed as supported or recommended by device */ - rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ? - UWB_PHY_RATE_53 : UWB_PHY_RATE_200; - - dev_dbg(dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n", - urb->dev->devnum, urb->dev->devnum | unauth, - le16_to_cpu(rpipe->descr.wRPipeIndex), - usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed); - - rpipe->descr.hwa_reserved = 0; - - rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress; - /* FIXME: bDataSequence */ - rpipe->descr.bDataSequence = 0; - - /* start with base window of hwa_bMaxBurst bits starting at 0. */ - ack_window = 0xFFFFFFFF >> (32 - rpipe->descr.hwa_bMaxBurst); - rpipe->descr.dwCurrentWindow = cpu_to_le32(ack_window); - epcd_max_sequence = max(min_t(unsigned int, - epcd->bMaxSequence, 32U), 2U); - rpipe->descr.bMaxDataSequence = epcd_max_sequence - 1; - rpipe->descr.bInterval = ep->desc.bInterval; - if (usb_endpoint_xfer_isoc(&ep->desc)) - rpipe->descr.bOverTheAirInterval = epcd->bOverTheAirInterval; - else - rpipe->descr.bOverTheAirInterval = 0; /* 0 if not isoc */ - /* FIXME: xmit power & preamble blah blah */ - rpipe->descr.bmAttribute = (ep->desc.bmAttributes & - USB_ENDPOINT_XFERTYPE_MASK); - /* rpipe->descr.bmCharacteristics RO */ - rpipe->descr.bmRetryOptions = (wa->wusb->retry_count & 0xF); - /* FIXME: use for assessing link quality? */ - rpipe->descr.wNumTransactionErrors = 0; - result = __rpipe_set_descr(wa, &rpipe->descr, - le16_to_cpu(rpipe->descr.wRPipeIndex)); - if (result < 0) { - dev_err(dev, "Cannot aim rpipe: %d\n", result); - goto error; - } - result = 0; -error: - return result; -} - -/* - * Check an aimed rpipe to make sure it points to where we want - * - * We use bit 19 of the Linux USB pipe bitmap for unauth vs auth - * space; when it is like that, we or 0x80 to make an unauth address. 
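/*
 * [Editor's note -- worked example, not part of the removed driver.]
 * The dwCurrentWindow setup in rpipe_aim() above simply sets the low
 * hwa_bMaxBurst bits of the 32-bit ack window.  Because hwa_bMaxBurst is
 * clamped to 1..16, the shift count stays in 16..31 and is always defined:
 *
 *	hwa_bMaxBurst == 4:   0xFFFFFFFF >> (32 - 4)  == 0x0000000F
 *	hwa_bMaxBurst == 16:  0xFFFFFFFF >> (32 - 16) == 0x0000FFFF
 *
 * i.e. the initial window covers sequence numbers 0..hwa_bMaxBurst-1,
 * matching the "base window of hwa_bMaxBurst bits starting at 0" comment.
 */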
- */ -static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa, - const struct usb_host_endpoint *ep, - const struct urb *urb, gfp_t gfp) -{ - int result = 0; - struct device *dev = &wa->usb_iface->dev; - u8 portnum = wusb_port_no_to_idx(urb->dev->portnum); - -#define AIM_CHECK(rdf, val, text) \ - do { \ - if (rpipe->descr.rdf != (val)) { \ - dev_err(dev, \ - "rpipe aim discrepancy: " #rdf " " text "\n", \ - rpipe->descr.rdf, (val)); \ - result = -EINVAL; \ - WARN_ON(1); \ - } \ - } while (0) - AIM_CHECK(hwa_bDeviceInfoIndex, portnum, "(%u vs %u)"); - AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ? - UWB_PHY_RATE_53 : UWB_PHY_RATE_200, - "(%u vs %u)"); - AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)"); - AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)"); - AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)"); -#undef AIM_CHECK - return result; -} - -#ifndef CONFIG_BUG -#define CONFIG_BUG 0 -#endif - -/* - * Make sure there is an rpipe allocated for an endpoint - * - * If already allocated, we just refcount it; if not, we get an - * idle one, aim it to the right location and take it. - * - * Attaches to ep->hcpriv and rpipe->ep to ep. - */ -int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep, - struct urb *urb, gfp_t gfp) -{ - int result = 0; - struct device *dev = &wa->usb_iface->dev; - struct wa_rpipe *rpipe; - u8 eptype; - - mutex_lock(&wa->rpipe_mutex); - rpipe = ep->hcpriv; - if (rpipe != NULL) { - if (CONFIG_BUG == 1) { - result = rpipe_check_aim(rpipe, wa, ep, urb, gfp); - if (result < 0) - goto error; - } - __rpipe_get(rpipe); - dev_dbg(dev, "ep 0x%02x: reusing rpipe %u\n", - ep->desc.bEndpointAddress, - le16_to_cpu(rpipe->descr.wRPipeIndex)); - } else { - /* hmm, assign idle rpipe, aim it */ - result = -ENOBUFS; - eptype = ep->desc.bmAttributes & 0x03; - result = rpipe_get_idle(&rpipe, wa, 1 << eptype, gfp); - if (result < 0) - goto error; - result = rpipe_aim(rpipe, wa, ep, urb, gfp); - if (result < 0) { - rpipe_put(rpipe); - goto error; - } - ep->hcpriv = rpipe; - rpipe->ep = ep; - __rpipe_get(rpipe); /* for caching into ep->hcpriv */ - dev_dbg(dev, "ep 0x%02x: using rpipe %u\n", - ep->desc.bEndpointAddress, - le16_to_cpu(rpipe->descr.wRPipeIndex)); - } -error: - mutex_unlock(&wa->rpipe_mutex); - return result; -} - -/* - * Allocate the bitmap for each rpipe. - */ -int wa_rpipes_create(struct wahc *wa) -{ - wa->rpipes = le16_to_cpu(wa->wa_descr->wNumRPipes); - wa->rpipe_bm = bitmap_zalloc(wa->rpipes, GFP_KERNEL); - if (wa->rpipe_bm == NULL) - return -ENOMEM; - return 0; -} - -void wa_rpipes_destroy(struct wahc *wa) -{ - struct device *dev = &wa->usb_iface->dev; - - if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) { - WARN_ON(1); - dev_err(dev, "BUG: pipes not released on exit: %*pb\n", - wa->rpipes, wa->rpipe_bm); - } - bitmap_free(wa->rpipe_bm); -} - -/* - * Release resources allocated for an endpoint - * - * If there is an associated rpipe to this endpoint, Abort any pending - * transfers and put it. If the rpipe ends up being destroyed, - * __rpipe_destroy() will cleanup ep->hcpriv. - * - * This is called before calling hcd->stop(), so you don't need to do - * anything else in there. 
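/*
 * [Editor's illustrative sketch -- not part of the removed driver.]
 * rpipe_get_by_ep() above is a get-or-create lookup on a per-endpoint
 * cache (ep->hcpriv) under a mutex: reuse and refcount the cached object
 * if present, otherwise allocate and configure a new one and keep an
 * extra reference for the cache.  A stripped-down version of that shape,
 * with hypothetical names (struct owner, struct thing):
 */
struct thing {
	struct kref ref;
};

struct owner {
	struct mutex lock;
	struct thing *cached;
};

static struct thing *thing_alloc(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (t)
		kref_init(&t->ref);	/* one reference for the caller */
	return t;
}

static struct thing *thing_get_for(struct owner *o)
{
	struct thing *t;

	mutex_lock(&o->lock);
	t = o->cached;
	if (t) {
		kref_get(&t->ref);	/* reuse: take a ref for the caller */
	} else {
		t = thing_alloc();
		if (t) {
			o->cached = t;
			kref_get(&t->ref);	/* extra ref held by the cache */
		}
	}
	mutex_unlock(&o->lock);
	return t;
}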
- */ -void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep) -{ - struct wa_rpipe *rpipe; - - mutex_lock(&wa->rpipe_mutex); - rpipe = ep->hcpriv; - if (rpipe != NULL) { - u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex); - - usb_control_msg( - wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - USB_REQ_RPIPE_ABORT, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, - 0, index, NULL, 0, USB_CTRL_SET_TIMEOUT); - rpipe_put(rpipe); - } - mutex_unlock(&wa->rpipe_mutex); -} -EXPORT_SYMBOL_GPL(rpipe_ep_disable); - -/* Clear the stalled status of an RPIPE. */ -void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep) -{ - struct wa_rpipe *rpipe; - - mutex_lock(&wa->rpipe_mutex); - rpipe = ep->hcpriv; - if (rpipe != NULL) { - u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex); - - usb_control_msg( - wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), - USB_REQ_CLEAR_FEATURE, - USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, - RPIPE_STALL, index, NULL, 0, USB_CTRL_SET_TIMEOUT); - } - mutex_unlock(&wa->rpipe_mutex); -} -EXPORT_SYMBOL_GPL(rpipe_clear_feature_stalled); diff --git a/drivers/staging/wusbcore/wa-xfer.c b/drivers/staging/wusbcore/wa-xfer.c deleted file mode 100644 index abf88cea37bb..000000000000 --- a/drivers/staging/wusbcore/wa-xfer.c +++ /dev/null @@ -1,2927 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * WUSB Wire Adapter - * Data transfer and URB enqueing - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * How transfers work: get a buffer, break it up in segments (segment - * size is a multiple of the maxpacket size). For each segment issue a - * segment request (struct wa_xfer_*), then send the data buffer if - * out or nothing if in (all over the DTO endpoint). - * - * For each submitted segment request, a notification will come over - * the NEP endpoint and a transfer result (struct xfer_result) will - * arrive in the DTI URB. Read it, get the xfer ID, see if there is - * data coming (inbound transfer), schedule a read and handle it. - * - * Sounds simple, it is a pain to implement. - * - * - * ENTRY POINTS - * - * FIXME - * - * LIFE CYCLE / STATE DIAGRAM - * - * FIXME - * - * THIS CODE IS DISGUSTING - * - * Warned you are; it's my second try and still not happy with it. - * - * NOTES: - * - * - No iso - * - * - Supports DMA xfers, control, bulk and maybe interrupt - * - * - Does not recycle unused rpipes - * - * An rpipe is assigned to an endpoint the first time it is used, - * and then it's there, assigned, until the endpoint is disabled - * (destroyed [{h,d}wahc_op_ep_disable()]. The assignment of the - * rpipe to the endpoint is done under the wa->rpipe_sem semaphore - * (should be a mutex). - * - * Two methods it could be done: - * - * (a) set up a timer every time an rpipe's use count drops to 1 - * (which means unused) or when a transfer ends. Reset the - * timer when a xfer is queued. If the timer expires, release - * the rpipe [see rpipe_ep_disable()]. 
- * - * (b) when looking for free rpipes to attach [rpipe_get_by_ep()], - * when none are found go over the list, check their endpoint - * and their activity record (if no last-xfer-done-ts in the - * last x seconds) take it - * - * However, due to the fact that we have a set of limited - * resources (max-segments-at-the-same-time per xfer, - * xfers-per-ripe, blocks-per-rpipe, rpipes-per-host), at the end - * we are going to have to rebuild all this based on an scheduler, - * to where we have a list of transactions to do and based on the - * availability of the different required components (blocks, - * rpipes, segment slots, etc), we go scheduling them. Painful. - */ -#include <linux/spinlock.h> -#include <linux/slab.h> -#include <linux/hash.h> -#include <linux/ratelimit.h> -#include <linux/export.h> -#include <linux/scatterlist.h> - -#include "wa-hc.h" -#include "wusbhc.h" - -enum { - /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */ - WA_SEGS_MAX = 128, -}; - -enum wa_seg_status { - WA_SEG_NOTREADY, - WA_SEG_READY, - WA_SEG_DELAYED, - WA_SEG_SUBMITTED, - WA_SEG_PENDING, - WA_SEG_DTI_PENDING, - WA_SEG_DONE, - WA_SEG_ERROR, - WA_SEG_ABORTED, -}; - -static void wa_xfer_delayed_run(struct wa_rpipe *); -static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting); - -/* - * Life cycle governed by 'struct urb' (the refcount of the struct is - * that of the 'struct urb' and usb_free_urb() would free the whole - * struct). - */ -struct wa_seg { - struct urb tr_urb; /* transfer request urb. */ - struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */ - struct urb *dto_urb; /* for data output. */ - struct list_head list_node; /* for rpipe->req_list */ - struct wa_xfer *xfer; /* out xfer */ - u8 index; /* which segment we are */ - int isoc_frame_count; /* number of isoc frames in this segment. */ - int isoc_frame_offset; /* starting frame offset in the xfer URB. */ - /* Isoc frame that the current transfer buffer corresponds to. */ - int isoc_frame_index; - int isoc_size; /* size of all isoc frames sent by this seg. */ - enum wa_seg_status status; - ssize_t result; /* bytes xfered or error */ - struct wa_xfer_hdr xfer_hdr; -}; - -static inline void wa_seg_init(struct wa_seg *seg) -{ - usb_init_urb(&seg->tr_urb); - - /* set the remaining memory to 0. */ - memset(((void *)seg) + sizeof(seg->tr_urb), 0, - sizeof(*seg) - sizeof(seg->tr_urb)); -} - -/* - * Protected by xfer->lock - * - */ -struct wa_xfer { - struct kref refcnt; - struct list_head list_node; - spinlock_t lock; - u32 id; - - struct wahc *wa; /* Wire adapter we are plugged to */ - struct usb_host_endpoint *ep; - struct urb *urb; /* URB we are transferring for */ - struct wa_seg **seg; /* transfer segments */ - u8 segs, segs_submitted, segs_done; - unsigned is_inbound:1; - unsigned is_dma:1; - size_t seg_size; - int result; - - gfp_t gfp; /* allocation mask */ - - struct wusb_dev *wusb_dev; /* for activity timestamps */ -}; - -static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer, - struct wa_seg *seg, int curr_iso_frame); -static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer, - int starting_index, enum wa_seg_status status); - -static inline void wa_xfer_init(struct wa_xfer *xfer) -{ - kref_init(&xfer->refcnt); - INIT_LIST_HEAD(&xfer->list_node); - spin_lock_init(&xfer->lock); -} - -/* - * Destroy a transfer structure - * - * Note that freeing xfer->seg[cnt]->tr_urb will free the containing - * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs. 
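/*
 * [Editor's illustrative sketch -- not part of the removed driver.]
 * The note above (freeing xfer->seg[cnt]->tr_urb frees the containing
 * wa_seg) works because the refcounted URB is the *first* member of
 * struct wa_seg: when its refcount drops to zero the USB core kfree()s
 * the urb pointer, and that pointer is the same address as the wa_seg
 * allocation.  A minimal analogue of the trick, with hypothetical names:
 */
struct outer {
	struct kref ref;	/* must stay the first member */
	int payload[16];
};

static void embedded_release(struct kref *ref)
{
	/* offsetof(struct outer, ref) == 0, so this frees all of 'outer' */
	kfree(ref);
}

static void outer_put(struct outer *o)
{
	kref_put(&o->ref, embedded_release);
}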
- */ -static void wa_xfer_destroy(struct kref *_xfer) -{ - struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt); - if (xfer->seg) { - unsigned cnt; - for (cnt = 0; cnt < xfer->segs; cnt++) { - struct wa_seg *seg = xfer->seg[cnt]; - if (seg) { - usb_free_urb(seg->isoc_pack_desc_urb); - if (seg->dto_urb) { - kfree(seg->dto_urb->sg); - usb_free_urb(seg->dto_urb); - } - usb_free_urb(&seg->tr_urb); - } - } - kfree(xfer->seg); - } - kfree(xfer); -} - -static void wa_xfer_get(struct wa_xfer *xfer) -{ - kref_get(&xfer->refcnt); -} - -static void wa_xfer_put(struct wa_xfer *xfer) -{ - kref_put(&xfer->refcnt, wa_xfer_destroy); -} - -/* - * Try to get exclusive access to the DTO endpoint resource. Return true - * if successful. - */ -static inline int __wa_dto_try_get(struct wahc *wa) -{ - return (test_and_set_bit(0, &wa->dto_in_use) == 0); -} - -/* Release the DTO endpoint resource. */ -static inline void __wa_dto_put(struct wahc *wa) -{ - clear_bit_unlock(0, &wa->dto_in_use); -} - -/* Service RPIPEs that are waiting on the DTO resource. */ -static void wa_check_for_delayed_rpipes(struct wahc *wa) -{ - unsigned long flags; - int dto_waiting = 0; - struct wa_rpipe *rpipe; - - spin_lock_irqsave(&wa->rpipe_lock, flags); - while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) { - rpipe = list_first_entry(&wa->rpipe_delayed_list, - struct wa_rpipe, list_node); - __wa_xfer_delayed_run(rpipe, &dto_waiting); - /* remove this RPIPE from the list if it is not waiting. */ - if (!dto_waiting) { - pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n", - __func__, - le16_to_cpu(rpipe->descr.wRPipeIndex)); - list_del_init(&rpipe->list_node); - } - } - spin_unlock_irqrestore(&wa->rpipe_lock, flags); -} - -/* add this RPIPE to the end of the delayed RPIPE list. */ -static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe) -{ - unsigned long flags; - - spin_lock_irqsave(&wa->rpipe_lock, flags); - /* add rpipe to the list if it is not already on it. */ - if (list_empty(&rpipe->list_node)) { - pr_debug("%s: adding RPIPE %d to the delayed list.\n", - __func__, le16_to_cpu(rpipe->descr.wRPipeIndex)); - list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list); - } - spin_unlock_irqrestore(&wa->rpipe_lock, flags); -} - -/* - * xfer is referenced - * - * xfer->lock has to be unlocked - * - * We take xfer->lock for setting the result; this is a barrier - * against drivers/usb/core/hcd.c:unlink1() being called after we call - * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a - * reference to the transfer. 
- */ -static void wa_xfer_giveback(struct wa_xfer *xfer) -{ - unsigned long flags; - - spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags); - list_del_init(&xfer->list_node); - usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb); - spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags); - /* FIXME: segmentation broken -- kills DWA */ - wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); - wa_put(xfer->wa); - wa_xfer_put(xfer); -} - -/* - * xfer is referenced - * - * xfer->lock has to be unlocked - */ -static void wa_xfer_completion(struct wa_xfer *xfer) -{ - if (xfer->wusb_dev) - wusb_dev_put(xfer->wusb_dev); - rpipe_put(xfer->ep->hcpriv); - wa_xfer_giveback(xfer); -} - -/* - * Initialize a transfer's ID - * - * We need to use a sequential number; if we use the pointer or the - * hash of the pointer, it can repeat over sequential transfers and - * then it will confuse the HWA....wonder why in hell they put a 32 - * bit handle in there then. - */ -static void wa_xfer_id_init(struct wa_xfer *xfer) -{ - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); -} - -/* Return the xfer's ID. */ -static inline u32 wa_xfer_id(struct wa_xfer *xfer) -{ - return xfer->id; -} - -/* Return the xfer's ID in transport format (little endian). */ -static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer) -{ - return cpu_to_le32(xfer->id); -} - -/* - * If transfer is done, wrap it up and return true - * - * xfer->lock has to be locked - */ -static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) -{ - struct device *dev = &xfer->wa->usb_iface->dev; - unsigned result, cnt; - struct wa_seg *seg; - struct urb *urb = xfer->urb; - unsigned found_short = 0; - - result = xfer->segs_done == xfer->segs_submitted; - if (result == 0) - goto out; - urb->actual_length = 0; - for (cnt = 0; cnt < xfer->segs; cnt++) { - seg = xfer->seg[cnt]; - switch (seg->status) { - case WA_SEG_DONE: - if (found_short && seg->result > 0) { - dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n", - xfer, wa_xfer_id(xfer), cnt, - seg->result); - urb->status = -EINVAL; - goto out; - } - urb->actual_length += seg->result; - if (!(usb_pipeisoc(xfer->urb->pipe)) - && seg->result < xfer->seg_size - && cnt != xfer->segs-1) - found_short = 1; - dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d " - "result %zu urb->actual_length %d\n", - xfer, wa_xfer_id(xfer), seg->index, found_short, - seg->result, urb->actual_length); - break; - case WA_SEG_ERROR: - xfer->result = seg->result; - dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n", - xfer, wa_xfer_id(xfer), seg->index, seg->result, - seg->result); - goto out; - case WA_SEG_ABORTED: - xfer->result = seg->result; - dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n", - xfer, wa_xfer_id(xfer), seg->index, seg->result, - seg->result); - goto out; - default: - dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n", - xfer, wa_xfer_id(xfer), cnt, seg->status); - xfer->result = -EINVAL; - goto out; - } - } - xfer->result = 0; -out: - return result; -} - -/* - * Mark the given segment as done. Return true if this completes the xfer. - * This should only be called for segs that have been submitted to an RPIPE. - * Delayed segs are not marked as submitted so they do not need to be marked - * as done when cleaning up. - * - * xfer->lock has to be locked - */ -static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer, - struct wa_seg *seg, enum wa_seg_status status) -{ - seg->status = status; - xfer->segs_done++; - - /* check for done. 
*/ - return __wa_xfer_is_done(xfer); -} - -/* - * Search for a transfer list ID on the HCD's URB list - * - * For 32 bit architectures, we use the pointer itself; for 64 bits, a - * 32-bit hash of the pointer. - * - * @returns NULL if not found. - */ -static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id) -{ - unsigned long flags; - struct wa_xfer *xfer_itr; - spin_lock_irqsave(&wa->xfer_list_lock, flags); - list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) { - if (id == xfer_itr->id) { - wa_xfer_get(xfer_itr); - goto out; - } - } - xfer_itr = NULL; -out: - spin_unlock_irqrestore(&wa->xfer_list_lock, flags); - return xfer_itr; -} - -struct wa_xfer_abort_buffer { - struct urb urb; - struct wahc *wa; - struct wa_xfer_abort cmd; -}; - -static void __wa_xfer_abort_cb(struct urb *urb) -{ - struct wa_xfer_abort_buffer *b = urb->context; - struct wahc *wa = b->wa; - - /* - * If the abort request URB failed, then the HWA did not get the abort - * command. Forcibly clean up the xfer without waiting for a Transfer - * Result from the HWA. - */ - if (urb->status < 0) { - struct wa_xfer *xfer; - struct device *dev = &wa->usb_iface->dev; - - xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID)); - dev_err(dev, "%s: Transfer Abort request failed. result: %d\n", - __func__, urb->status); - if (xfer) { - unsigned long flags; - int done, seg_index = 0; - struct wa_rpipe *rpipe = xfer->ep->hcpriv; - - dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n", - __func__, xfer, wa_xfer_id(xfer)); - spin_lock_irqsave(&xfer->lock, flags); - /* skip done segs. */ - while (seg_index < xfer->segs) { - struct wa_seg *seg = xfer->seg[seg_index]; - - if ((seg->status == WA_SEG_DONE) || - (seg->status == WA_SEG_ERROR)) { - ++seg_index; - } else { - break; - } - } - /* mark remaining segs as aborted. */ - wa_complete_remaining_xfer_segs(xfer, seg_index, - WA_SEG_ABORTED); - done = __wa_xfer_is_done(xfer); - spin_unlock_irqrestore(&xfer->lock, flags); - if (done) - wa_xfer_completion(xfer); - wa_xfer_delayed_run(rpipe); - wa_xfer_put(xfer); - } else { - dev_err(dev, "%s: xfer ID 0x%08X already gone.\n", - __func__, le32_to_cpu(b->cmd.dwTransferID)); - } - } - - wa_put(wa); /* taken in __wa_xfer_abort */ - usb_put_urb(&b->urb); -} - -/* - * Aborts an ongoing transaction - * - * Assumes the transfer is referenced and locked and in a submitted - * state (mainly that there is an endpoint/rpipe assigned). - * - * The callback (see above) does nothing but freeing up the data by - * putting the URB. Because the URB is allocated at the head of the - * struct, the whole space we allocated is kfreed. * - */ -static int __wa_xfer_abort(struct wa_xfer *xfer) -{ - int result = -ENOMEM; - struct device *dev = &xfer->wa->usb_iface->dev; - struct wa_xfer_abort_buffer *b; - struct wa_rpipe *rpipe = xfer->ep->hcpriv; - - b = kmalloc(sizeof(*b), GFP_ATOMIC); - if (b == NULL) - goto error_kmalloc; - b->cmd.bLength = sizeof(b->cmd); - b->cmd.bRequestType = WA_XFER_ABORT; - b->cmd.wRPipe = rpipe->descr.wRPipeIndex; - b->cmd.dwTransferID = wa_xfer_id_le32(xfer); - b->wa = wa_get(xfer->wa); - - usb_init_urb(&b->urb); - usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev, - usb_sndbulkpipe(xfer->wa->usb_dev, - xfer->wa->dto_epd->bEndpointAddress), - &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b); - result = usb_submit_urb(&b->urb, GFP_ATOMIC); - if (result < 0) - goto error_submit; - return result; /* callback frees! 
*/ - - -error_submit: - wa_put(xfer->wa); - if (printk_ratelimit()) - dev_err(dev, "xfer %p: Can't submit abort request: %d\n", - xfer, result); - kfree(b); -error_kmalloc: - return result; - -} - -/* - * Calculate the number of isoc frames starting from isoc_frame_offset - * that will fit a in transfer segment. - */ -static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer, - int isoc_frame_offset, int *total_size) -{ - int segment_size = 0, frame_count = 0; - int index = isoc_frame_offset; - struct usb_iso_packet_descriptor *iso_frame_desc = - xfer->urb->iso_frame_desc; - - while ((index < xfer->urb->number_of_packets) - && ((segment_size + iso_frame_desc[index].length) - <= xfer->seg_size)) { - /* - * For Alereon HWA devices, only include an isoc frame in an - * out segment if it is physically contiguous with the previous - * frame. This is required because those devices expect - * the isoc frames to be sent as a single USB transaction as - * opposed to one transaction per frame with standard HWA. - */ - if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) - && (xfer->is_inbound == 0) - && (index > isoc_frame_offset) - && ((iso_frame_desc[index - 1].offset + - iso_frame_desc[index - 1].length) != - iso_frame_desc[index].offset)) - break; - - /* this frame fits. count it. */ - ++frame_count; - segment_size += iso_frame_desc[index].length; - - /* move to the next isoc frame. */ - ++index; - } - - *total_size = segment_size; - return frame_count; -} - -/* - * - * @returns < 0 on error, transfer segment request size if ok - */ -static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer, - enum wa_xfer_type *pxfer_type) -{ - ssize_t result; - struct device *dev = &xfer->wa->usb_iface->dev; - size_t maxpktsize; - struct urb *urb = xfer->urb; - struct wa_rpipe *rpipe = xfer->ep->hcpriv; - - switch (rpipe->descr.bmAttribute & 0x3) { - case USB_ENDPOINT_XFER_CONTROL: - *pxfer_type = WA_XFER_TYPE_CTL; - result = sizeof(struct wa_xfer_ctl); - break; - case USB_ENDPOINT_XFER_INT: - case USB_ENDPOINT_XFER_BULK: - *pxfer_type = WA_XFER_TYPE_BI; - result = sizeof(struct wa_xfer_bi); - break; - case USB_ENDPOINT_XFER_ISOC: - *pxfer_type = WA_XFER_TYPE_ISO; - result = sizeof(struct wa_xfer_hwaiso); - break; - default: - /* never happens */ - BUG(); - result = -EINVAL; /* shut gcc up */ - } - xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0; - xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0; - - maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize); - xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks) - * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1); - /* Compute the segment size and make sure it is a multiple of - * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of - * a check (FIXME) */ - if (xfer->seg_size < maxpktsize) { - dev_err(dev, - "HW BUG? seg_size %zu smaller than maxpktsize %zu\n", - xfer->seg_size, maxpktsize); - result = -EINVAL; - goto error; - } - xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize; - if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) { - int index = 0; - - xfer->segs = 0; - /* - * loop over urb->number_of_packets to determine how many - * xfer segments will be needed to send the isoc frames. - */ - while (index < urb->number_of_packets) { - int seg_size; /* don't care. 
*/ - index += __wa_seg_calculate_isoc_frame_count(xfer, - index, &seg_size); - ++xfer->segs; - } - } else { - xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, - xfer->seg_size); - if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) - xfer->segs = 1; - } - - if (xfer->segs > WA_SEGS_MAX) { - dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n", - (urb->transfer_buffer_length/xfer->seg_size), - WA_SEGS_MAX); - result = -EINVAL; - goto error; - } -error: - return result; -} - -static void __wa_setup_isoc_packet_descr( - struct wa_xfer_packet_info_hwaiso *packet_desc, - struct wa_xfer *xfer, - struct wa_seg *seg) { - struct usb_iso_packet_descriptor *iso_frame_desc = - xfer->urb->iso_frame_desc; - int frame_index; - - /* populate isoc packet descriptor. */ - packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO; - packet_desc->wLength = cpu_to_le16(struct_size(packet_desc, - PacketLength, - seg->isoc_frame_count)); - for (frame_index = 0; frame_index < seg->isoc_frame_count; - ++frame_index) { - int offset_index = frame_index + seg->isoc_frame_offset; - packet_desc->PacketLength[frame_index] = - cpu_to_le16(iso_frame_desc[offset_index].length); - } -} - - -/* Fill in the common request header and xfer-type specific data. */ -static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer, - struct wa_xfer_hdr *xfer_hdr0, - enum wa_xfer_type xfer_type, - size_t xfer_hdr_size) -{ - struct wa_rpipe *rpipe = xfer->ep->hcpriv; - struct wa_seg *seg = xfer->seg[0]; - - xfer_hdr0 = &seg->xfer_hdr; - xfer_hdr0->bLength = xfer_hdr_size; - xfer_hdr0->bRequestType = xfer_type; - xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex; - xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer); - xfer_hdr0->bTransferSegment = 0; - switch (xfer_type) { - case WA_XFER_TYPE_CTL: { - struct wa_xfer_ctl *xfer_ctl = - container_of(xfer_hdr0, struct wa_xfer_ctl, hdr); - xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0; - memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet, - sizeof(xfer_ctl->baSetupData)); - break; - } - case WA_XFER_TYPE_BI: - break; - case WA_XFER_TYPE_ISO: { - struct wa_xfer_hwaiso *xfer_iso = - container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr); - struct wa_xfer_packet_info_hwaiso *packet_desc = - ((void *)xfer_iso) + xfer_hdr_size; - - /* populate the isoc section of the transfer request. */ - xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count); - /* populate isoc packet descriptor. */ - __wa_setup_isoc_packet_descr(packet_desc, xfer, seg); - break; - } - default: - BUG(); - }; -} - -/* - * Callback for the OUT data phase of the segment request - * - * Check wa_seg_tr_cb(); most comments also apply here because this - * function does almost the same thing and they work closely - * together. - * - * If the seg request has failed but this DTO phase has succeeded, - * wa_seg_tr_cb() has already failed the segment and moved the - * status to WA_SEG_ERROR, so this will go through 'case 0' and - * effectively do nothing. - */ -static void wa_seg_dto_cb(struct urb *urb) -{ - struct wa_seg *seg = urb->context; - struct wa_xfer *xfer = seg->xfer; - struct wahc *wa; - struct device *dev; - struct wa_rpipe *rpipe; - unsigned long flags; - unsigned rpipe_ready = 0; - int data_send_done = 1, release_dto = 0, holding_dto = 0; - u8 done = 0; - int result; - - /* free the sg if it was used. 
*/ - kfree(urb->sg); - urb->sg = NULL; - - spin_lock_irqsave(&xfer->lock, flags); - wa = xfer->wa; - dev = &wa->usb_iface->dev; - if (usb_pipeisoc(xfer->urb->pipe)) { - /* Alereon HWA sends all isoc frames in a single transfer. */ - if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) - seg->isoc_frame_index += seg->isoc_frame_count; - else - seg->isoc_frame_index += 1; - if (seg->isoc_frame_index < seg->isoc_frame_count) { - data_send_done = 0; - holding_dto = 1; /* checked in error cases. */ - /* - * if this is the last isoc frame of the segment, we - * can release DTO after sending this frame. - */ - if ((seg->isoc_frame_index + 1) >= - seg->isoc_frame_count) - release_dto = 1; - } - dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n", - wa_xfer_id(xfer), seg->index, seg->isoc_frame_index, - holding_dto, release_dto); - } - spin_unlock_irqrestore(&xfer->lock, flags); - - switch (urb->status) { - case 0: - spin_lock_irqsave(&xfer->lock, flags); - seg->result += urb->actual_length; - if (data_send_done) { - dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n", - wa_xfer_id(xfer), seg->index, seg->result); - if (seg->status < WA_SEG_PENDING) - seg->status = WA_SEG_PENDING; - } else { - /* should only hit this for isoc xfers. */ - /* - * Populate the dto URB with the next isoc frame buffer, - * send the URB and release DTO if we no longer need it. - */ - __wa_populate_dto_urb_isoc(xfer, seg, - seg->isoc_frame_offset + seg->isoc_frame_index); - - /* resubmit the URB with the next isoc frame. */ - /* take a ref on resubmit. */ - wa_xfer_get(xfer); - result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC); - if (result < 0) { - dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n", - wa_xfer_id(xfer), seg->index, result); - spin_unlock_irqrestore(&xfer->lock, flags); - goto error_dto_submit; - } - } - spin_unlock_irqrestore(&xfer->lock, flags); - if (release_dto) { - __wa_dto_put(wa); - wa_check_for_delayed_rpipes(wa); - } - break; - case -ECONNRESET: /* URB unlinked; no need to do anything */ - case -ENOENT: /* as it was done by the who unlinked us */ - if (holding_dto) { - __wa_dto_put(wa); - wa_check_for_delayed_rpipes(wa); - } - break; - default: /* Other errors ... */ - dev_err(dev, "xfer 0x%08X#%u: data out error %d\n", - wa_xfer_id(xfer), seg->index, urb->status); - goto error_default; - } - - /* taken when this URB was submitted. */ - wa_xfer_put(xfer); - return; - -error_dto_submit: - /* taken on resubmit attempt. */ - wa_xfer_put(xfer); -error_default: - spin_lock_irqsave(&xfer->lock, flags); - rpipe = xfer->ep->hcpriv; - if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, - EDC_ERROR_TIMEFRAME)){ - dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n"); - wa_reset_all(wa); - } - if (seg->status != WA_SEG_ERROR) { - seg->result = urb->status; - __wa_xfer_abort(xfer); - rpipe_ready = rpipe_avail_inc(rpipe); - done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR); - } - spin_unlock_irqrestore(&xfer->lock, flags); - if (holding_dto) { - __wa_dto_put(wa); - wa_check_for_delayed_rpipes(wa); - } - if (done) - wa_xfer_completion(xfer); - if (rpipe_ready) - wa_xfer_delayed_run(rpipe); - /* taken when this URB was submitted. */ - wa_xfer_put(xfer); -} - -/* - * Callback for the isoc packet descriptor phase of the segment request - * - * Check wa_seg_tr_cb(); most comments also apply here because this - * function does almost the same thing and they work closely - * together. 
- * - * If the seg request has failed but this phase has succeeded, - * wa_seg_tr_cb() has already failed the segment and moved the - * status to WA_SEG_ERROR, so this will go through 'case 0' and - * effectively do nothing. - */ -static void wa_seg_iso_pack_desc_cb(struct urb *urb) -{ - struct wa_seg *seg = urb->context; - struct wa_xfer *xfer = seg->xfer; - struct wahc *wa; - struct device *dev; - struct wa_rpipe *rpipe; - unsigned long flags; - unsigned rpipe_ready = 0; - u8 done = 0; - - switch (urb->status) { - case 0: - spin_lock_irqsave(&xfer->lock, flags); - wa = xfer->wa; - dev = &wa->usb_iface->dev; - dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n", - wa_xfer_id(xfer), seg->index); - if (xfer->is_inbound && seg->status < WA_SEG_PENDING) - seg->status = WA_SEG_PENDING; - spin_unlock_irqrestore(&xfer->lock, flags); - break; - case -ECONNRESET: /* URB unlinked; no need to do anything */ - case -ENOENT: /* as it was done by the who unlinked us */ - break; - default: /* Other errors ... */ - spin_lock_irqsave(&xfer->lock, flags); - wa = xfer->wa; - dev = &wa->usb_iface->dev; - rpipe = xfer->ep->hcpriv; - pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n", - wa_xfer_id(xfer), seg->index, urb->status); - if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, - EDC_ERROR_TIMEFRAME)){ - dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n"); - wa_reset_all(wa); - } - if (seg->status != WA_SEG_ERROR) { - usb_unlink_urb(seg->dto_urb); - seg->result = urb->status; - __wa_xfer_abort(xfer); - rpipe_ready = rpipe_avail_inc(rpipe); - done = __wa_xfer_mark_seg_as_done(xfer, seg, - WA_SEG_ERROR); - } - spin_unlock_irqrestore(&xfer->lock, flags); - if (done) - wa_xfer_completion(xfer); - if (rpipe_ready) - wa_xfer_delayed_run(rpipe); - } - /* taken when this URB was submitted. */ - wa_xfer_put(xfer); -} - -/* - * Callback for the segment request - * - * If successful transition state (unless already transitioned or - * outbound transfer); otherwise, take a note of the error, mark this - * segment done and try completion. - * - * Note we don't access until we are sure that the transfer hasn't - * been cancelled (ECONNRESET, ENOENT), which could mean that - * seg->xfer could be already gone. - * - * We have to check before setting the status to WA_SEG_PENDING - * because sometimes the xfer result callback arrives before this - * callback (geeeeeeze), so it might happen that we are already in - * another state. As well, we don't set it if the transfer is not inbound, - * as in that case, wa_seg_dto_cb will do it when the OUT data phase - * finishes. - */ -static void wa_seg_tr_cb(struct urb *urb) -{ - struct wa_seg *seg = urb->context; - struct wa_xfer *xfer = seg->xfer; - struct wahc *wa; - struct device *dev; - struct wa_rpipe *rpipe; - unsigned long flags; - unsigned rpipe_ready; - u8 done = 0; - - switch (urb->status) { - case 0: - spin_lock_irqsave(&xfer->lock, flags); - wa = xfer->wa; - dev = &wa->usb_iface->dev; - dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n", - xfer, wa_xfer_id(xfer), seg->index); - if (xfer->is_inbound && - seg->status < WA_SEG_PENDING && - !(usb_pipeisoc(xfer->urb->pipe))) - seg->status = WA_SEG_PENDING; - spin_unlock_irqrestore(&xfer->lock, flags); - break; - case -ECONNRESET: /* URB unlinked; no need to do anything */ - case -ENOENT: /* as it was done by the who unlinked us */ - break; - default: /* Other errors ... 
*/ - spin_lock_irqsave(&xfer->lock, flags); - wa = xfer->wa; - dev = &wa->usb_iface->dev; - rpipe = xfer->ep->hcpriv; - if (printk_ratelimit()) - dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n", - xfer, wa_xfer_id(xfer), seg->index, - urb->status); - if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, - EDC_ERROR_TIMEFRAME)){ - dev_err(dev, "DTO: URB max acceptable errors " - "exceeded, resetting device\n"); - wa_reset_all(wa); - } - usb_unlink_urb(seg->isoc_pack_desc_urb); - usb_unlink_urb(seg->dto_urb); - seg->result = urb->status; - __wa_xfer_abort(xfer); - rpipe_ready = rpipe_avail_inc(rpipe); - done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR); - spin_unlock_irqrestore(&xfer->lock, flags); - if (done) - wa_xfer_completion(xfer); - if (rpipe_ready) - wa_xfer_delayed_run(rpipe); - } - /* taken when this URB was submitted. */ - wa_xfer_put(xfer); -} - -/* - * Allocate an SG list to store bytes_to_transfer bytes and copy the - * subset of the in_sg that matches the buffer subset - * we are about to transfer. - */ -static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg, - const unsigned int bytes_transferred, - const unsigned int bytes_to_transfer, int *out_num_sgs) -{ - struct scatterlist *out_sg; - unsigned int bytes_processed = 0, offset_into_current_page_data = 0, - nents; - struct scatterlist *current_xfer_sg = in_sg; - struct scatterlist *current_seg_sg, *last_seg_sg; - - /* skip previously transferred pages. */ - while ((current_xfer_sg) && - (bytes_processed < bytes_transferred)) { - bytes_processed += current_xfer_sg->length; - - /* advance the sg if current segment starts on or past the - next page. */ - if (bytes_processed <= bytes_transferred) - current_xfer_sg = sg_next(current_xfer_sg); - } - - /* the data for the current segment starts in current_xfer_sg. - calculate the offset. */ - if (bytes_processed > bytes_transferred) { - offset_into_current_page_data = current_xfer_sg->length - - (bytes_processed - bytes_transferred); - } - - /* calculate the number of pages needed by this segment. */ - nents = DIV_ROUND_UP((bytes_to_transfer + - offset_into_current_page_data + - current_xfer_sg->offset), - PAGE_SIZE); - - out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC); - if (out_sg) { - sg_init_table(out_sg, nents); - - /* copy the portion of the incoming SG that correlates to the - * data to be transferred by this segment to the segment SG. */ - last_seg_sg = current_seg_sg = out_sg; - bytes_processed = 0; - - /* reset nents and calculate the actual number of sg entries - needed. */ - nents = 0; - while ((bytes_processed < bytes_to_transfer) && - current_seg_sg && current_xfer_sg) { - unsigned int page_len = min((current_xfer_sg->length - - offset_into_current_page_data), - (bytes_to_transfer - bytes_processed)); - - sg_set_page(current_seg_sg, sg_page(current_xfer_sg), - page_len, - current_xfer_sg->offset + - offset_into_current_page_data); - - bytes_processed += page_len; - - last_seg_sg = current_seg_sg; - current_seg_sg = sg_next(current_seg_sg); - current_xfer_sg = sg_next(current_xfer_sg); - - /* only the first page may require additional offset. */ - offset_into_current_page_data = 0; - nents++; - } - - /* update num_sgs and terminate the list since we may have - * concatenated pages. */ - sg_mark_end(last_seg_sg); - *out_num_sgs = nents; - } - - return out_sg; -} - -/* - * Populate DMA buffer info for the isoc dto urb. 
- */ -static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer, - struct wa_seg *seg, int curr_iso_frame) -{ - seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - seg->dto_urb->sg = NULL; - seg->dto_urb->num_sgs = 0; - /* dto urb buffer address pulled from iso_frame_desc. */ - seg->dto_urb->transfer_dma = xfer->urb->transfer_dma + - xfer->urb->iso_frame_desc[curr_iso_frame].offset; - /* The Alereon HWA sends a single URB with all isoc segs. */ - if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) - seg->dto_urb->transfer_buffer_length = seg->isoc_size; - else - seg->dto_urb->transfer_buffer_length = - xfer->urb->iso_frame_desc[curr_iso_frame].length; -} - -/* - * Populate buffer ptr and size, DMA buffer or SG list for the dto urb. - */ -static int __wa_populate_dto_urb(struct wa_xfer *xfer, - struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size) -{ - int result = 0; - - if (xfer->is_dma) { - seg->dto_urb->transfer_dma = - xfer->urb->transfer_dma + buf_itr_offset; - seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - seg->dto_urb->sg = NULL; - seg->dto_urb->num_sgs = 0; - } else { - /* do buffer or SG processing. */ - seg->dto_urb->transfer_flags &= - ~URB_NO_TRANSFER_DMA_MAP; - /* this should always be 0 before a resubmit. */ - seg->dto_urb->num_mapped_sgs = 0; - - if (xfer->urb->transfer_buffer) { - seg->dto_urb->transfer_buffer = - xfer->urb->transfer_buffer + - buf_itr_offset; - seg->dto_urb->sg = NULL; - seg->dto_urb->num_sgs = 0; - } else { - seg->dto_urb->transfer_buffer = NULL; - - /* - * allocate an SG list to store seg_size bytes - * and copy the subset of the xfer->urb->sg that - * matches the buffer subset we are about to - * read. - */ - seg->dto_urb->sg = wa_xfer_create_subset_sg( - xfer->urb->sg, - buf_itr_offset, buf_itr_size, - &(seg->dto_urb->num_sgs)); - if (!(seg->dto_urb->sg)) - result = -ENOMEM; - } - } - seg->dto_urb->transfer_buffer_length = buf_itr_size; - - return result; -} - -/* - * Allocate the segs array and initialize each of them - * - * The segments are freed by wa_xfer_destroy() when the xfer use count - * drops to zero; however, because each segment is given the same life - * cycle as the USB URB it contains, it is actually freed by - * usb_put_urb() on the contained USB URB (twisted, eh?). - */ -static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size) -{ - int result, cnt, isoc_frame_offset = 0; - size_t alloc_size = sizeof(*xfer->seg[0]) - - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size; - struct usb_device *usb_dev = xfer->wa->usb_dev; - const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd; - struct wa_seg *seg; - size_t buf_itr, buf_size, buf_itr_size; - - result = -ENOMEM; - xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC); - if (xfer->seg == NULL) - goto error_segs_kzalloc; - buf_itr = 0; - buf_size = xfer->urb->transfer_buffer_length; - for (cnt = 0; cnt < xfer->segs; cnt++) { - size_t iso_pkt_descr_size = 0; - int seg_isoc_frame_count = 0, seg_isoc_size = 0; - - /* - * Adjust the size of the segment object to contain space for - * the isoc packet descriptor buffer. 
- */ - if (usb_pipeisoc(xfer->urb->pipe)) { - seg_isoc_frame_count = - __wa_seg_calculate_isoc_frame_count(xfer, - isoc_frame_offset, &seg_isoc_size); - - iso_pkt_descr_size = - sizeof(struct wa_xfer_packet_info_hwaiso) + - (seg_isoc_frame_count * sizeof(__le16)); - } - result = -ENOMEM; - seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size, - GFP_ATOMIC); - if (seg == NULL) - goto error_seg_kmalloc; - wa_seg_init(seg); - seg->xfer = xfer; - seg->index = cnt; - usb_fill_bulk_urb(&seg->tr_urb, usb_dev, - usb_sndbulkpipe(usb_dev, - dto_epd->bEndpointAddress), - &seg->xfer_hdr, xfer_hdr_size, - wa_seg_tr_cb, seg); - buf_itr_size = min(buf_size, xfer->seg_size); - - if (usb_pipeisoc(xfer->urb->pipe)) { - seg->isoc_frame_count = seg_isoc_frame_count; - seg->isoc_frame_offset = isoc_frame_offset; - seg->isoc_size = seg_isoc_size; - /* iso packet descriptor. */ - seg->isoc_pack_desc_urb = - usb_alloc_urb(0, GFP_ATOMIC); - if (seg->isoc_pack_desc_urb == NULL) - goto error_iso_pack_desc_alloc; - /* - * The buffer for the isoc packet descriptor starts - * after the transfer request header in the - * segment object memory buffer. - */ - usb_fill_bulk_urb( - seg->isoc_pack_desc_urb, usb_dev, - usb_sndbulkpipe(usb_dev, - dto_epd->bEndpointAddress), - (void *)(&seg->xfer_hdr) + - xfer_hdr_size, - iso_pkt_descr_size, - wa_seg_iso_pack_desc_cb, seg); - - /* adjust starting frame offset for next seg. */ - isoc_frame_offset += seg_isoc_frame_count; - } - - if (xfer->is_inbound == 0 && buf_size > 0) { - /* outbound data. */ - seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC); - if (seg->dto_urb == NULL) - goto error_dto_alloc; - usb_fill_bulk_urb( - seg->dto_urb, usb_dev, - usb_sndbulkpipe(usb_dev, - dto_epd->bEndpointAddress), - NULL, 0, wa_seg_dto_cb, seg); - - if (usb_pipeisoc(xfer->urb->pipe)) { - /* - * Fill in the xfer buffer information for the - * first isoc frame. Subsequent frames in this - * segment will be filled in and sent from the - * DTO completion routine, if needed. - */ - __wa_populate_dto_urb_isoc(xfer, seg, - seg->isoc_frame_offset); - } else { - /* fill in the xfer buffer information. */ - result = __wa_populate_dto_urb(xfer, seg, - buf_itr, buf_itr_size); - if (result < 0) - goto error_seg_outbound_populate; - - buf_itr += buf_itr_size; - buf_size -= buf_itr_size; - } - } - seg->status = WA_SEG_READY; - } - return 0; - - /* - * Free the memory for the current segment which failed to init. - * Use the fact that cnt is left at were it failed. The remaining - * segments will be cleaned up by wa_xfer_destroy. 
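/*
 * [Editor's illustrative sketch -- not part of the removed driver.]
 * __wa_xfer_setup_segs() unwinds a partially built segment with the
 * standard kernel goto idiom (the error_* labels that follow): each
 * allocation gets a label, and a failure jumps to the label that releases
 * everything allocated so far, in reverse order.  A minimal standalone
 * version, with hypothetical names:
 */
struct three_bufs {
	void *a, *b, *c;
};

static int three_bufs_setup(struct three_bufs *t)
{
	t->a = kzalloc(64, GFP_KERNEL);
	if (!t->a)
		goto error_a;
	t->b = kzalloc(128, GFP_KERNEL);
	if (!t->b)
		goto error_b;
	t->c = kzalloc(256, GFP_KERNEL);
	if (!t->c)
		goto error_c;
	return 0;

error_c:
	kfree(t->b);
error_b:
	kfree(t->a);
error_a:
	return -ENOMEM;
}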
- */ -error_seg_outbound_populate: - usb_free_urb(xfer->seg[cnt]->dto_urb); -error_dto_alloc: - usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb); -error_iso_pack_desc_alloc: - kfree(xfer->seg[cnt]); - xfer->seg[cnt] = NULL; -error_seg_kmalloc: -error_segs_kzalloc: - return result; -} - -/* - * Allocates all the stuff needed to submit a transfer - * - * Breaks the whole data buffer in a list of segments, each one has a - * structure allocated to it and linked in xfer->seg[index] - * - * FIXME: merge setup_segs() and the last part of this function, no - * need to do two for loops when we could run everything in a - * single one - */ -static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb) -{ - int result; - struct device *dev = &xfer->wa->usb_iface->dev; - enum wa_xfer_type xfer_type = 0; /* shut up GCC */ - size_t xfer_hdr_size, cnt, transfer_size; - struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr; - - result = __wa_xfer_setup_sizes(xfer, &xfer_type); - if (result < 0) - goto error_setup_sizes; - xfer_hdr_size = result; - result = __wa_xfer_setup_segs(xfer, xfer_hdr_size); - if (result < 0) { - dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n", - xfer, xfer->segs, result); - goto error_setup_segs; - } - /* Fill the first header */ - xfer_hdr0 = &xfer->seg[0]->xfer_hdr; - wa_xfer_id_init(xfer); - __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size); - - /* Fill remaining headers */ - xfer_hdr = xfer_hdr0; - if (xfer_type == WA_XFER_TYPE_ISO) { - xfer_hdr0->dwTransferLength = - cpu_to_le32(xfer->seg[0]->isoc_size); - for (cnt = 1; cnt < xfer->segs; cnt++) { - struct wa_xfer_packet_info_hwaiso *packet_desc; - struct wa_seg *seg = xfer->seg[cnt]; - struct wa_xfer_hwaiso *xfer_iso; - - xfer_hdr = &seg->xfer_hdr; - xfer_iso = container_of(xfer_hdr, - struct wa_xfer_hwaiso, hdr); - packet_desc = ((void *)xfer_hdr) + xfer_hdr_size; - /* - * Copy values from the 0th header. Segment specific - * values are set below. - */ - memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); - xfer_hdr->bTransferSegment = cnt; - xfer_hdr->dwTransferLength = - cpu_to_le32(seg->isoc_size); - xfer_iso->dwNumOfPackets = - cpu_to_le32(seg->isoc_frame_count); - __wa_setup_isoc_packet_descr(packet_desc, xfer, seg); - seg->status = WA_SEG_READY; - } - } else { - transfer_size = urb->transfer_buffer_length; - xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ? - cpu_to_le32(xfer->seg_size) : - cpu_to_le32(transfer_size); - transfer_size -= xfer->seg_size; - for (cnt = 1; cnt < xfer->segs; cnt++) { - xfer_hdr = &xfer->seg[cnt]->xfer_hdr; - memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); - xfer_hdr->bTransferSegment = cnt; - xfer_hdr->dwTransferLength = - transfer_size > xfer->seg_size ? - cpu_to_le32(xfer->seg_size) - : cpu_to_le32(transfer_size); - xfer->seg[cnt]->status = WA_SEG_READY; - transfer_size -= xfer->seg_size; - } - } - xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */ - result = 0; -error_setup_segs: -error_setup_sizes: - return result; -} - -/* - * - * - * rpipe->seg_lock is held! - */ -static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer, - struct wa_seg *seg, int *dto_done) -{ - int result; - - /* default to done unless we encounter a multi-frame isoc segment. */ - *dto_done = 1; - - /* - * Take a ref for each segment urb so the xfer cannot disappear until - * all of the callbacks run. - */ - wa_xfer_get(xfer); - /* submit the transfer request. 
*/ - seg->status = WA_SEG_SUBMITTED; - result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC); - if (result < 0) { - pr_err("%s: xfer %p#%u: REQ submit failed: %d\n", - __func__, xfer, seg->index, result); - wa_xfer_put(xfer); - goto error_tr_submit; - } - /* submit the isoc packet descriptor if present. */ - if (seg->isoc_pack_desc_urb) { - wa_xfer_get(xfer); - result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC); - seg->isoc_frame_index = 0; - if (result < 0) { - pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n", - __func__, xfer, seg->index, result); - wa_xfer_put(xfer); - goto error_iso_pack_desc_submit; - } - } - /* submit the out data if this is an out request. */ - if (seg->dto_urb) { - struct wahc *wa = xfer->wa; - wa_xfer_get(xfer); - result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC); - if (result < 0) { - pr_err("%s: xfer %p#%u: DTO submit failed: %d\n", - __func__, xfer, seg->index, result); - wa_xfer_put(xfer); - goto error_dto_submit; - } - /* - * If this segment contains more than one isoc frame, hold - * onto the dto resource until we send all frames. - * Only applies to non-Alereon devices. - */ - if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0) - && (seg->isoc_frame_count > 1)) - *dto_done = 0; - } - rpipe_avail_dec(rpipe); - return 0; - -error_dto_submit: - usb_unlink_urb(seg->isoc_pack_desc_urb); -error_iso_pack_desc_submit: - usb_unlink_urb(&seg->tr_urb); -error_tr_submit: - seg->status = WA_SEG_ERROR; - seg->result = result; - *dto_done = 1; - return result; -} - -/* - * Execute more queued request segments until the maximum concurrent allowed. - * Return true if the DTO resource was acquired and released. - * - * The ugly unlock/lock sequence on the error path is needed as the - * xfer->lock normally nests the seg_lock and not viceversa. - */ -static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting) -{ - int result, dto_acquired = 0, dto_done = 0; - struct device *dev = &rpipe->wa->usb_iface->dev; - struct wa_seg *seg; - struct wa_xfer *xfer; - unsigned long flags; - - *dto_waiting = 0; - - spin_lock_irqsave(&rpipe->seg_lock, flags); - while (atomic_read(&rpipe->segs_available) > 0 - && !list_empty(&rpipe->seg_list) - && (dto_acquired = __wa_dto_try_get(rpipe->wa))) { - seg = list_first_entry(&(rpipe->seg_list), struct wa_seg, - list_node); - list_del(&seg->list_node); - xfer = seg->xfer; - /* - * Get a reference to the xfer in case the callbacks for the - * URBs submitted by __wa_seg_submit attempt to complete - * the xfer before this function completes. - */ - wa_xfer_get(xfer); - result = __wa_seg_submit(rpipe, xfer, seg, &dto_done); - /* release the dto resource if this RPIPE is done with it. */ - if (dto_done) - __wa_dto_put(rpipe->wa); - dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n", - xfer, wa_xfer_id(xfer), seg->index, - atomic_read(&rpipe->segs_available), result); - if (unlikely(result < 0)) { - int done; - - spin_unlock_irqrestore(&rpipe->seg_lock, flags); - spin_lock_irqsave(&xfer->lock, flags); - __wa_xfer_abort(xfer); - /* - * This seg was marked as submitted when it was put on - * the RPIPE seg_list. Mark it done. - */ - xfer->segs_done++; - done = __wa_xfer_is_done(xfer); - spin_unlock_irqrestore(&xfer->lock, flags); - if (done) - wa_xfer_completion(xfer); - spin_lock_irqsave(&rpipe->seg_lock, flags); - } - wa_xfer_put(xfer); - } - /* - * Mark this RPIPE as waiting if dto was not acquired, there are - * delayed segs and no active transfers to wake us up later. 
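
__wa_seg_submit() above takes one extra xfer reference for every URB it submits and drops that reference again whenever usb_submit_urb() fails, so the transfer object cannot be freed while any completion callback may still run. A toy userspace model of that take-a-reference-per-asynchronous-submit pattern (the struct and helper names are illustrative stand-ins, not the driver's types):

    #include <stdio.h>

    /* Toy model of "take a reference per asynchronous submit": the object
     * stays alive until every pending callback has run. Everything here is
     * illustrative; there is no real asynchronous work. */
    struct obj { int refs; };

    static void obj_get(struct obj *o) { o->refs++; }
    static void obj_put(struct obj *o)
    {
            if (--o->refs == 0)
                    printf("last reference dropped, object freed\n");
    }

    /* pretend submit: returns 0 on success, negative on failure */
    static int submit(int fail) { return fail ? -1 : 0; }

    int main(void)
    {
            struct obj o = { .refs = 1 };   /* caller's reference */

            obj_get(&o);                    /* ref for the async completion */
            if (submit(0) < 0)
                    obj_put(&o);            /* submit failed: no callback */
            /* otherwise the completion callback drops it later: */
            obj_put(&o);                    /* simulate the callback */

            obj_put(&o);                    /* caller drops its own reference */
            return 0;
    }
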
- */ - if (!dto_acquired && !list_empty(&rpipe->seg_list) - && (atomic_read(&rpipe->segs_available) == - le16_to_cpu(rpipe->descr.wRequests))) - *dto_waiting = 1; - - spin_unlock_irqrestore(&rpipe->seg_lock, flags); - - return dto_done; -} - -static void wa_xfer_delayed_run(struct wa_rpipe *rpipe) -{ - int dto_waiting; - int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting); - - /* - * If this RPIPE is waiting on the DTO resource, add it to the tail of - * the waiting list. - * Otherwise, if the WA DTO resource was acquired and released by - * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire - * DTO and failed during that time. Check the delayed list and process - * any waiters. Start searching from the next RPIPE index. - */ - if (dto_waiting) - wa_add_delayed_rpipe(rpipe->wa, rpipe); - else if (dto_done) - wa_check_for_delayed_rpipes(rpipe->wa); -} - -/* - * - * xfer->lock is taken - * - * On failure submitting we just stop submitting and return error; - * wa_urb_enqueue_b() will execute the completion path - */ -static int __wa_xfer_submit(struct wa_xfer *xfer) -{ - int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0; - struct wahc *wa = xfer->wa; - struct device *dev = &wa->usb_iface->dev; - unsigned cnt; - struct wa_seg *seg; - unsigned long flags; - struct wa_rpipe *rpipe = xfer->ep->hcpriv; - size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests); - u8 available; - u8 empty; - - spin_lock_irqsave(&wa->xfer_list_lock, flags); - list_add_tail(&xfer->list_node, &wa->xfer_list); - spin_unlock_irqrestore(&wa->xfer_list_lock, flags); - - BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests); - result = 0; - spin_lock_irqsave(&rpipe->seg_lock, flags); - for (cnt = 0; cnt < xfer->segs; cnt++) { - int delay_seg = 1; - - available = atomic_read(&rpipe->segs_available); - empty = list_empty(&rpipe->seg_list); - seg = xfer->seg[cnt]; - if (available && empty) { - /* - * Only attempt to acquire DTO if we have a segment - * to send. - */ - dto_acquired = __wa_dto_try_get(rpipe->wa); - if (dto_acquired) { - delay_seg = 0; - result = __wa_seg_submit(rpipe, xfer, seg, - &dto_done); - dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n", - xfer, wa_xfer_id(xfer), cnt, available, - empty); - if (dto_done) - __wa_dto_put(rpipe->wa); - - if (result < 0) { - __wa_xfer_abort(xfer); - goto error_seg_submit; - } - } - } - - if (delay_seg) { - dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n", - xfer, wa_xfer_id(xfer), cnt, available, empty); - seg->status = WA_SEG_DELAYED; - list_add_tail(&seg->list_node, &rpipe->seg_list); - } - xfer->segs_submitted++; - } -error_seg_submit: - /* - * Mark this RPIPE as waiting if dto was not acquired, there are - * delayed segs and no active transfers to wake us up later. - */ - if (!dto_acquired && !list_empty(&rpipe->seg_list) - && (atomic_read(&rpipe->segs_available) == - le16_to_cpu(rpipe->descr.wRequests))) - dto_waiting = 1; - spin_unlock_irqrestore(&rpipe->seg_lock, flags); - - if (dto_waiting) - wa_add_delayed_rpipe(rpipe->wa, rpipe); - else if (dto_done) - wa_check_for_delayed_rpipes(rpipe->wa); - - return result; -} - -/* - * Second part of a URB/transfer enqueuement - * - * Assumes this comes from wa_urb_enqueue() [maybe through - * wa_urb_enqueue_run()]. 
At this point: - * - * xfer->wa filled and refcounted - * xfer->ep filled with rpipe refcounted if - * delayed == 0 - * xfer->urb filled and refcounted (this is the case when called - * from wa_urb_enqueue() as we come from usb_submit_urb() - * and when called by wa_urb_enqueue_run(), as we took an - * extra ref dropped by _run() after we return). - * xfer->gfp filled - * - * If we fail at __wa_xfer_submit(), then we just check if we are done - * and if so, we run the completion procedure. However, if we are not - * yet done, we do nothing and wait for the completion handlers from - * the submitted URBs or from the xfer-result path to kick in. If xfer - * result never kicks in, the xfer will timeout from the USB code and - * dequeue() will be called. - */ -static int wa_urb_enqueue_b(struct wa_xfer *xfer) -{ - int result; - unsigned long flags; - struct urb *urb = xfer->urb; - struct wahc *wa = xfer->wa; - struct wusbhc *wusbhc = wa->wusb; - struct wusb_dev *wusb_dev; - unsigned done; - - result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); - if (result < 0) { - pr_err("%s: error_rpipe_get\n", __func__); - goto error_rpipe_get; - } - result = -ENODEV; - /* FIXME: segmentation broken -- kills DWA */ - mutex_lock(&wusbhc->mutex); /* get a WUSB dev */ - if (urb->dev == NULL) { - mutex_unlock(&wusbhc->mutex); - pr_err("%s: error usb dev gone\n", __func__); - goto error_dev_gone; - } - wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); - if (wusb_dev == NULL) { - mutex_unlock(&wusbhc->mutex); - dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n", - __func__); - goto error_dev_gone; - } - mutex_unlock(&wusbhc->mutex); - - spin_lock_irqsave(&xfer->lock, flags); - xfer->wusb_dev = wusb_dev; - result = urb->status; - if (urb->status != -EINPROGRESS) { - dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__); - goto error_dequeued; - } - - result = __wa_xfer_setup(xfer, urb); - if (result < 0) { - dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__); - goto error_xfer_setup; - } - /* - * Get a xfer reference since __wa_xfer_submit starts asynchronous - * operations that may try to complete the xfer before this function - * exits. - */ - wa_xfer_get(xfer); - result = __wa_xfer_submit(xfer); - if (result < 0) { - dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__); - goto error_xfer_submit; - } - spin_unlock_irqrestore(&xfer->lock, flags); - wa_xfer_put(xfer); - return 0; - - /* - * this is basically wa_xfer_completion() broken up wa_xfer_giveback() - * does a wa_xfer_put() that will call wa_xfer_destroy() and undo - * setup(). - */ -error_xfer_setup: -error_dequeued: - spin_unlock_irqrestore(&xfer->lock, flags); - /* FIXME: segmentation broken, kills DWA */ - if (wusb_dev) - wusb_dev_put(wusb_dev); -error_dev_gone: - rpipe_put(xfer->ep->hcpriv); -error_rpipe_get: - xfer->result = result; - return result; - -error_xfer_submit: - done = __wa_xfer_is_done(xfer); - xfer->result = result; - spin_unlock_irqrestore(&xfer->lock, flags); - if (done) - wa_xfer_completion(xfer); - wa_xfer_put(xfer); - /* return success since the completion routine will run. */ - return 0; -} - -/* - * Execute the delayed transfers in the Wire Adapter @wa - * - * We need to be careful here, as dequeue() could be called in the - * middle. That's why we do the whole thing under the - * wa->xfer_list_lock. 
If dequeue() jumps in, it first locks xfer->lock - * and then checks the list -- so as we would be acquiring in inverse - * order, we move the delayed list to a separate list while locked and then - * submit them without the list lock held. - */ -void wa_urb_enqueue_run(struct work_struct *ws) -{ - struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work); - struct wa_xfer *xfer, *next; - struct urb *urb; - LIST_HEAD(tmp_list); - - /* Create a copy of the wa->xfer_delayed_list while holding the lock */ - spin_lock_irq(&wa->xfer_list_lock); - list_cut_position(&tmp_list, &wa->xfer_delayed_list, - wa->xfer_delayed_list.prev); - spin_unlock_irq(&wa->xfer_list_lock); - - /* - * enqueue from temp list without list lock held since wa_urb_enqueue_b - * can take xfer->lock as well as lock mutexes. - */ - list_for_each_entry_safe(xfer, next, &tmp_list, list_node) { - list_del_init(&xfer->list_node); - - urb = xfer->urb; - if (wa_urb_enqueue_b(xfer) < 0) - wa_xfer_giveback(xfer); - usb_put_urb(urb); /* taken when queuing */ - } -} -EXPORT_SYMBOL_GPL(wa_urb_enqueue_run); - -/* - * Process the errored transfers on the Wire Adapter outside of interrupt. - */ -void wa_process_errored_transfers_run(struct work_struct *ws) -{ - struct wahc *wa = container_of(ws, struct wahc, xfer_error_work); - struct wa_xfer *xfer, *next; - LIST_HEAD(tmp_list); - - pr_info("%s: Run delayed STALL processing.\n", __func__); - - /* Create a copy of the wa->xfer_errored_list while holding the lock */ - spin_lock_irq(&wa->xfer_list_lock); - list_cut_position(&tmp_list, &wa->xfer_errored_list, - wa->xfer_errored_list.prev); - spin_unlock_irq(&wa->xfer_list_lock); - - /* - * run rpipe_clear_feature_stalled from temp list without list lock - * held. - */ - list_for_each_entry_safe(xfer, next, &tmp_list, list_node) { - struct usb_host_endpoint *ep; - unsigned long flags; - struct wa_rpipe *rpipe; - - spin_lock_irqsave(&xfer->lock, flags); - ep = xfer->ep; - rpipe = ep->hcpriv; - spin_unlock_irqrestore(&xfer->lock, flags); - - /* clear RPIPE feature stalled without holding a lock. */ - rpipe_clear_feature_stalled(wa, ep); - - /* complete the xfer. This removes it from the tmp list. */ - wa_xfer_completion(xfer); - - /* check for work. */ - wa_xfer_delayed_run(rpipe); - } -} -EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run); - -/* - * Submit a transfer to the Wire Adapter in a delayed way - * - * The process of enqueuing involves possible sleeps() [see - * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are - * in an atomic section, we defer the enqueue_b() call--else we call direct. - * - * @urb: We own a reference to it done by the HCI Linux USB stack that - * will be given up by calling usb_hcd_giveback_urb() or by - * returning error from this function -> ergo we don't have to - * refcount it. - */ -int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep, - struct urb *urb, gfp_t gfp) -{ - int result; - struct device *dev = &wa->usb_iface->dev; - struct wa_xfer *xfer; - unsigned long my_flags; - unsigned cant_sleep = irqs_disabled() | in_atomic(); - - if ((urb->transfer_buffer == NULL) - && (urb->sg == NULL) - && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) - && urb->transfer_buffer_length != 0) { - dev_err(dev, "BUG? 
urb %p: NULL xfer buffer & NODMA\n", urb); - dump_stack(); - } - - spin_lock_irqsave(&wa->xfer_list_lock, my_flags); - result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb); - spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); - if (result < 0) - goto error_link_urb; - - result = -ENOMEM; - xfer = kzalloc(sizeof(*xfer), gfp); - if (xfer == NULL) - goto error_kmalloc; - - result = -ENOENT; - if (urb->status != -EINPROGRESS) /* cancelled */ - goto error_dequeued; /* before starting? */ - wa_xfer_init(xfer); - xfer->wa = wa_get(wa); - xfer->urb = urb; - xfer->gfp = gfp; - xfer->ep = ep; - urb->hcpriv = xfer; - - dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n", - xfer, urb, urb->pipe, urb->transfer_buffer_length, - urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma", - urb->pipe & USB_DIR_IN ? "inbound" : "outbound", - cant_sleep ? "deferred" : "inline"); - - if (cant_sleep) { - usb_get_urb(urb); - spin_lock_irqsave(&wa->xfer_list_lock, my_flags); - list_add_tail(&xfer->list_node, &wa->xfer_delayed_list); - spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); - queue_work(wusbd, &wa->xfer_enqueue_work); - } else { - result = wa_urb_enqueue_b(xfer); - if (result < 0) { - /* - * URB submit/enqueue failed. Clean up, return an - * error and do not run the callback. This avoids - * an infinite submit/complete loop. - */ - dev_err(dev, "%s: URB enqueue failed: %d\n", - __func__, result); - wa_put(xfer->wa); - wa_xfer_put(xfer); - spin_lock_irqsave(&wa->xfer_list_lock, my_flags); - usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb); - spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); - return result; - } - } - return 0; - -error_dequeued: - kfree(xfer); -error_kmalloc: - spin_lock_irqsave(&wa->xfer_list_lock, my_flags); - usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb); - spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); -error_link_urb: - return result; -} -EXPORT_SYMBOL_GPL(wa_urb_enqueue); - -/* - * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion - * handler] is called. - * - * Until a transfer goes successfully through wa_urb_enqueue() it - * needs to be dequeued with completion calling; when stuck in delayed - * or before wa_xfer_setup() is called, we need to do completion. - * - * not setup If there is no hcpriv yet, that means that that enqueue - * still had no time to set the xfer up. Because - * urb->status should be other than -EINPROGRESS, - * enqueue() will catch that and bail out. - * - * If the transfer has gone through setup, we just need to clean it - * up. If it has gone through submit(), we have to abort it [with an - * asynch request] and then make sure we cancel each segment. - * - */ -int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status) -{ - unsigned long flags; - struct wa_xfer *xfer; - struct wa_seg *seg; - struct wa_rpipe *rpipe; - unsigned cnt, done = 0, xfer_abort_pending; - unsigned rpipe_ready = 0; - int result; - - /* check if it is safe to unlink. */ - spin_lock_irqsave(&wa->xfer_list_lock, flags); - result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status); - if ((result == 0) && urb->hcpriv) { - /* - * Get a xfer ref to prevent a race with wa_xfer_giveback - * cleaning up the xfer while we are working with it. 
- */ - wa_xfer_get(urb->hcpriv); - } - spin_unlock_irqrestore(&wa->xfer_list_lock, flags); - if (result) - return result; - - xfer = urb->hcpriv; - if (xfer == NULL) - return -ENOENT; - spin_lock_irqsave(&xfer->lock, flags); - pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer)); - rpipe = xfer->ep->hcpriv; - if (rpipe == NULL) { - pr_debug("%s: xfer %p id 0x%08X has no RPIPE. %s", - __func__, xfer, wa_xfer_id(xfer), - "Probably already aborted.\n" ); - result = -ENOENT; - goto out_unlock; - } - /* - * Check for done to avoid racing with wa_xfer_giveback and completing - * twice. - */ - if (__wa_xfer_is_done(xfer)) { - pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__, - xfer, wa_xfer_id(xfer)); - result = -ENOENT; - goto out_unlock; - } - /* Check the delayed list -> if there, release and complete */ - spin_lock(&wa->xfer_list_lock); - if (!list_empty(&xfer->list_node) && xfer->seg == NULL) - goto dequeue_delayed; - spin_unlock(&wa->xfer_list_lock); - if (xfer->seg == NULL) /* still hasn't reached */ - goto out_unlock; /* setup(), enqueue_b() completes */ - /* Ok, the xfer is in flight already, it's been setup and submitted.*/ - xfer_abort_pending = __wa_xfer_abort(xfer) >= 0; - /* - * grab the rpipe->seg_lock here to prevent racing with - * __wa_xfer_delayed_run. - */ - spin_lock(&rpipe->seg_lock); - for (cnt = 0; cnt < xfer->segs; cnt++) { - seg = xfer->seg[cnt]; - pr_debug("%s: xfer id 0x%08X#%d status = %d\n", - __func__, wa_xfer_id(xfer), cnt, seg->status); - switch (seg->status) { - case WA_SEG_NOTREADY: - case WA_SEG_READY: - printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n", - xfer, cnt, seg->status); - WARN_ON(1); - break; - case WA_SEG_DELAYED: - /* - * delete from rpipe delayed list. If no segments on - * this xfer have been submitted, __wa_xfer_is_done will - * trigger a giveback below. Otherwise, the submitted - * segments will be completed in the DTI interrupt. - */ - seg->status = WA_SEG_ABORTED; - seg->result = -ENOENT; - list_del(&seg->list_node); - xfer->segs_done++; - break; - case WA_SEG_DONE: - case WA_SEG_ERROR: - case WA_SEG_ABORTED: - break; - /* - * The buf_in data for a segment in the - * WA_SEG_DTI_PENDING state is actively being read. - * Let wa_buf_in_cb handle it since it will be called - * and will increment xfer->segs_done. Cleaning up - * here could cause wa_buf_in_cb to access the xfer - * after it has been completed/freed. - */ - case WA_SEG_DTI_PENDING: - break; - /* - * In the states below, the HWA device already knows - * about the transfer. If an abort request was sent, - * allow the HWA to process it and wait for the - * results. Otherwise, the DTI state and seg completed - * counts can get out of sync. - */ - case WA_SEG_SUBMITTED: - case WA_SEG_PENDING: - /* - * Check if the abort was successfully sent. This could - * be false if the HWA has been removed but we haven't - * gotten the disconnect notification yet. 
- */ - if (!xfer_abort_pending) { - seg->status = WA_SEG_ABORTED; - rpipe_ready = rpipe_avail_inc(rpipe); - xfer->segs_done++; - } - break; - } - } - spin_unlock(&rpipe->seg_lock); - xfer->result = urb->status; /* -ENOENT or -ECONNRESET */ - done = __wa_xfer_is_done(xfer); - spin_unlock_irqrestore(&xfer->lock, flags); - if (done) - wa_xfer_completion(xfer); - if (rpipe_ready) - wa_xfer_delayed_run(rpipe); - wa_xfer_put(xfer); - return result; - -out_unlock: - spin_unlock_irqrestore(&xfer->lock, flags); - wa_xfer_put(xfer); - return result; - -dequeue_delayed: - list_del_init(&xfer->list_node); - spin_unlock(&wa->xfer_list_lock); - xfer->result = urb->status; - spin_unlock_irqrestore(&xfer->lock, flags); - wa_xfer_giveback(xfer); - wa_xfer_put(xfer); - usb_put_urb(urb); /* we got a ref in enqueue() */ - return 0; -} -EXPORT_SYMBOL_GPL(wa_urb_dequeue); - -/* - * Translation from WA status codes (WUSB1.0 Table 8.15) to errno - * codes - * - * Positive errno values are internal inconsistencies and should be - * flagged louder. Negative are to be passed up to the user in the - * normal way. - * - * @status: USB WA status code -- high two bits are stripped. - */ -static int wa_xfer_status_to_errno(u8 status) -{ - int errno; - u8 real_status = status; - static int xlat[] = { - [WA_XFER_STATUS_SUCCESS] = 0, - [WA_XFER_STATUS_HALTED] = -EPIPE, - [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS, - [WA_XFER_STATUS_BABBLE] = -EOVERFLOW, - [WA_XFER_RESERVED] = EINVAL, - [WA_XFER_STATUS_NOT_FOUND] = 0, - [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM, - [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ, - [WA_XFER_STATUS_ABORTED] = -ENOENT, - [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL, - [WA_XFER_INVALID_FORMAT] = EINVAL, - [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL, - [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL, - }; - status &= 0x3f; - - if (status == 0) - return 0; - if (status >= ARRAY_SIZE(xlat)) { - printk_ratelimited(KERN_ERR "%s(): BUG? " - "Unknown WA transfer status 0x%02x\n", - __func__, real_status); - return -EINVAL; - } - errno = xlat[status]; - if (unlikely(errno > 0)) { - printk_ratelimited(KERN_ERR "%s(): BUG? " - "Inconsistent WA status: 0x%02x\n", - __func__, real_status); - errno = -errno; - } - return errno; -} - -/* - * If a last segment flag and/or a transfer result error is encountered, - * no other segment transfer results will be returned from the device. - * Mark the remaining submitted or pending xfers as completed so that - * the xfer will complete cleanly. - * - * xfer->lock must be held - * - */ -static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer, - int starting_index, enum wa_seg_status status) -{ - int index; - struct wa_rpipe *rpipe = xfer->ep->hcpriv; - - for (index = starting_index; index < xfer->segs_submitted; index++) { - struct wa_seg *current_seg = xfer->seg[index]; - - BUG_ON(current_seg == NULL); - - switch (current_seg->status) { - case WA_SEG_SUBMITTED: - case WA_SEG_PENDING: - case WA_SEG_DTI_PENDING: - rpipe_avail_inc(rpipe); - /* - * do not increment RPIPE avail for the WA_SEG_DELAYED case - * since it has not been submitted to the RPIPE. - */ - /* fall through */ - case WA_SEG_DELAYED: - xfer->segs_done++; - current_seg->status = status; - break; - case WA_SEG_ABORTED: - break; - default: - WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n", - __func__, wa_xfer_id(xfer), index, - current_seg->status); - break; - } - } -} - -/* Populate the given urb based on the current isoc transfer state. 
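
wa_xfer_status_to_errno() above funnels the six-bit WA status code through a lookup table in which negative entries are passed straight to the caller, while positive entries mark internal inconsistencies and are negated (and logged loudly). A compact userspace model of that convention; the table below is a reduced, illustrative subset with made-up index values, not the real WA_XFER_STATUS_* codes:

    #include <stdio.h>
    #include <errno.h>

    /* Reduced, illustrative subset of the status-to-errno idea: negative
     * entries go straight to the caller, positive entries flag "should not
     * happen" codes and are negated before being returned. */
    static const int xlat[] = {
            [0] = 0,                /* success */
            [1] = -EPIPE,           /* halted */
            [2] = -ENOBUFS,         /* data buffer error */
            [3] = EINVAL,           /* reserved: internal inconsistency */
    };

    static int status_to_errno(unsigned status)
    {
            int err;

            status &= 0x3f;         /* strip the two flag bits */
            if (status >= sizeof(xlat) / sizeof(xlat[0]))
                    return -EINVAL; /* unknown code */
            err = xlat[status];
            if (err > 0)            /* inconsistency: flag it, negate it */
                    err = -err;
            return err;
    }

    int main(void)
    {
            printf("%d %d %d\n", status_to_errno(0),
                   status_to_errno(1), status_to_errno(3));
            return 0;
    }
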
*/ -static int __wa_populate_buf_in_urb_isoc(struct wahc *wa, - struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg) -{ - int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset; - int seg_index, total_len = 0, urb_frame_index = urb_start_frame; - struct usb_iso_packet_descriptor *iso_frame_desc = - xfer->urb->iso_frame_desc; - const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd); - int next_frame_contiguous; - struct usb_iso_packet_descriptor *iso_frame; - - BUG_ON(buf_in_urb->status == -EINPROGRESS); - - /* - * If the current frame actual_length is contiguous with the next frame - * and actual_length is a multiple of the DTI endpoint max packet size, - * combine the current frame with the next frame in a single URB. This - * reduces the number of URBs that must be submitted in that case. - */ - seg_index = seg->isoc_frame_index; - do { - next_frame_contiguous = 0; - - iso_frame = &iso_frame_desc[urb_frame_index]; - total_len += iso_frame->actual_length; - ++urb_frame_index; - ++seg_index; - - if (seg_index < seg->isoc_frame_count) { - struct usb_iso_packet_descriptor *next_iso_frame; - - next_iso_frame = &iso_frame_desc[urb_frame_index]; - - if ((iso_frame->offset + iso_frame->actual_length) == - next_iso_frame->offset) - next_frame_contiguous = 1; - } - } while (next_frame_contiguous - && ((iso_frame->actual_length % dti_packet_size) == 0)); - - /* this should always be 0 before a resubmit. */ - buf_in_urb->num_mapped_sgs = 0; - buf_in_urb->transfer_dma = xfer->urb->transfer_dma + - iso_frame_desc[urb_start_frame].offset; - buf_in_urb->transfer_buffer_length = total_len; - buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - buf_in_urb->transfer_buffer = NULL; - buf_in_urb->sg = NULL; - buf_in_urb->num_sgs = 0; - buf_in_urb->context = seg; - - /* return the number of frames included in this URB. */ - return seg_index - seg->isoc_frame_index; -} - -/* Populate the given urb based on the current transfer state. */ -static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer, - unsigned int seg_idx, unsigned int bytes_transferred) -{ - int result = 0; - struct wa_seg *seg = xfer->seg[seg_idx]; - - BUG_ON(buf_in_urb->status == -EINPROGRESS); - /* this should always be 0 before a resubmit. */ - buf_in_urb->num_mapped_sgs = 0; - - if (xfer->is_dma) { - buf_in_urb->transfer_dma = xfer->urb->transfer_dma - + (seg_idx * xfer->seg_size); - buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - buf_in_urb->transfer_buffer = NULL; - buf_in_urb->sg = NULL; - buf_in_urb->num_sgs = 0; - } else { - /* do buffer or SG processing. */ - buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP; - - if (xfer->urb->transfer_buffer) { - buf_in_urb->transfer_buffer = - xfer->urb->transfer_buffer - + (seg_idx * xfer->seg_size); - buf_in_urb->sg = NULL; - buf_in_urb->num_sgs = 0; - } else { - /* allocate an SG list to store seg_size bytes - and copy the subset of the xfer->urb->sg - that matches the buffer subset we are - about to read. 
*/ - buf_in_urb->sg = wa_xfer_create_subset_sg( - xfer->urb->sg, - seg_idx * xfer->seg_size, - bytes_transferred, - &(buf_in_urb->num_sgs)); - - if (!(buf_in_urb->sg)) { - buf_in_urb->num_sgs = 0; - result = -ENOMEM; - } - buf_in_urb->transfer_buffer = NULL; - } - } - buf_in_urb->transfer_buffer_length = bytes_transferred; - buf_in_urb->context = seg; - - return result; -} - -/* - * Process a xfer result completion message - * - * inbound transfers: need to schedule a buf_in_urb read - * - * FIXME: this function needs to be broken up in parts - */ -static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer, - struct wa_xfer_result *xfer_result) -{ - int result; - struct device *dev = &wa->usb_iface->dev; - unsigned long flags; - unsigned int seg_idx; - struct wa_seg *seg; - struct wa_rpipe *rpipe; - unsigned done = 0; - u8 usb_status; - unsigned rpipe_ready = 0; - unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength); - struct urb *buf_in_urb = &(wa->buf_in_urbs[0]); - - spin_lock_irqsave(&xfer->lock, flags); - seg_idx = xfer_result->bTransferSegment & 0x7f; - if (unlikely(seg_idx >= xfer->segs)) - goto error_bad_seg; - seg = xfer->seg[seg_idx]; - rpipe = xfer->ep->hcpriv; - usb_status = xfer_result->bTransferStatus; - dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n", - xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status); - if (seg->status == WA_SEG_ABORTED - || seg->status == WA_SEG_ERROR) /* already handled */ - goto segment_aborted; - if (seg->status == WA_SEG_SUBMITTED) /* ops, got here */ - seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */ - if (seg->status != WA_SEG_PENDING) { - if (printk_ratelimit()) - dev_err(dev, "xfer %p#%u: Bad segment state %u\n", - xfer, seg_idx, seg->status); - seg->status = WA_SEG_PENDING; /* workaround/"fix" it */ - } - if (usb_status & 0x80) { - seg->result = wa_xfer_status_to_errno(usb_status); - dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n", - xfer, xfer->id, seg->index, usb_status); - seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ? - WA_SEG_ABORTED : WA_SEG_ERROR; - goto error_complete; - } - /* FIXME: we ignore warnings, tally them for stats */ - if (usb_status & 0x40) /* Warning?... */ - usb_status = 0; /* ... pass */ - /* - * If the last segment bit is set, complete the remaining segments. - * When the current segment is completed, either in wa_buf_in_cb for - * transfers with data or below for no data, the xfer will complete. - */ - if (xfer_result->bTransferSegment & 0x80) - wa_complete_remaining_xfer_segs(xfer, seg->index + 1, - WA_SEG_DONE); - if (usb_pipeisoc(xfer->urb->pipe) - && (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) { - /* set up WA state to read the isoc packet status next. 
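
wa_xfer_result_chew() above relies on the bit layout of the transfer result fields: in bTransferStatus, bit 7 flags an error, bit 6 a warning, and the low six bits carry the WA status code; in bTransferSegment, bit 7 marks the last segment and the low seven bits hold the segment index. A small standalone decoder showing that layout (the sample byte values are made up):

    #include <stdio.h>

    /* Decode the flag/index layout used by the WA transfer result fields.
     * The sample values below are arbitrary. */
    int main(void)
    {
            unsigned char status = 0x81;    /* error flag + status code 1 */
            unsigned char segment = 0x83;   /* last-segment flag + index 3 */

            printf("error=%d warning=%d code=0x%02x\n",
                   (status & 0x80) != 0, (status & 0x40) != 0,
                   status & 0x3f);
            printf("last=%d index=%d\n",
                   (segment & 0x80) != 0, segment & 0x7f);
            return 0;
    }
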
*/ - wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer); - wa->dti_isoc_xfer_seg = seg_idx; - wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING; - } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe) - && (bytes_transferred > 0)) { - /* IN data phase: read to buffer */ - seg->status = WA_SEG_DTI_PENDING; - result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx, - bytes_transferred); - if (result < 0) - goto error_buf_in_populate; - ++(wa->active_buf_in_urbs); - result = usb_submit_urb(buf_in_urb, GFP_ATOMIC); - if (result < 0) { - --(wa->active_buf_in_urbs); - goto error_submit_buf_in; - } - } else { - /* OUT data phase or no data, complete it -- */ - seg->result = bytes_transferred; - rpipe_ready = rpipe_avail_inc(rpipe); - done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE); - } - spin_unlock_irqrestore(&xfer->lock, flags); - if (done) - wa_xfer_completion(xfer); - if (rpipe_ready) - wa_xfer_delayed_run(rpipe); - return; - -error_submit_buf_in: - if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { - dev_err(dev, "DTI: URB max acceptable errors " - "exceeded, resetting device\n"); - wa_reset_all(wa); - } - if (printk_ratelimit()) - dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n", - xfer, seg_idx, result); - seg->result = result; - kfree(buf_in_urb->sg); - buf_in_urb->sg = NULL; -error_buf_in_populate: - __wa_xfer_abort(xfer); - seg->status = WA_SEG_ERROR; -error_complete: - xfer->segs_done++; - rpipe_ready = rpipe_avail_inc(rpipe); - wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status); - done = __wa_xfer_is_done(xfer); - /* - * queue work item to clear STALL for control endpoints. - * Otherwise, let endpoint_reset take care of it. - */ - if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) && - usb_endpoint_xfer_control(&xfer->ep->desc) && - done) { - - dev_info(dev, "Control EP stall. Queue delayed work.\n"); - spin_lock(&wa->xfer_list_lock); - /* move xfer from xfer_list to xfer_errored_list. 
*/ - list_move_tail(&xfer->list_node, &wa->xfer_errored_list); - spin_unlock(&wa->xfer_list_lock); - spin_unlock_irqrestore(&xfer->lock, flags); - queue_work(wusbd, &wa->xfer_error_work); - } else { - spin_unlock_irqrestore(&xfer->lock, flags); - if (done) - wa_xfer_completion(xfer); - if (rpipe_ready) - wa_xfer_delayed_run(rpipe); - } - - return; - -error_bad_seg: - spin_unlock_irqrestore(&xfer->lock, flags); - wa_urb_dequeue(wa, xfer->urb, -ENOENT); - if (printk_ratelimit()) - dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx); - if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { - dev_err(dev, "DTI: URB max acceptable errors " - "exceeded, resetting device\n"); - wa_reset_all(wa); - } - return; - -segment_aborted: - /* nothing to do, as the aborter did the completion */ - spin_unlock_irqrestore(&xfer->lock, flags); -} - -/* - * Process a isochronous packet status message - * - * inbound transfers: need to schedule a buf_in_urb read - */ -static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb) -{ - struct device *dev = &wa->usb_iface->dev; - struct wa_xfer_packet_status_hwaiso *packet_status; - struct wa_xfer_packet_status_len_hwaiso *status_array; - struct wa_xfer *xfer; - unsigned long flags; - struct wa_seg *seg; - struct wa_rpipe *rpipe; - unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index; - unsigned first_frame_index = 0, rpipe_ready = 0; - size_t expected_size; - - /* We have a xfer result buffer; check it */ - dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n", - urb->actual_length, urb->transfer_buffer); - packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf); - if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) { - dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n", - packet_status->bPacketType); - goto error_parse_buffer; - } - xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress); - if (xfer == NULL) { - dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n", - wa->dti_isoc_xfer_in_progress); - goto error_parse_buffer; - } - spin_lock_irqsave(&xfer->lock, flags); - if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs)) - goto error_bad_seg; - seg = xfer->seg[wa->dti_isoc_xfer_seg]; - rpipe = xfer->ep->hcpriv; - expected_size = struct_size(packet_status, PacketStatus, - seg->isoc_frame_count); - if (urb->actual_length != expected_size) { - dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %zu needed)\n", - urb->actual_length, expected_size); - goto error_bad_seg; - } - if (le16_to_cpu(packet_status->wLength) != expected_size) { - dev_err(dev, "DTI Error: isoc packet status--bad length %u\n", - le16_to_cpu(packet_status->wLength)); - goto error_bad_seg; - } - /* write isoc packet status and lengths back to the xfer urb. */ - status_array = packet_status->PacketStatus; - xfer->urb->start_frame = - wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd); - for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) { - struct usb_iso_packet_descriptor *iso_frame_desc = - xfer->urb->iso_frame_desc; - const int xfer_frame_index = - seg->isoc_frame_offset + seg_index; - - iso_frame_desc[xfer_frame_index].status = - wa_xfer_status_to_errno( - le16_to_cpu(status_array[seg_index].PacketStatus)); - iso_frame_desc[xfer_frame_index].actual_length = - le16_to_cpu(status_array[seg_index].PacketLength); - /* track the number of frames successfully transferred. 
*/ - if (iso_frame_desc[xfer_frame_index].actual_length > 0) { - /* save the starting frame index for buf_in_urb. */ - if (!data_frame_count) - first_frame_index = seg_index; - ++data_frame_count; - } - } - - if (xfer->is_inbound && data_frame_count) { - int result, total_frames_read = 0, urb_index = 0; - struct urb *buf_in_urb; - - /* IN data phase: read to buffer */ - seg->status = WA_SEG_DTI_PENDING; - - /* start with the first frame with data. */ - seg->isoc_frame_index = first_frame_index; - /* submit up to WA_MAX_BUF_IN_URBS read URBs. */ - do { - int urb_frame_index, urb_frame_count; - struct usb_iso_packet_descriptor *iso_frame_desc; - - buf_in_urb = &(wa->buf_in_urbs[urb_index]); - urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, - buf_in_urb, xfer, seg); - /* advance frame index to start of next read URB. */ - seg->isoc_frame_index += urb_frame_count; - total_frames_read += urb_frame_count; - - ++(wa->active_buf_in_urbs); - result = usb_submit_urb(buf_in_urb, GFP_ATOMIC); - - /* skip 0-byte frames. */ - urb_frame_index = - seg->isoc_frame_offset + seg->isoc_frame_index; - iso_frame_desc = - &(xfer->urb->iso_frame_desc[urb_frame_index]); - while ((seg->isoc_frame_index < - seg->isoc_frame_count) && - (iso_frame_desc->actual_length == 0)) { - ++(seg->isoc_frame_index); - ++iso_frame_desc; - } - ++urb_index; - - } while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS) - && (seg->isoc_frame_index < - seg->isoc_frame_count)); - - if (result < 0) { - --(wa->active_buf_in_urbs); - dev_err(dev, "DTI Error: Could not submit buf in URB (%d)", - result); - wa_reset_all(wa); - } else if (data_frame_count > total_frames_read) - /* If we need to read more frames, set DTI busy. */ - dti_busy = 1; - } else { - /* OUT transfer or no more IN data, complete it -- */ - rpipe_ready = rpipe_avail_inc(rpipe); - done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE); - } - spin_unlock_irqrestore(&xfer->lock, flags); - if (dti_busy) - wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING; - else - wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING; - if (done) - wa_xfer_completion(xfer); - if (rpipe_ready) - wa_xfer_delayed_run(rpipe); - wa_xfer_put(xfer); - return dti_busy; - -error_bad_seg: - spin_unlock_irqrestore(&xfer->lock, flags); - wa_xfer_put(xfer); -error_parse_buffer: - return dti_busy; -} - -/* - * Callback for the IN data phase - * - * If successful transition state; otherwise, take a note of the - * error, mark this segment done and try completion. - * - * Note we don't access until we are sure that the transfer hasn't - * been cancelled (ECONNRESET, ENOENT), which could mean that - * seg->xfer could be already gone. - */ -static void wa_buf_in_cb(struct urb *urb) -{ - struct wa_seg *seg = urb->context; - struct wa_xfer *xfer = seg->xfer; - struct wahc *wa; - struct device *dev; - struct wa_rpipe *rpipe; - unsigned rpipe_ready = 0, isoc_data_frame_count = 0; - unsigned long flags; - int resubmit_dti = 0, active_buf_in_urbs; - u8 done = 0; - - /* free the sg if it was used. */ - kfree(urb->sg); - urb->sg = NULL; - - spin_lock_irqsave(&xfer->lock, flags); - wa = xfer->wa; - dev = &wa->usb_iface->dev; - --(wa->active_buf_in_urbs); - active_buf_in_urbs = wa->active_buf_in_urbs; - rpipe = xfer->ep->hcpriv; - - if (usb_pipeisoc(xfer->urb->pipe)) { - struct usb_iso_packet_descriptor *iso_frame_desc = - xfer->urb->iso_frame_desc; - int seg_index; - - /* - * Find the next isoc frame with data and count how many - * frames with data remain. 
- */ - seg_index = seg->isoc_frame_index; - while (seg_index < seg->isoc_frame_count) { - const int urb_frame_index = - seg->isoc_frame_offset + seg_index; - - if (iso_frame_desc[urb_frame_index].actual_length > 0) { - /* save the index of the next frame with data */ - if (!isoc_data_frame_count) - seg->isoc_frame_index = seg_index; - ++isoc_data_frame_count; - } - ++seg_index; - } - } - spin_unlock_irqrestore(&xfer->lock, flags); - - switch (urb->status) { - case 0: - spin_lock_irqsave(&xfer->lock, flags); - - seg->result += urb->actual_length; - if (isoc_data_frame_count > 0) { - int result, urb_frame_count; - - /* submit a read URB for the next frame with data. */ - urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb, - xfer, seg); - /* advance index to start of next read URB. */ - seg->isoc_frame_index += urb_frame_count; - ++(wa->active_buf_in_urbs); - result = usb_submit_urb(urb, GFP_ATOMIC); - if (result < 0) { - --(wa->active_buf_in_urbs); - dev_err(dev, "DTI Error: Could not submit buf in URB (%d)", - result); - wa_reset_all(wa); - } - /* - * If we are in this callback and - * isoc_data_frame_count > 0, it means that the dti_urb - * submission was delayed in wa_dti_cb. Once - * we submit the last buf_in_urb, we can submit the - * delayed dti_urb. - */ - resubmit_dti = (isoc_data_frame_count == - urb_frame_count); - } else if (active_buf_in_urbs == 0) { - dev_dbg(dev, - "xfer %p 0x%08X#%u: data in done (%zu bytes)\n", - xfer, wa_xfer_id(xfer), seg->index, - seg->result); - rpipe_ready = rpipe_avail_inc(rpipe); - done = __wa_xfer_mark_seg_as_done(xfer, seg, - WA_SEG_DONE); - } - spin_unlock_irqrestore(&xfer->lock, flags); - if (done) - wa_xfer_completion(xfer); - if (rpipe_ready) - wa_xfer_delayed_run(rpipe); - break; - case -ECONNRESET: /* URB unlinked; no need to do anything */ - case -ENOENT: /* as it was done by the who unlinked us */ - break; - default: /* Other errors ... */ - /* - * Error on data buf read. Only resubmit DTI if it hasn't - * already been done by previously hitting this error or by a - * successful completion of the previous buf_in_urb. - */ - resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING; - spin_lock_irqsave(&xfer->lock, flags); - if (printk_ratelimit()) - dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n", - xfer, wa_xfer_id(xfer), seg->index, - urb->status); - if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, - EDC_ERROR_TIMEFRAME)){ - dev_err(dev, "DTO: URB max acceptable errors " - "exceeded, resetting device\n"); - wa_reset_all(wa); - } - seg->result = urb->status; - rpipe_ready = rpipe_avail_inc(rpipe); - if (active_buf_in_urbs == 0) - done = __wa_xfer_mark_seg_as_done(xfer, seg, - WA_SEG_ERROR); - else - __wa_xfer_abort(xfer); - spin_unlock_irqrestore(&xfer->lock, flags); - if (done) - wa_xfer_completion(xfer); - if (rpipe_ready) - wa_xfer_delayed_run(rpipe); - } - - if (resubmit_dti) { - int result; - - wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING; - - result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC); - if (result < 0) { - dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n", - result); - wa_reset_all(wa); - } - } -} - -/* - * Handle an incoming transfer result buffer - * - * Given a transfer result buffer, it completes the transfer (possibly - * scheduling and buffer in read) and then resubmits the DTI URB for a - * new transfer result read. 
- * - * - * The xfer_result DTI URB state machine - * - * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In) - * - * We start in OFF mode, the first xfer_result notification [through - * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to - * read. - * - * We receive a buffer -- if it is not a xfer_result, we complain and - * repost the DTI-URB. If it is a xfer_result then do the xfer seg - * request accounting. If it is an IN segment, we move to RBI and post - * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will - * repost the DTI-URB and move to RXR state. if there was no IN - * segment, it will repost the DTI-URB. - * - * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many - * errors) in the URBs. - */ -static void wa_dti_cb(struct urb *urb) -{ - int result, dti_busy = 0; - struct wahc *wa = urb->context; - struct device *dev = &wa->usb_iface->dev; - u32 xfer_id; - u8 usb_status; - - BUG_ON(wa->dti_urb != urb); - switch (wa->dti_urb->status) { - case 0: - if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) { - struct wa_xfer_result *xfer_result; - struct wa_xfer *xfer; - - /* We have a xfer result buffer; check it */ - dev_dbg(dev, "DTI: xfer result %d bytes at %p\n", - urb->actual_length, urb->transfer_buffer); - if (urb->actual_length != sizeof(*xfer_result)) { - dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n", - urb->actual_length, - sizeof(*xfer_result)); - break; - } - xfer_result = (struct wa_xfer_result *)(wa->dti_buf); - if (xfer_result->hdr.bLength != sizeof(*xfer_result)) { - dev_err(dev, "DTI Error: xfer result--bad header length %u\n", - xfer_result->hdr.bLength); - break; - } - if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) { - dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n", - xfer_result->hdr.bNotifyType); - break; - } - xfer_id = le32_to_cpu(xfer_result->dwTransferID); - usb_status = xfer_result->bTransferStatus & 0x3f; - if (usb_status == WA_XFER_STATUS_NOT_FOUND) { - /* taken care of already */ - dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n", - __func__, xfer_id, - xfer_result->bTransferSegment & 0x7f); - break; - } - xfer = wa_xfer_get_by_id(wa, xfer_id); - if (xfer == NULL) { - /* FIXME: transaction not found. */ - dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n", - xfer_id, usb_status); - break; - } - wa_xfer_result_chew(wa, xfer, xfer_result); - wa_xfer_put(xfer); - } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) { - dti_busy = wa_process_iso_packet_status(wa, urb); - } else { - dev_err(dev, "DTI Error: unexpected EP state = %d\n", - wa->dti_state); - } - break; - case -ENOENT: /* (we killed the URB)...so, no broadcast */ - case -ESHUTDOWN: /* going away! */ - dev_dbg(dev, "DTI: going down! %d\n", urb->status); - goto out; - default: - /* Unknown error */ - if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, - EDC_ERROR_TIMEFRAME)) { - dev_err(dev, "DTI: URB max acceptable errors " - "exceeded, resetting device\n"); - wa_reset_all(wa); - goto out; - } - if (printk_ratelimit()) - dev_err(dev, "DTI: URB error %d\n", urb->status); - break; - } - - /* Resubmit the DTI URB if we are not busy processing isoc in frames. 
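
The comment above describes the DTI URB state machine as OFF, Read-Xfer-Result (RXR) and Read-Buffer-In (RBI). A minimal enum-and-switch rendering of those transitions; the state names are simplified stand-ins for the driver's WA_DTI_* constants, and the "IN data pending" flag is an assumption made only for this sketch:

    #include <stdio.h>

    /* Simplified rendering of the DTI state machine described above.
     * State names are stand-ins, not the driver's constants. */
    enum dti_state { DTI_OFF, DTI_READ_XFER_RESULT, DTI_READ_BUF_IN };

    static enum dti_state next_state(enum dti_state s, int in_data_pending)
    {
            switch (s) {
            case DTI_OFF:
                    /* first transfer notification arms the DTI URB */
                    return DTI_READ_XFER_RESULT;
            case DTI_READ_XFER_RESULT:
                    /* an IN segment needs a buffer read before more results */
                    return in_data_pending ? DTI_READ_BUF_IN
                                           : DTI_READ_XFER_RESULT;
            case DTI_READ_BUF_IN:
                    /* buf-in completion reposts the DTI URB */
                    return DTI_READ_XFER_RESULT;
            }
            return s;
    }

    int main(void)
    {
            enum dti_state s = DTI_OFF;

            s = next_state(s, 0);   /* notification -> read xfer result */
            s = next_state(s, 1);   /* IN segment   -> read buffer in   */
            s = next_state(s, 0);   /* buf-in done  -> read xfer result */
            printf("final state %d\n", s);
            return 0;
    }
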
*/ - if (!dti_busy) { - result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC); - if (result < 0) { - dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n", - result); - wa_reset_all(wa); - } - } -out: - return; -} - -/* - * Initialize the DTI URB for reading transfer result notifications and also - * the buffer-in URB, for reading buffers. Then we just submit the DTI URB. - */ -int wa_dti_start(struct wahc *wa) -{ - const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd; - struct device *dev = &wa->usb_iface->dev; - int result = -ENOMEM, index; - - if (wa->dti_urb != NULL) /* DTI URB already started */ - goto out; - - wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL); - if (wa->dti_urb == NULL) - goto error_dti_urb_alloc; - usb_fill_bulk_urb( - wa->dti_urb, wa->usb_dev, - usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress), - wa->dti_buf, wa->dti_buf_size, - wa_dti_cb, wa); - - /* init the buf in URBs */ - for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) { - usb_fill_bulk_urb( - &(wa->buf_in_urbs[index]), wa->usb_dev, - usb_rcvbulkpipe(wa->usb_dev, - 0x80 | dti_epd->bEndpointAddress), - NULL, 0, wa_buf_in_cb, wa); - } - result = usb_submit_urb(wa->dti_urb, GFP_KERNEL); - if (result < 0) { - dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n", - result); - goto error_dti_urb_submit; - } -out: - return 0; - -error_dti_urb_submit: - usb_put_urb(wa->dti_urb); - wa->dti_urb = NULL; -error_dti_urb_alloc: - return result; -} -EXPORT_SYMBOL_GPL(wa_dti_start); -/* - * Transfer complete notification - * - * Called from the notif.c code. We get a notification on EP2 saying - * that some endpoint has some transfer result data available. We are - * about to read it. - * - * To speed up things, we always have a URB reading the DTI URB; we - * don't really set it up and start it until the first xfer complete - * notification arrives, which is what we do here. - * - * Follow up in wa_dti_cb(), as that's where the whole state - * machine starts. - * - * @wa shall be referenced - */ -void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr) -{ - struct device *dev = &wa->usb_iface->dev; - struct wa_notif_xfer *notif_xfer; - const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd; - - notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr); - BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER); - - if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) { - /* FIXME: hardcoded limitation, adapt */ - dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n", - notif_xfer->bEndpoint, dti_epd->bEndpointAddress); - goto error; - } - - /* attempt to start the DTI ep processing. */ - if (wa_dti_start(wa) < 0) - goto error; - - return; - -error: - wa_reset_all(wa); -} diff --git a/drivers/staging/wusbcore/wusbhc.c b/drivers/staging/wusbcore/wusbhc.c deleted file mode 100644 index d0b404d258e8..000000000000 --- a/drivers/staging/wusbcore/wusbhc.c +++ /dev/null @@ -1,490 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB Host Controller - * sysfs glue, wusbcore module support and life cycle management - * - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * Creation/destruction of wusbhc is split in two parts; that that - * doesn't require the HCD to be added (wusbhc_{create,destroy}) and - * the one that requires (phase B, wusbhc_b_{create,destroy}). 
- * - * This is so because usb_add_hcd() will start the HC, and thus, all - * the HC specific stuff has to be already initialized (like sysfs - * thingies). - */ -#include <linux/device.h> -#include <linux/module.h> -#include "wusbhc.h" - -/** - * Extract the wusbhc that corresponds to a USB Host Controller class device - * - * WARNING! Apply only if @dev is that of a - * wusbhc.usb_hcd.self->class_dev; otherwise, you loose. - */ -static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev) -{ - struct usb_bus *usb_bus = dev_get_drvdata(dev); - struct usb_hcd *usb_hcd = bus_to_hcd(usb_bus); - return usb_hcd_to_wusbhc(usb_hcd); -} - -/* - * Show & store the current WUSB trust timeout - * - * We don't do locking--it is an 'atomic' value. - * - * The units that we store/show are always MILLISECONDS. However, the - * value of trust_timeout is jiffies. - */ -static ssize_t wusb_trust_timeout_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); - - return scnprintf(buf, PAGE_SIZE, "%u\n", wusbhc->trust_timeout); -} - -static ssize_t wusb_trust_timeout_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); - ssize_t result = -ENOSYS; - unsigned trust_timeout; - - result = sscanf(buf, "%u", &trust_timeout); - if (result != 1) { - result = -EINVAL; - goto out; - } - wusbhc->trust_timeout = min_t(unsigned, trust_timeout, 500); - cancel_delayed_work(&wusbhc->keep_alive_timer); - flush_workqueue(wusbd); - queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, - msecs_to_jiffies(wusbhc->trust_timeout / 2)); -out: - return result < 0 ? result : size; -} -static DEVICE_ATTR_RW(wusb_trust_timeout); - -/* - * Show the current WUSB CHID. - */ -static ssize_t wusb_chid_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); - const struct wusb_ckhdid *chid; - - if (wusbhc->wuie_host_info != NULL) - chid = &wusbhc->wuie_host_info->CHID; - else - chid = &wusb_ckhdid_zero; - - return sprintf(buf, "%16ph\n", chid->data); -} - -/* - * Store a new CHID. - * - * - Write an all zeros CHID and it will stop the controller - * - Write a non-zero CHID and it will start it. - * - * See wusbhc_chid_set() for more info. - */ -static ssize_t wusb_chid_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); - struct wusb_ckhdid chid; - ssize_t result; - - result = sscanf(buf, - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx\n", - &chid.data[0] , &chid.data[1] , - &chid.data[2] , &chid.data[3] , - &chid.data[4] , &chid.data[5] , - &chid.data[6] , &chid.data[7] , - &chid.data[8] , &chid.data[9] , - &chid.data[10], &chid.data[11], - &chid.data[12], &chid.data[13], - &chid.data[14], &chid.data[15]); - if (result != 16) { - dev_err(dev, "Unrecognized CHID (need 16 8-bit hex digits): " - "%d\n", (int)result); - return -EINVAL; - } - result = wusbhc_chid_set(wusbhc, &chid); - return result < 0 ? 
result : size; -} -static DEVICE_ATTR_RW(wusb_chid); - - -static ssize_t wusb_phy_rate_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); - - return sprintf(buf, "%d\n", wusbhc->phy_rate); -} - -static ssize_t wusb_phy_rate_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); - uint8_t phy_rate; - ssize_t result; - - result = sscanf(buf, "%hhu", &phy_rate); - if (result != 1) - return -EINVAL; - if (phy_rate >= UWB_PHY_RATE_INVALID) - return -EINVAL; - - wusbhc->phy_rate = phy_rate; - return size; -} -static DEVICE_ATTR_RW(wusb_phy_rate); - -static ssize_t wusb_dnts_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); - - return sprintf(buf, "num slots: %d\ninterval: %dms\n", - wusbhc->dnts_num_slots, wusbhc->dnts_interval); -} - -static ssize_t wusb_dnts_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); - uint8_t num_slots, interval; - ssize_t result; - - result = sscanf(buf, "%hhu %hhu", &num_slots, &interval); - - if (result != 2) - return -EINVAL; - - wusbhc->dnts_num_slots = num_slots; - wusbhc->dnts_interval = interval; - - return size; -} -static DEVICE_ATTR_RW(wusb_dnts); - -static ssize_t wusb_retry_count_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); - - return sprintf(buf, "%d\n", wusbhc->retry_count); -} - -static ssize_t wusb_retry_count_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); - uint8_t retry_count; - ssize_t result; - - result = sscanf(buf, "%hhu", &retry_count); - - if (result != 1) - return -EINVAL; - - wusbhc->retry_count = max_t(uint8_t, retry_count, - WUSB_RETRY_COUNT_MAX); - - return size; -} -static DEVICE_ATTR_RW(wusb_retry_count); - -/* Group all the WUSBHC attributes */ -static struct attribute *wusbhc_attrs[] = { - &dev_attr_wusb_trust_timeout.attr, - &dev_attr_wusb_chid.attr, - &dev_attr_wusb_phy_rate.attr, - &dev_attr_wusb_dnts.attr, - &dev_attr_wusb_retry_count.attr, - NULL, -}; - -static const struct attribute_group wusbhc_attr_group = { - .name = NULL, /* we want them in the same directory */ - .attrs = wusbhc_attrs, -}; - -/* - * Create a wusbhc instance - * - * NOTEs: - * - * - assumes *wusbhc has been zeroed and wusbhc->usb_hcd has been - * initialized but not added. - * - * - fill out ports_max, mmcies_max and mmcie_{add,rm} before calling. - * - * - fill out wusbhc->uwb_rc and refcount it before calling - * - fill out the wusbhc->sec_modes array - */ -int wusbhc_create(struct wusbhc *wusbhc) -{ - int result = 0; - - /* set defaults. These can be overwritten using sysfs attributes. 
*/ - wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS; - wusbhc->phy_rate = UWB_PHY_RATE_INVALID - 1; - wusbhc->dnts_num_slots = 4; - wusbhc->dnts_interval = 2; - wusbhc->retry_count = WUSB_RETRY_COUNT_INFINITE; - - mutex_init(&wusbhc->mutex); - result = wusbhc_mmcie_create(wusbhc); - if (result < 0) - goto error_mmcie_create; - result = wusbhc_devconnect_create(wusbhc); - if (result < 0) - goto error_devconnect_create; - result = wusbhc_rh_create(wusbhc); - if (result < 0) - goto error_rh_create; - result = wusbhc_sec_create(wusbhc); - if (result < 0) - goto error_sec_create; - return 0; - -error_sec_create: - wusbhc_rh_destroy(wusbhc); -error_rh_create: - wusbhc_devconnect_destroy(wusbhc); -error_devconnect_create: - wusbhc_mmcie_destroy(wusbhc); -error_mmcie_create: - return result; -} -EXPORT_SYMBOL_GPL(wusbhc_create); - -static inline struct kobject *wusbhc_kobj(struct wusbhc *wusbhc) -{ - return &wusbhc->usb_hcd.self.controller->kobj; -} - -/* - * Phase B of a wusbhc instance creation - * - * Creates fields that depend on wusbhc->usb_hcd having been - * added. This is where we create the sysfs files in - * /sys/class/usb_host/usb_hostX/. - * - * NOTE: Assumes wusbhc->usb_hcd has been already added by the upper - * layer (hwahc or whci) - */ -int wusbhc_b_create(struct wusbhc *wusbhc) -{ - int result = 0; - struct device *dev = wusbhc->usb_hcd.self.controller; - - result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group); - if (result < 0) { - dev_err(dev, "Cannot register WUSBHC attributes: %d\n", - result); - goto error_create_attr_group; - } - - return 0; -error_create_attr_group: - return result; -} -EXPORT_SYMBOL_GPL(wusbhc_b_create); - -void wusbhc_b_destroy(struct wusbhc *wusbhc) -{ - wusbhc_pal_unregister(wusbhc); - sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group); -} -EXPORT_SYMBOL_GPL(wusbhc_b_destroy); - -void wusbhc_destroy(struct wusbhc *wusbhc) -{ - wusbhc_sec_destroy(wusbhc); - wusbhc_rh_destroy(wusbhc); - wusbhc_devconnect_destroy(wusbhc); - wusbhc_mmcie_destroy(wusbhc); -} -EXPORT_SYMBOL_GPL(wusbhc_destroy); - -struct workqueue_struct *wusbd; -EXPORT_SYMBOL_GPL(wusbd); - -/* - * WUSB Cluster ID allocation map - * - * Each WUSB bus in a channel is identified with a Cluster Id in the - * unauth address pace (WUSB1.0[4.3]). We take the range 0xe0 to 0xff - * (that's space for 31 WUSB controllers, as 0xff can't be taken). We - * start taking from 0xff, 0xfe, 0xfd... (hence the += or -= 0xff). - * - * For each one we taken, we pin it in the bitap - */ -#define CLUSTER_IDS 32 -static DECLARE_BITMAP(wusb_cluster_id_table, CLUSTER_IDS); -static DEFINE_SPINLOCK(wusb_cluster_ids_lock); - -/* - * Get a WUSB Cluster ID - * - * Need to release with wusb_cluster_id_put() when done w/ it. - */ -/* FIXME: coordinate with the choose_addres() from the USB stack */ -/* we want to leave the top of the 128 range for cluster addresses and - * the bottom for device addresses (as we map them one on one with - * ports). 
*/ -u8 wusb_cluster_id_get(void) -{ - u8 id; - spin_lock(&wusb_cluster_ids_lock); - id = find_first_zero_bit(wusb_cluster_id_table, CLUSTER_IDS); - if (id >= CLUSTER_IDS) { - id = 0; - goto out; - } - set_bit(id, wusb_cluster_id_table); - id = (u8) 0xff - id; -out: - spin_unlock(&wusb_cluster_ids_lock); - return id; - -} -EXPORT_SYMBOL_GPL(wusb_cluster_id_get); - -/* - * Release a WUSB Cluster ID - * - * Obtained it with wusb_cluster_id_get() - */ -void wusb_cluster_id_put(u8 id) -{ - id = 0xff - id; - BUG_ON(id >= CLUSTER_IDS); - spin_lock(&wusb_cluster_ids_lock); - WARN_ON(!test_bit(id, wusb_cluster_id_table)); - clear_bit(id, wusb_cluster_id_table); - spin_unlock(&wusb_cluster_ids_lock); -} -EXPORT_SYMBOL_GPL(wusb_cluster_id_put); - -/** - * wusbhc_giveback_urb - return an URB to the USB core - * @wusbhc: the host controller the URB is from. - * @urb: the URB. - * @status: the URB's status. - * - * Return an URB to the USB core doing some additional WUSB specific - * processing. - * - * - After a successful transfer, update the trust timeout timestamp - * for the WUSB device. - * - * - [WUSB] sections 4.13 and 7.5.1 specify the stop retransmission - * condition for the WCONNECTACK_IE is that the host has observed - * the associated device responding to a control transfer. - */ -void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status) -{ - struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, - urb->dev); - - if (status == 0 && wusb_dev) { - wusb_dev->entry_ts = jiffies; - - /* wusbhc_devconnect_acked() can't be called from - atomic context so defer it to a work queue. */ - if (!list_empty(&wusb_dev->cack_node)) - queue_work(wusbd, &wusb_dev->devconnect_acked_work); - else - wusb_dev_put(wusb_dev); - } - - usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status); -} -EXPORT_SYMBOL_GPL(wusbhc_giveback_urb); - -/** - * wusbhc_reset_all - reset the HC hardware - * @wusbhc: the host controller to reset. - * - * Request a full hardware reset of the chip. This will also reset - * the radio controller and any other PALs. 
- */ -void wusbhc_reset_all(struct wusbhc *wusbhc) -{ - if (wusbhc->uwb_rc) - uwb_rc_reset_all(wusbhc->uwb_rc); -} -EXPORT_SYMBOL_GPL(wusbhc_reset_all); - -static struct notifier_block wusb_usb_notifier = { - .notifier_call = wusb_usb_ncb, - .priority = INT_MAX /* Need to be called first of all */ -}; - -static int __init wusbcore_init(void) -{ - int result; - result = wusb_crypto_init(); - if (result < 0) - goto error_crypto_init; - /* WQ is singlethread because we need to serialize notifications */ - wusbd = create_singlethread_workqueue("wusbd"); - if (wusbd == NULL) { - result = -ENOMEM; - printk(KERN_ERR "WUSB-core: Cannot create wusbd workqueue\n"); - goto error_wusbd_create; - } - usb_register_notify(&wusb_usb_notifier); - bitmap_zero(wusb_cluster_id_table, CLUSTER_IDS); - set_bit(0, wusb_cluster_id_table); /* reserve Cluster ID 0xff */ - return 0; - -error_wusbd_create: - wusb_crypto_exit(); -error_crypto_init: - return result; - -} -module_init(wusbcore_init); - -static void __exit wusbcore_exit(void) -{ - clear_bit(0, wusb_cluster_id_table); - if (!bitmap_empty(wusb_cluster_id_table, CLUSTER_IDS)) { - printk(KERN_ERR "BUG: WUSB Cluster IDs not released on exit: %*pb\n", - CLUSTER_IDS, wusb_cluster_id_table); - WARN_ON(1); - } - usb_unregister_notify(&wusb_usb_notifier); - destroy_workqueue(wusbd); - wusb_crypto_exit(); -} -module_exit(wusbcore_exit); - -MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); -MODULE_DESCRIPTION("Wireless USB core"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/wusbcore/wusbhc.h b/drivers/staging/wusbcore/wusbhc.h deleted file mode 100644 index 716244a2ec44..000000000000 --- a/drivers/staging/wusbcore/wusbhc.h +++ /dev/null @@ -1,487 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB Host Controller - * Common infrastructure for WHCI and HWA WUSB-HC drivers - * - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This driver implements parts common to all Wireless USB Host - * Controllers (struct wusbhc, embedding a struct usb_hcd) and is used - * by: - * - * - hwahc: HWA, USB-dongle that implements a Wireless USB host - * controller, (Wireless USB 1.0 Host-Wire-Adapter specification). - * - * - whci: WHCI, a PCI card with a wireless host controller - * (Wireless Host Controller Interface 1.0 specification). - * - * Check out the Design-overview.txt file in the source documentation - * for other details on the implementation. - * - * Main blocks: - * - * rh Root Hub emulation (part of the HCD glue) - * - * devconnect Handle all the issues related to device connection, - * authentication, disconnection, timeout, reseting, - * keepalives, etc. - * - * mmc MMC IE broadcasting handling - * - * A host controller driver just initializes its stuff and as part of - * that, creates a 'struct wusbhc' instance that handles all the - * common WUSB mechanisms. Links in the function ops that are specific - * to it and then registers the host controller. Ready to run. - */ - -#ifndef __WUSBHC_H__ -#define __WUSBHC_H__ - -#include <linux/usb.h> -#include <linux/list.h> -#include <linux/mutex.h> -#include <linux/kref.h> -#include <linux/workqueue.h> -#include <linux/usb/hcd.h> -#include "../uwb/uwb.h" -#include "include/wusb.h" - -/* - * Time from a WUSB channel stop request to the last transmitted MMC. - * - * This needs to be > 4.096 ms in case no MMCs can be transmitted in - * zone 0. 
- */ -#define WUSB_CHANNEL_STOP_DELAY_MS 8 -#define WUSB_RETRY_COUNT_MAX 15 -#define WUSB_RETRY_COUNT_INFINITE 0 - -/** - * Wireless USB device - * - * Describe a WUSB device connected to the cluster. This struct - * belongs to the 'struct wusb_port' it is attached to and it is - * responsible for putting and clearing the pointer to it. - * - * Note this "complements" the 'struct usb_device' that the usb_hcd - * keeps for each connected USB device. However, it extends some - * information that is not available (there is no hcpriv ptr in it!) - * *and* most importantly, it's life cycle is different. It is created - * as soon as we get a DN_Connect (connect request notification) from - * the device through the WUSB host controller; the USB stack doesn't - * create the device until we authenticate it. FIXME: this will - * change. - * - * @bos: This is allocated when the BOS descriptors are read from - * the device and freed upon the wusb_dev struct dying. - * @wusb_cap_descr: points into @bos, and has been verified to be size - * safe. - */ -struct wusb_dev { - struct kref refcnt; - struct wusbhc *wusbhc; - struct list_head cack_node; /* Connect-Ack list */ - struct list_head rekey_node; /* GTK rekey list */ - u8 port_idx; - u8 addr; - u8 beacon_type:4; - struct usb_encryption_descriptor ccm1_etd; - struct wusb_ckhdid cdid; - unsigned long entry_ts; - struct usb_bos_descriptor *bos; - struct usb_wireless_cap_descriptor *wusb_cap_descr; - struct uwb_mas_bm availability; - struct work_struct devconnect_acked_work; - struct usb_device *usb_dev; -}; - -#define WUSB_DEV_ADDR_UNAUTH 0x80 - -static inline void wusb_dev_init(struct wusb_dev *wusb_dev) -{ - kref_init(&wusb_dev->refcnt); - /* no need to init the cack_node */ -} - -extern void wusb_dev_destroy(struct kref *_wusb_dev); - -static inline struct wusb_dev *wusb_dev_get(struct wusb_dev *wusb_dev) -{ - kref_get(&wusb_dev->refcnt); - return wusb_dev; -} - -static inline void wusb_dev_put(struct wusb_dev *wusb_dev) -{ - kref_put(&wusb_dev->refcnt, wusb_dev_destroy); -} - -/** - * Wireless USB Host Controller root hub "fake" ports - * (state and device information) - * - * Wireless USB is wireless, so there are no ports; but we - * fake'em. Each RC can connect a max of devices at the same time - * (given in the Wireless Adapter descriptor, bNumPorts or WHCI's - * caps), referred to in wusbhc->ports_max. - * - * See rh.c for more information. - * - * The @status and @change use the same bits as in USB2.0[11.24.2.7], - * so we don't have to do much when getting the port's status. - * - * WUSB1.0[7.1], USB2.0[11.24.2.7.1,fig 11-10], - * include/linux/usb_ch9.h (#define USB_PORT_STAT_*) - */ -struct wusb_port { - u16 status; - u16 change; - struct wusb_dev *wusb_dev; /* connected device's info */ - u32 ptk_tkid; -}; - -/** - * WUSB Host Controller specifics - * - * All fields that are common to all Wireless USB controller types - * (HWA and WHCI) are grouped here. Host Controller - * functions/operations that only deal with general Wireless USB HC - * issues use this data type to refer to the host. - * - * @usb_hcd Instantiation of a USB host controller - * (initialized by upper layer [HWA=HC or WHCI]. - * - * @dev Device that implements this; initialized by the - * upper layer (HWA-HC, WHCI...); this device should - * have a refcount. - * - * @trust_timeout After this time without hearing for device - * activity, we consider the device gone and we have to - * re-authenticate. 
- * - * Can be accessed w/o locking--however, read to a - * local variable then use. - * - * @chid WUSB Cluster Host ID: this is supposed to be a - * unique value that doesn't change across reboots (so - * that your devices do not require re-association). - * - * Read/Write protected by @mutex - * - * @dev_info This array has ports_max elements. It is used to - * give the HC information about the WUSB devices (see - * 'struct wusb_dev_info'). - * - * For HWA we need to allocate it in heap; for WHCI it - * needs to be permanently mapped, so we keep it for - * both and make it easy. Call wusbhc->dev_info_set() - * to update an entry. - * - * @ports_max Number of simultaneous device connections (fake - * ports) this HC will take. Read-only. - * - * @port Array of port status for each fake root port. Guaranteed to - * always be the same length during device existence - * [this allows for some unlocked but referenced reading]. - * - * @mmcies_max Max number of Information Elements this HC can send - * in its MMC. Read-only. - * - * @start Start the WUSB channel. - * - * @stop Stop the WUSB channel after the specified number of - * milliseconds. Channel Stop IEs should be transmitted - * as required by [WUSB] 4.16.2.1. - * - * @mmcie_add HC specific operation (WHCI or HWA) for adding an - * MMCIE. - * - * @mmcie_rm HC specific operation (WHCI or HWA) for removing an - * MMCIE. - * - * @set_ptk: Set the PTK and enable encryption for a device. Or, if - * the supplied key is NULL, disable encryption for that - * device. - * - * @set_gtk: Set the GTK to be used for all future broadcast packets - * (i.e., MMCs). With some hardware, setting the GTK may start - * MMC transmission. - * - * NOTE: - * - * - If wusb_dev->usb_dev is not NULL, then usb_dev is valid - * (wusb_dev has a refcount on it). Likewise, if usb_dev->wusb_dev - * is not NULL, usb_dev->wusb_dev is valid (usb_dev keeps a - * refcount on it). - * - * Most of the times when you need to use it, it will be non-NULL, - * so there is no real need to check for it (wusb_dev will - * disappear before usb_dev). - * - * - The following fields need to be filled out before calling - * wusbhc_create(): ports_max, mmcies_max, mmcie_{add,rm}. - * - * - there is no wusbhc_init() method, we do everything in - * wusbhc_create(). - * - * - Creation is done in two phases, wusbhc_create() and - * wusbhc_create_b(); b are the parts that need to be called after - * calling usb_hcd_add(&wusbhc->usb_hcd). 
- */ -struct wusbhc { - struct usb_hcd usb_hcd; /* HAS TO BE 1st */ - struct device *dev; - struct uwb_rc *uwb_rc; - struct uwb_pal pal; - - unsigned trust_timeout; /* in jiffies */ - struct wusb_ckhdid chid; - uint8_t phy_rate; - uint8_t dnts_num_slots; - uint8_t dnts_interval; - uint8_t retry_count; - struct wuie_host_info *wuie_host_info; - - struct mutex mutex; /* locks everything else */ - u16 cluster_id; /* Wireless USB Cluster ID */ - struct wusb_port *port; /* Fake port status handling */ - struct wusb_dev_info *dev_info; /* for Set Device Info mgmt */ - u8 ports_max; - unsigned active:1; /* currently xmit'ing MMCs */ - struct wuie_keep_alive keep_alive_ie; /* protected by mutex */ - struct delayed_work keep_alive_timer; - struct list_head cack_list; /* Connect acknowledging */ - size_t cack_count; /* protected by 'mutex' */ - struct wuie_connect_ack cack_ie; - struct uwb_rsv *rsv; /* cluster bandwidth reservation */ - - struct mutex mmcie_mutex; /* MMC WUIE handling */ - struct wuie_hdr **mmcie; /* WUIE array */ - u8 mmcies_max; - /* FIXME: make wusbhc_ops? */ - int (*start)(struct wusbhc *wusbhc); - void (*stop)(struct wusbhc *wusbhc, int delay); - int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, - u8 handle, struct wuie_hdr *wuie); - int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle); - int (*dev_info_set)(struct wusbhc *, struct wusb_dev *wusb_dev); - int (*bwa_set)(struct wusbhc *wusbhc, s8 stream_index, - const struct uwb_mas_bm *); - int (*set_ptk)(struct wusbhc *wusbhc, u8 port_idx, - u32 tkid, const void *key, size_t key_size); - int (*set_gtk)(struct wusbhc *wusbhc, - u32 tkid, const void *key, size_t key_size); - int (*set_num_dnts)(struct wusbhc *wusbhc, u8 interval, u8 slots); - - struct { - struct usb_key_descriptor descr; - u8 data[16]; /* GTK key data */ - } __attribute__((packed)) gtk; - u8 gtk_index; - u32 gtk_tkid; - - /* workqueue for WUSB security related tasks. */ - struct workqueue_struct *wq_security; - struct work_struct gtk_rekey_work; - - struct usb_encryption_descriptor *ccm1_etd; -}; - -#define usb_hcd_to_wusbhc(u) container_of((u), struct wusbhc, usb_hcd) - - -extern int wusbhc_create(struct wusbhc *); -extern int wusbhc_b_create(struct wusbhc *); -extern void wusbhc_b_destroy(struct wusbhc *); -extern void wusbhc_destroy(struct wusbhc *); -extern int wusb_dev_sysfs_add(struct wusbhc *, struct usb_device *, - struct wusb_dev *); -extern void wusb_dev_sysfs_rm(struct wusb_dev *); -extern int wusbhc_sec_create(struct wusbhc *); -extern int wusbhc_sec_start(struct wusbhc *); -extern void wusbhc_sec_stop(struct wusbhc *); -extern void wusbhc_sec_destroy(struct wusbhc *); -extern void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, - int status); -void wusbhc_reset_all(struct wusbhc *wusbhc); - -int wusbhc_pal_register(struct wusbhc *wusbhc); -void wusbhc_pal_unregister(struct wusbhc *wusbhc); - -/* - * Return @usb_dev's @usb_hcd (properly referenced) or NULL if gone - * - * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr) - * - * This is a safe assumption as @usb_dev->bus is referenced all the - * time during the @usb_dev life cycle. - */ -static inline -struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev) -{ - struct usb_hcd *usb_hcd; - usb_hcd = bus_to_hcd(usb_dev->bus); - return usb_get_hcd(usb_hcd); -} - -/* - * Increment the reference count on a wusbhc. - * - * @wusbhc's life cycle is identical to that of the underlying usb_hcd. 
- */ -static inline struct wusbhc *wusbhc_get(struct wusbhc *wusbhc) -{ - return usb_get_hcd(&wusbhc->usb_hcd) ? wusbhc : NULL; -} - -/* - * Return the wusbhc associated to a @usb_dev - * - * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr) - * - * @returns: wusbhc for @usb_dev; NULL if the @usb_dev is being torn down. - * WARNING: referenced at the usb_hcd level, unlocked - * - * FIXME: move offline - */ -static inline struct wusbhc *wusbhc_get_by_usb_dev(struct usb_device *usb_dev) -{ - struct wusbhc *wusbhc = NULL; - struct usb_hcd *usb_hcd; - if (usb_dev->devnum > 1 && !usb_dev->wusb) { - /* but root hubs */ - dev_err(&usb_dev->dev, "devnum %d wusb %d\n", usb_dev->devnum, - usb_dev->wusb); - BUG_ON(usb_dev->devnum > 1 && !usb_dev->wusb); - } - usb_hcd = usb_hcd_get_by_usb_dev(usb_dev); - if (usb_hcd == NULL) - return NULL; - BUG_ON(usb_hcd->wireless == 0); - return wusbhc = usb_hcd_to_wusbhc(usb_hcd); -} - - -static inline void wusbhc_put(struct wusbhc *wusbhc) -{ - usb_put_hcd(&wusbhc->usb_hcd); -} - -int wusbhc_start(struct wusbhc *wusbhc); -void wusbhc_stop(struct wusbhc *wusbhc); -extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *); - -/* Device connect handling */ -extern int wusbhc_devconnect_create(struct wusbhc *); -extern void wusbhc_devconnect_destroy(struct wusbhc *); -extern int wusbhc_devconnect_start(struct wusbhc *wusbhc); -extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc); -extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr, - struct wusb_dn_hdr *dn_hdr, size_t size); -extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port); -extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val, - void *priv); -extern int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, - u8 addr); - -/* Wireless USB fake Root Hub methods */ -extern int wusbhc_rh_create(struct wusbhc *); -extern void wusbhc_rh_destroy(struct wusbhc *); - -extern int wusbhc_rh_status_data(struct usb_hcd *, char *); -extern int wusbhc_rh_control(struct usb_hcd *, u16, u16, u16, char *, u16); -extern int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned); - -/* MMC handling */ -extern int wusbhc_mmcie_create(struct wusbhc *); -extern void wusbhc_mmcie_destroy(struct wusbhc *); -extern int wusbhc_mmcie_set(struct wusbhc *, u8 interval, u8 repeat_cnt, - struct wuie_hdr *); -extern void wusbhc_mmcie_rm(struct wusbhc *, struct wuie_hdr *); - -/* Bandwidth reservation */ -int wusbhc_rsv_establish(struct wusbhc *wusbhc); -void wusbhc_rsv_terminate(struct wusbhc *wusbhc); - -/* - * I've always said - * I wanted a wedding in a church... - * - * but lately I've been thinking about - * the Botanical Gardens. - * - * We could do it by the tulips. - * It'll be beautiful - * - * --Security! - */ -extern int wusb_dev_sec_add(struct wusbhc *, struct usb_device *, - struct wusb_dev *); -extern void wusb_dev_sec_rm(struct wusb_dev *) ; -extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *, - struct wusb_ckhdid *ck); -void wusbhc_gtk_rekey(struct wusbhc *wusbhc); -int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev); - - -/* WUSB Cluster ID handling */ -extern u8 wusb_cluster_id_get(void); -extern void wusb_cluster_id_put(u8); - -/* - * wusb_port_by_idx - return the port associated to a zero-based port index - * - * NOTE: valid without locking as long as wusbhc is referenced (as the - * number of ports doesn't change). 
The data pointed to has to - * be verified though :) - */ -static inline struct wusb_port *wusb_port_by_idx(struct wusbhc *wusbhc, - u8 port_idx) -{ - return &wusbhc->port[port_idx]; -} - -/* - * wusb_port_no_to_idx - Convert port number (per usb_dev->portnum) to - * a port_idx. - * - * USB stack USB ports are 1 based!! - * - * NOTE: only valid for WUSB devices!!! - */ -static inline u8 wusb_port_no_to_idx(u8 port_no) -{ - return port_no - 1; -} - -extern struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *, - struct usb_device *); - -/* - * Return a referenced wusb_dev given a @usb_dev - * - * Returns NULL if the usb_dev is being torn down. - * - * FIXME: move offline - */ -static inline -struct wusb_dev *wusb_dev_get_by_usb_dev(struct usb_device *usb_dev) -{ - struct wusbhc *wusbhc; - struct wusb_dev *wusb_dev; - wusbhc = wusbhc_get_by_usb_dev(usb_dev); - if (wusbhc == NULL) - return NULL; - mutex_lock(&wusbhc->mutex); - wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev); - mutex_unlock(&wusbhc->mutex); - wusbhc_put(wusbhc); - return wusb_dev; -} - -/* Misc */ - -extern struct workqueue_struct *wusbd; -#endif /* #ifndef __WUSBHC_H__ */ diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index b94ed4e30770..09e55ea0bf5d 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1165,9 +1165,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length, conn->cid); - if (target_get_sess_cmd(&cmd->se_cmd, true) < 0) - return iscsit_add_reject_cmd(cmd, - ISCSI_REASON_WAITING_FOR_LOGOUT, buf); + target_get_sess_cmd(&cmd->se_cmd, true); cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd, scsilun_to_int(&hdr->lun)); @@ -2004,9 +2002,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, conn->sess->se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG, cmd->sense_buffer + 2); - if (target_get_sess_cmd(&cmd->se_cmd, true) < 0) - return iscsit_add_reject_cmd(cmd, - ISCSI_REASON_WAITING_FOR_LOGOUT, buf); + target_get_sess_cmd(&cmd->se_cmd, true); /* * TASK_REASSIGN for ERL=2 / connection stays inside of @@ -4149,6 +4145,9 @@ int iscsit_close_connection( iscsit_stop_nopin_response_timer(conn); iscsit_stop_nopin_timer(conn); + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); + /* * During Connection recovery drop unacknowledged out of order * commands for this connection, and prepare the other commands @@ -4231,11 +4230,6 @@ int iscsit_close_connection( * must wait until they have completed. */ iscsit_check_conn_usage_count(conn); - target_sess_cmd_list_set_waiting(sess->se_sess); - target_wait_for_sess_cmds(sess->se_sess); - - if (conn->conn_transport->iscsit_wait_conn) - conn->conn_transport->iscsit_wait_conn(conn); ahash_request_free(conn->conn_tx_hash); if (conn->conn_rx_hash) { diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index ea482d4b1f00..0ae9e60fc4d5 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -666,6 +666,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) target_remove_from_state_list(cmd); + /* + * Clear struct se_cmd->se_lun before the handoff to FE. 
+ */ + cmd->se_lun = NULL; + spin_lock_irqsave(&cmd->t_state_lock, flags); /* * Determine if frontend context caller is requesting the stopping of @@ -693,6 +698,17 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) return cmd->se_tfo->check_stop_free(cmd); } +static void transport_lun_remove_cmd(struct se_cmd *cmd) +{ + struct se_lun *lun = cmd->se_lun; + + if (!lun) + return; + + if (cmpxchg(&cmd->lun_ref_active, true, false)) + percpu_ref_put(&lun->lun_ref); +} + static void target_complete_failure_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); @@ -783,6 +799,8 @@ static void target_handle_abort(struct se_cmd *cmd) WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0); + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); } @@ -1708,6 +1726,7 @@ static void target_complete_tmr_failure(struct work_struct *work) se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; se_cmd->se_tfo->queue_tm_rsp(se_cmd); + transport_lun_remove_cmd(se_cmd); transport_cmd_check_stop_to_fabric(se_cmd); } @@ -1898,6 +1917,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, goto queue_full; check_stop: + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; @@ -2195,6 +2215,7 @@ queue_status: transport_handle_queue_full(cmd, cmd->se_dev, ret, false); return; } + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); } @@ -2289,6 +2310,7 @@ static void target_complete_ok_work(struct work_struct *work) if (ret) goto queue_full; + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } @@ -2314,6 +2336,7 @@ static void target_complete_ok_work(struct work_struct *work) if (ret) goto queue_full; + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } @@ -2349,6 +2372,7 @@ queue_rsp: if (ret) goto queue_full; + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } @@ -2384,6 +2408,7 @@ queue_status: break; } + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; @@ -2710,6 +2735,9 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) */ if (cmd->state_active) target_remove_from_state_list(cmd); + + if (cmd->se_lun) + transport_lun_remove_cmd(cmd); } if (aborted) cmd->free_compl = &compl; @@ -2781,9 +2809,6 @@ static void target_release_cmd_kref(struct kref *kref) struct completion *abrt_compl = se_cmd->abrt_compl; unsigned long flags; - if (se_cmd->lun_ref_active) - percpu_ref_put(&se_cmd->se_lun->lun_ref); - if (se_sess) { spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); list_del_init(&se_cmd->se_cmd_list); diff --git a/drivers/tee/amdtee/Kconfig b/drivers/tee/amdtee/Kconfig index 4e32b6413b41..191f9715fa9a 100644 --- a/drivers/tee/amdtee/Kconfig +++ b/drivers/tee/amdtee/Kconfig @@ -3,6 +3,6 @@ config AMDTEE tristate "AMD-TEE" default m - depends on CRYPTO_DEV_SP_PSP + depends on CRYPTO_DEV_SP_PSP && CRYPTO_DEV_CCP_DD help This implements AMD's Trusted Execution Environment (TEE) driver. 
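A note on the amdtee/core.c hunk that follows: destroy_session() is moved ahead of amdtee_open_session() so that the two new error exits (session-slot exhaustion and a failed handle_open_session()) can drop the session reference with kref_put() rather than leaving the loaded TA behind. A minimal, self-contained sketch of that kref teardown pattern (the demo_* names are illustrative only and are not part of the driver) would look roughly like this:

	#include <linux/types.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct demo_session {
		struct kref refcount;
		u32 ta_handle;
	};

	/* Runs only when the last reference is dropped. */
	static void demo_destroy_session(struct kref *ref)
	{
		struct demo_session *sess =
			container_of(ref, struct demo_session, refcount);

		kfree(sess);
	}

	static struct demo_session *demo_alloc_session(u32 ta_handle)
	{
		struct demo_session *sess = kzalloc(sizeof(*sess), GFP_KERNEL);

		if (!sess)
			return NULL;
		kref_init(&sess->refcount);	/* caller now owns one reference */
		sess->ta_handle = ta_handle;
		return sess;
	}

	static int demo_open_session(u32 ta_handle, bool resources_exhausted)
	{
		struct demo_session *sess = demo_alloc_session(ta_handle);

		if (!sess)
			return -ENOMEM;

		if (resources_exhausted) {
			/* Error exit: put the allocation reference, which frees the session. */
			kref_put(&sess->refcount, demo_destroy_session);
			return -ENOMEM;
		}

		/* Success: the reference is kept until the matching close path. */
		return 0;
	}

Routing every failure through kref_put() means the same destroy callback serves both the open-path errors and the normal amdtee_close_session() path, so the teardown work (in the real driver, handle_unload_ta() plus the list removal and kfree()) happens exactly once.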
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c index 6370bb55f512..27b4cd77d0db 100644 --- a/drivers/tee/amdtee/core.c +++ b/drivers/tee/amdtee/core.c @@ -139,6 +139,9 @@ static struct amdtee_session *find_session(struct amdtee_context_data *ctxdata, u32 index = get_session_index(session); struct amdtee_session *sess; + if (index >= TEE_NUM_SESSIONS) + return NULL; + list_for_each_entry(sess, &ctxdata->sess_list, list_node) if (ta_handle == sess->ta_handle && test_bit(index, sess->sess_mask)) @@ -212,6 +215,19 @@ unlock: return rc; } +static void destroy_session(struct kref *ref) +{ + struct amdtee_session *sess = container_of(ref, struct amdtee_session, + refcount); + + /* Unload the TA from TEE */ + handle_unload_ta(sess->ta_handle); + mutex_lock(&session_list_mutex); + list_del(&sess->list_node); + mutex_unlock(&session_list_mutex); + kfree(sess); +} + int amdtee_open_session(struct tee_context *ctx, struct tee_ioctl_open_session_arg *arg, struct tee_param *param) @@ -236,15 +252,13 @@ int amdtee_open_session(struct tee_context *ctx, /* Load the TA binary into TEE environment */ handle_load_ta(ta, ta_size, arg); - if (arg->ret == TEEC_SUCCESS) { - mutex_lock(&session_list_mutex); - sess = alloc_session(ctxdata, arg->session); - mutex_unlock(&session_list_mutex); - } - if (arg->ret != TEEC_SUCCESS) goto out; + mutex_lock(&session_list_mutex); + sess = alloc_session(ctxdata, arg->session); + mutex_unlock(&session_list_mutex); + if (!sess) { rc = -ENOMEM; goto out; @@ -259,40 +273,29 @@ int amdtee_open_session(struct tee_context *ctx, if (i >= TEE_NUM_SESSIONS) { pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS); + kref_put(&sess->refcount, destroy_session); rc = -ENOMEM; goto out; } /* Open session with loaded TA */ handle_open_session(arg, &session_info, param); - - if (arg->ret == TEEC_SUCCESS) { - sess->session_info[i] = session_info; - set_session_id(sess->ta_handle, i, &arg->session); - } else { + if (arg->ret != TEEC_SUCCESS) { pr_err("open_session failed %d\n", arg->ret); spin_lock(&sess->lock); clear_bit(i, sess->sess_mask); spin_unlock(&sess->lock); + kref_put(&sess->refcount, destroy_session); + goto out; } + + sess->session_info[i] = session_info; + set_session_id(sess->ta_handle, i, &arg->session); out: free_pages((u64)ta, get_order(ta_size)); return rc; } -static void destroy_session(struct kref *ref) -{ - struct amdtee_session *sess = container_of(ref, struct amdtee_session, - refcount); - - /* Unload the TA from TEE */ - handle_unload_ta(sess->ta_handle); - mutex_lock(&session_list_mutex); - list_del(&sess->list_node); - mutex_unlock(&session_list_mutex); - kfree(sess); -} - int amdtee_close_session(struct tee_context *ctx, u32 session) { struct amdtee_context_data *ctxdata = ctx->data; diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c index fe83d7a210d4..4ae8c856c88e 100644 --- a/drivers/thermal/cpufreq_cooling.c +++ b/drivers/thermal/cpufreq_cooling.c @@ -431,6 +431,10 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) { struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; + struct cpumask *cpus; + unsigned int frequency; + unsigned long max_capacity, capacity; + int ret; /* Request state should be less than max_level */ if (WARN_ON(state > cpufreq_cdev->max_level)) @@ -442,8 +446,19 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, cpufreq_cdev->cpufreq_state = state; - return freq_qos_update_request(&cpufreq_cdev->qos_req, - 
get_state_freq(cpufreq_cdev, state)); + frequency = get_state_freq(cpufreq_cdev, state); + + ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency); + + if (ret > 0) { + cpus = cpufreq_cdev->policy->cpus; + max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus)); + capacity = frequency * max_capacity; + capacity /= cpufreq_cdev->policy->cpuinfo.max_freq; + arch_set_thermal_pressure(cpus, max_capacity - capacity); + } + + return ret; } /* Bind cpufreq callbacks to thermal cooling device ops */ diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c index 7130e90773ed..a478cff8162a 100644 --- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c +++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c @@ -19,6 +19,7 @@ #include <linux/acpi.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> +#include <linux/fs.h> #include "acpi_thermal_rel.h" static acpi_handle acpi_thermal_rel_handle; diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index 53216dcbe173..f74b2473440d 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -651,7 +651,7 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = { }; static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = { - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT }, + X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_MWAIT, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c index 5d33b350da1c..d704fc104cfd 100644 --- a/drivers/thermal/intel/intel_quark_dts_thermal.c +++ b/drivers/thermal/intel/intel_quark_dts_thermal.c @@ -64,9 +64,6 @@ #include <asm/cpu_device_id.h> #include <asm/iosf_mbi.h> -#define X86_FAMILY_QUARK 0x5 -#define X86_MODEL_QUARK_X1000 0x9 - /* DTS reset is programmed via QRK_MBI_UNIT_SOC */ #define QRK_DTS_REG_OFFSET_RESET 0x34 #define QRK_DTS_RESET_BIT BIT(0) @@ -433,7 +430,7 @@ err_ret: } static const struct x86_cpu_id qrk_thermal_ids[] __initconst = { - { X86_VENDOR_INTEL, X86_FAMILY_QUARK, X86_MODEL_QUARK_X1000 }, + X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids); diff --git a/drivers/thermal/intel/intel_soc_dts_thermal.c b/drivers/thermal/intel/intel_soc_dts_thermal.c index f4be9c14e5d9..92e5c19d03f6 100644 --- a/drivers/thermal/intel/intel_soc_dts_thermal.c +++ b/drivers/thermal/intel/intel_soc_dts_thermal.c @@ -36,8 +36,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data) } static const struct x86_cpu_id soc_thermal_ids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, 0, - BYT_SOC_DTS_APIC_IRQ}, + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, BYT_SOC_DTS_APIC_IRQ), {} }; MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids); diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c index ddb4a973c698..a006b9fd1d72 100644 --- a/drivers/thermal/intel/x86_pkg_temp_thermal.c +++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c @@ -63,7 +63,7 @@ static int max_id __read_mostly; /* Array of zone pointers */ static struct zone_device **zones; /* Serializes interrupt notification, work and hotplug */ -static DEFINE_SPINLOCK(pkg_temp_lock); +static DEFINE_RAW_SPINLOCK(pkg_temp_lock); /* Protects zone operation in the work function against hotplug 
removal */ static DEFINE_MUTEX(thermal_zone_mutex); @@ -266,12 +266,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) u64 msr_val, wr_val; mutex_lock(&thermal_zone_mutex); - spin_lock_irq(&pkg_temp_lock); + raw_spin_lock_irq(&pkg_temp_lock); ++pkg_work_cnt; zonedev = pkg_temp_thermal_get_dev(cpu); if (!zonedev) { - spin_unlock_irq(&pkg_temp_lock); + raw_spin_unlock_irq(&pkg_temp_lock); mutex_unlock(&thermal_zone_mutex); return; } @@ -285,7 +285,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) } enable_pkg_thres_interrupt(); - spin_unlock_irq(&pkg_temp_lock); + raw_spin_unlock_irq(&pkg_temp_lock); /* * If tzone is not NULL, then thermal_zone_mutex will prevent the @@ -310,7 +310,7 @@ static int pkg_thermal_notify(u64 msr_val) struct zone_device *zonedev; unsigned long flags; - spin_lock_irqsave(&pkg_temp_lock, flags); + raw_spin_lock_irqsave(&pkg_temp_lock, flags); ++pkg_interrupt_cnt; disable_pkg_thres_interrupt(); @@ -322,7 +322,7 @@ static int pkg_thermal_notify(u64 msr_val) pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work); } - spin_unlock_irqrestore(&pkg_temp_lock, flags); + raw_spin_unlock_irqrestore(&pkg_temp_lock, flags); return 0; } @@ -368,9 +368,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) zonedev->msr_pkg_therm_high); cpumask_set_cpu(cpu, &zonedev->cpumask); - spin_lock_irq(&pkg_temp_lock); + raw_spin_lock_irq(&pkg_temp_lock); zones[id] = zonedev; - spin_unlock_irq(&pkg_temp_lock); + raw_spin_unlock_irq(&pkg_temp_lock); return 0; } @@ -407,7 +407,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) } /* Protect against work and interrupts */ - spin_lock_irq(&pkg_temp_lock); + raw_spin_lock_irq(&pkg_temp_lock); /* * Check whether this cpu was the current target and store the new @@ -439,9 +439,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) * To cancel the work we need to drop the lock, otherwise * we might deadlock if the work needs to be flushed. */ - spin_unlock_irq(&pkg_temp_lock); + raw_spin_unlock_irq(&pkg_temp_lock); cancel_delayed_work_sync(&zonedev->work); - spin_lock_irq(&pkg_temp_lock); + raw_spin_lock_irq(&pkg_temp_lock); /* * If this is not the last cpu in the package and the work * did not run after we dropped the lock above, then we @@ -452,7 +452,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) pkg_thermal_schedule_work(target, &zonedev->work); } - spin_unlock_irq(&pkg_temp_lock); + raw_spin_unlock_irq(&pkg_temp_lock); /* Final cleanup if this is the last cpu */ if (lastcpu) @@ -478,7 +478,7 @@ static int pkg_thermal_cpu_online(unsigned int cpu) } static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = { - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_PTS }, + X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_PTS, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids); diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c index b7980c856898..68c1b93ac5d9 100644 --- a/drivers/thunderbolt/domain.c +++ b/drivers/thunderbolt/domain.c @@ -147,10 +147,10 @@ static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr, for (ret = 0, i = 0; i < tb->nboot_acl; i++) { if (!uuid_is_null(&uuids[i])) - ret += snprintf(buf + ret, PAGE_SIZE - ret, "%pUb", + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb", &uuids[i]); - ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s", + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s", i < tb->nboot_acl - 1 ? 
"," : "\n"); } diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c index 921d164b3f35..b451a5aa90b5 100644 --- a/drivers/thunderbolt/eeprom.c +++ b/drivers/thunderbolt/eeprom.c @@ -247,7 +247,7 @@ struct tb_drom_entry_header { struct tb_drom_entry_generic { struct tb_drom_entry_header header; - u8 data[0]; + u8 data[]; } __packed; struct tb_drom_entry_port { diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 13e88109742e..fbbe32ca1e69 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -114,7 +114,7 @@ struct icm_notification { struct ep_name_entry { u8 len; u8 type; - u8 data[0]; + u8 data[]; }; #define EP_NAME_INTEL_VSS 0x10 diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index ad5479f21174..a2ce99051c51 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -348,6 +348,12 @@ out: return ret; } +static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + return -EPERM; +} + static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val, size_t bytes) { @@ -393,6 +399,7 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id, config.read_only = true; } else { config.name = "nvm_non_active"; + config.reg_read = tb_switch_nvm_no_read; config.reg_write = tb_switch_nvm_write; config.root_only = true; } @@ -947,7 +954,7 @@ static bool tb_port_is_width_supported(struct tb_port *port, int width) ret = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_0, 1); if (ret) - return ret; + return false; widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index b341fc60c4ba..3d084cec136f 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -259,6 +259,7 @@ int usb4_switch_setup(struct tb_switch *sw) /** * usb4_switch_read_uid() - Read UID from USB4 router * @sw: USB4 router + * @uid: UID is stored here * * Reads 64-bit UID from USB4 router config space. */ @@ -296,6 +297,9 @@ static int usb4_switch_drom_read_block(struct tb_switch *sw, /** * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM * @sw: USB4 router + * @address: Byte address inside DROM to start reading + * @buf: Buffer where the DROM content is stored + * @size: Number of bytes to read from DROM * * Uses USB4 router operations to read router DROM. For devices this * should always work but for hosts it may return %-EOPNOTSUPP in which diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index 42345e79920c..c5f0d936b003 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -18,6 +18,7 @@ #include <linux/sched.h> #include <linux/serdev.h> #include <linux/slab.h> +#include <linux/platform_data/x86/apple.h> static bool is_registered; static DEFINE_IDA(ctrl_ida); @@ -631,6 +632,15 @@ static int acpi_serdev_check_resources(struct serdev_controller *ctrl, if (ret) return ret; + /* + * Apple machines provide an empty resource template, so on those + * machines just look for immediate children with a "baud" property + * (from the _DSM method) instead. 
+ */ + if (!lookup.controller_handle && x86_apple_machine && + !acpi_dev_get_property(adev, "baud", ACPI_TYPE_BUFFER, NULL)) + acpi_get_parent(adev->handle, &lookup.controller_handle); + /* Make sure controller and ResourceSource handle match */ if (ACPI_HANDLE(ctrl->dev.parent) != lookup.controller_handle) return -ENODEV; diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index d1cdd2ab8b4c..d367803e2044 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ -265,7 +265,6 @@ struct device *serdev_tty_port_register(struct tty_port *port, struct device *parent, struct tty_driver *drv, int idx) { - const struct tty_port_client_operations *old_ops; struct serdev_controller *ctrl; struct serport *serport; int ret; @@ -284,7 +283,6 @@ struct device *serdev_tty_port_register(struct tty_port *port, ctrl->ops = &ctrl_ops; - old_ops = port->client_ops; port->client_ops = &client_ops; port->client_data = ctrl; @@ -297,7 +295,7 @@ struct device *serdev_tty_port_register(struct tty_port *port, err_reset_data: port->client_data = NULL; - port->client_ops = old_ops; + port->client_ops = &tty_port_default_client_ops; serdev_controller_put(ctrl); return ERR_PTR(ret); @@ -312,8 +310,8 @@ int serdev_tty_port_unregister(struct tty_port *port) return -ENODEV; serdev_controller_remove(ctrl); - port->client_ops = NULL; port->client_data = NULL; + port->client_ops = &tty_port_default_client_ops; serdev_controller_put(ctrl); return 0; diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c index d657aa14c3e4..c33e02cbde93 100644 --- a/drivers/tty/serial/8250/8250_aspeed_vuart.c +++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c @@ -446,7 +446,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev) port.port.line = rc; port.port.irq = irq_of_parse_and_map(np, 0); - port.port.irqflags = IRQF_SHARED; port.port.handle_irq = aspeed_vuart_handle_irq; port.port.iotype = UPIO_MEM; port.port.type = PORT_16550A; diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 0894a22fd702..f2a33c9082a6 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -174,7 +174,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up) struct hlist_head *h; struct hlist_node *n; struct irq_info *i; - int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? 
IRQF_SHARED : 0; + int ret; mutex_lock(&hash_mutex); @@ -209,9 +209,8 @@ static int serial_link_irq_chain(struct uart_8250_port *up) INIT_LIST_HEAD(&up->list); i->head = &up->list; spin_unlock_irq(&i->lock); - irq_flags |= up->port.irqflags; ret = request_irq(up->port.irq, serial8250_interrupt, - irq_flags, up->port.name, i); + up->port.irqflags, up->port.name, i); if (ret < 0) serial_do_unlink(i, up); } diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index 91e9b070d36d..d330da76d6b6 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -25,6 +25,14 @@ #include "8250.h" +#define PCI_DEVICE_ID_ACCES_COM_2S 0x1052 +#define PCI_DEVICE_ID_ACCES_COM_4S 0x105d +#define PCI_DEVICE_ID_ACCES_COM_8S 0x106c +#define PCI_DEVICE_ID_ACCES_COM232_8 0x10a8 +#define PCI_DEVICE_ID_ACCES_COM_2SM 0x10d2 +#define PCI_DEVICE_ID_ACCES_COM_4SM 0x10db +#define PCI_DEVICE_ID_ACCES_COM_8SM 0x10ea + #define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002 #define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004 #define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a @@ -677,6 +685,22 @@ static int __maybe_unused exar_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume); +static const struct exar8250_board acces_com_2x = { + .num_ports = 2, + .setup = pci_xr17c154_setup, +}; + +static const struct exar8250_board acces_com_4x = { + .num_ports = 4, + .setup = pci_xr17c154_setup, +}; + +static const struct exar8250_board acces_com_8x = { + .num_ports = 8, + .setup = pci_xr17c154_setup, +}; + + static const struct exar8250_board pbn_fastcom335_2 = { .num_ports = 2, .setup = pci_fastcom335_setup, @@ -745,6 +769,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = { } static const struct pci_device_id exar_pci_tbl[] = { + EXAR_DEVICE(ACCESSIO, ACCES_COM_2S, acces_com_2x), + EXAR_DEVICE(ACCESSIO, ACCES_COM_4S, acces_com_4x), + EXAR_DEVICE(ACCESSIO, ACCES_COM_8S, acces_com_8x), + EXAR_DEVICE(ACCESSIO, ACCES_COM232_8, acces_com_8x), + EXAR_DEVICE(ACCESSIO, ACCES_COM_2SM, acces_com_2x), + EXAR_DEVICE(ACCESSIO, ACCES_COM_4SM, acces_com_4x), + EXAR_DEVICE(ACCESSIO, ACCES_COM_8SM, acces_com_8x), + + CONNECT_DEVICE(XR17C152, UART_2_232, pbn_connect), CONNECT_DEVICE(XR17C154, UART_4_232, pbn_connect), CONNECT_DEVICE(XR17C158, UART_8_232, pbn_connect), diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c index 531ad67395e0..f6687756ec5e 100644 --- a/drivers/tty/serial/8250/8250_of.c +++ b/drivers/tty/serial/8250/8250_of.c @@ -202,7 +202,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev, port->type = type; port->uartclk = clk; - port->irqflags |= IRQF_SHARED; if (of_property_read_bool(np, "no-loopback-test")) port->flags |= UPF_SKIP_TEST; diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 6f343ca08440..76fe72bfb8bb 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -569,7 +569,7 @@ static void omap8250_uart_qos_work(struct work_struct *work) struct omap8250_priv *priv; priv = container_of(work, struct omap8250_priv, qos_work); - pm_qos_update_request(&priv->pm_qos_request, priv->latency); + cpu_latency_qos_update_request(&priv->pm_qos_request, priv->latency); } #ifdef CONFIG_SERIAL_8250_DMA @@ -1222,10 +1222,9 @@ static int omap8250_probe(struct platform_device *pdev) DEFAULT_CLK_SPEED); } - priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE; - priv->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE; - 
pm_qos_add_request(&priv->pm_qos_request, PM_QOS_CPU_DMA_LATENCY, - priv->latency); + priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; + priv->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; + cpu_latency_qos_add_request(&priv->pm_qos_request, priv->latency); INIT_WORK(&priv->qos_work, omap8250_uart_qos_work); spin_lock_init(&priv->rx_dma_lock); @@ -1295,7 +1294,7 @@ static int omap8250_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); serial8250_unregister_port(priv->line); - pm_qos_remove_request(&priv->pm_qos_request); + cpu_latency_qos_remove_request(&priv->pm_qos_request); device_init_wakeup(&pdev->dev, false); return 0; } @@ -1445,7 +1444,7 @@ static int omap8250_runtime_suspend(struct device *dev) if (up->dma && up->dma->rxchan) omap_8250_rx_dma_flush(up); - priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE; + priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; schedule_work(&priv->qos_work); return 0; diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 430e3467aff7..0325f2e53b74 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -2177,6 +2177,10 @@ int serial8250_do_startup(struct uart_port *port) } } + /* Check if we need to have shared IRQs */ + if (port->irq && (up->port.flags & UPF_SHARE_IRQ)) + up->port.irqflags |= IRQF_SHARED; + if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) { unsigned char iir1; /* diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c index 3bdd56a1021b..ea12f10610b6 100644 --- a/drivers/tty/serial/ar933x_uart.c +++ b/drivers/tty/serial/ar933x_uart.c @@ -286,6 +286,10 @@ static void ar933x_uart_set_termios(struct uart_port *port, ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, AR933X_UART_CS_HOST_INT_EN); + /* enable RX and TX ready overide */ + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, + AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE); + /* reenable the UART */ ar933x_uart_rmw(up, AR933X_UART_CS_REG, AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S, @@ -418,6 +422,10 @@ static int ar933x_uart_startup(struct uart_port *port) ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, AR933X_UART_CS_HOST_INT_EN); + /* enable RX and TX ready overide */ + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, + AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE); + /* Enable RX interrupts */ up->ier = AR933X_UART_INT_RX_VALID; ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index c15c398c88a9..a39c87a7c2e1 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -570,7 +570,8 @@ static void atmel_stop_tx(struct uart_port *port) atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); if (atmel_uart_is_half_duplex(port)) - atmel_start_rx(port); + if (!atomic_read(&atmel_port->tasklet_shutdown)) + atmel_start_rx(port); } diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index 19d5a4cf29a6..d4b81b06e0cb 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c @@ -1373,6 +1373,7 @@ static struct console cpm_scc_uart_console = { static int __init cpm_uart_console_init(void) { + cpm_muram_init(); register_console(&cpm_scc_uart_console); return 0; } diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 91e2805e6441..c31b8f3db6bf 100644 --- 
a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -264,6 +264,7 @@ struct lpuart_port { int rx_dma_rng_buf_len; unsigned int dma_tx_nents; wait_queue_head_t dma_wait; + bool id_allocated; }; struct lpuart_soc_data { @@ -2390,6 +2391,8 @@ static int __init lpuart32_imx_early_console_setup(struct earlycon_device *devic OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup); +EARLYCON_DECLARE(lpuart, lpuart_early_console_setup); +EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup); #define LPUART_CONSOLE (&lpuart_console) #define LPUART32_CONSOLE (&lpuart32_console) @@ -2420,19 +2423,6 @@ static int lpuart_probe(struct platform_device *pdev) if (!sport) return -ENOMEM; - ret = of_alias_get_id(np, "serial"); - if (ret < 0) { - ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL); - if (ret < 0) { - dev_err(&pdev->dev, "port line is full, add device failed\n"); - return ret; - } - } - if (ret >= ARRAY_SIZE(lpuart_ports)) { - dev_err(&pdev->dev, "serial%d out of range\n", ret); - return -EINVAL; - } - sport->port.line = ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); sport->port.membase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(sport->port.membase)) @@ -2477,9 +2467,25 @@ static int lpuart_probe(struct platform_device *pdev) } } + ret = of_alias_get_id(np, "serial"); + if (ret < 0) { + ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL); + if (ret < 0) { + dev_err(&pdev->dev, "port line is full, add device failed\n"); + return ret; + } + sport->id_allocated = true; + } + if (ret >= ARRAY_SIZE(lpuart_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", ret); + ret = -EINVAL; + goto failed_out_of_range; + } + sport->port.line = ret; + ret = lpuart_enable_clks(sport); if (ret) - return ret; + goto failed_clock_enable; sport->port.uartclk = lpuart_get_baud_clk_rate(sport); lpuart_ports[sport->port.line] = sport; @@ -2529,6 +2535,10 @@ static int lpuart_probe(struct platform_device *pdev) failed_attach_port: failed_irq_request: lpuart_disable_clks(sport); +failed_clock_enable: +failed_out_of_range: + if (sport->id_allocated) + ida_simple_remove(&fsl_lpuart_ida, sport->port.line); return ret; } @@ -2538,7 +2548,8 @@ static int lpuart_remove(struct platform_device *pdev) uart_remove_one_port(&lpuart_reg, &sport->port); - ida_simple_remove(&fsl_lpuart_ida, sport->port.line); + if (sport->id_allocated) + ida_simple_remove(&fsl_lpuart_ida, sport->port.line); lpuart_disable_clks(sport); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 0c6c63166250..d337782b3648 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -599,7 +599,7 @@ static void imx_uart_dma_tx(struct imx_port *sport) sport->tx_bytes = uart_circ_chars_pending(xmit); - if (xmit->tail < xmit->head) { + if (xmit->tail < xmit->head || xmit->head == 0) { sport->dma_tx_nents = 1; sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes); } else { diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index c12a12556339..4e9a590712cb 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -851,7 +851,7 @@ static int mvebu_uart_probe(struct platform_device *pdev) port->membase = devm_ioremap_resource(&pdev->dev, reg); if (IS_ERR(port->membase)) - return -PTR_ERR(port->membase); + return 
PTR_ERR(port->membase); mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart), GFP_KERNEL); diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 48017cec7f2f..e0b720ac754b 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -831,7 +831,7 @@ static void serial_omap_uart_qos_work(struct work_struct *work) struct uart_omap_port *up = container_of(work, struct uart_omap_port, qos_work); - pm_qos_update_request(&up->pm_qos_request, up->latency); + cpu_latency_qos_update_request(&up->pm_qos_request, up->latency); } static void @@ -1722,10 +1722,9 @@ static int serial_omap_probe(struct platform_device *pdev) DEFAULT_CLK_SPEED); } - up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE; - up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE; - pm_qos_add_request(&up->pm_qos_request, - PM_QOS_CPU_DMA_LATENCY, up->latency); + up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; + up->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; + cpu_latency_qos_add_request(&up->pm_qos_request, up->latency); INIT_WORK(&up->qos_work, serial_omap_uart_qos_work); platform_set_drvdata(pdev, up); @@ -1759,7 +1758,7 @@ err_add_port: pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - pm_qos_remove_request(&up->pm_qos_request); + cpu_latency_qos_remove_request(&up->pm_qos_request); device_init_wakeup(up->dev, false); err_rs485: err_port_line: @@ -1777,7 +1776,7 @@ static int serial_omap_remove(struct platform_device *dev) pm_runtime_dont_use_autosuspend(up->dev); pm_runtime_put_sync(up->dev); pm_runtime_disable(up->dev); - pm_qos_remove_request(&up->pm_qos_request); + cpu_latency_qos_remove_request(&up->pm_qos_request); device_init_wakeup(&dev->dev, false); return 0; @@ -1869,7 +1868,7 @@ static int serial_omap_runtime_suspend(struct device *dev) serial_omap_enable_wakeup(up, true); - up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE; + up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; schedule_work(&up->qos_work); return 0; diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 191abb18fc2a..0bd1684cabb3 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -129,6 +129,7 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop); static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop); static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port); static void qcom_geni_serial_stop_rx(struct uart_port *uport); +static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop); static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200, 32000000, 48000000, 64000000, 80000000, @@ -599,7 +600,7 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport) u32 irq_en; u32 status; struct qcom_geni_serial_port *port = to_dev_port(uport, uport); - u32 irq_clear = S_CMD_DONE_EN; + u32 s_irq_status; irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN); irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN); @@ -615,10 +616,19 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport) return; geni_se_cancel_s_cmd(&port->se); - qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG, - S_GENI_CMD_CANCEL, false); + qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS, + S_CMD_CANCEL_EN, true); + /* + * If timeout occurs secondary engine remains active + * and Abort sequence is executed. 
+ */ + s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS); + /* Flush the Rx buffer */ + if (s_irq_status & S_RX_FIFO_LAST_EN) + qcom_geni_serial_handle_rx(uport, true); + writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR); + status = readl(uport->membase + SE_GENI_STATUS); - writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR); if (status & S_GENI_CMD_ACTIVE) qcom_geni_serial_abort_rx(uport); } diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index 33034b852a51..8de8bac9c6c7 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -692,11 +692,22 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup, count, DMA_TO_DEVICE); } +static void do_handle_rx_pio(struct tegra_uart_port *tup) +{ + struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port); + struct tty_port *port = &tup->uport.state->port; + + tegra_uart_handle_rx_pio(tup, port); + if (tty) { + tty_flip_buffer_push(port); + tty_kref_put(tty); + } +} + static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup, unsigned int residue) { struct tty_port *port = &tup->uport.state->port; - struct tty_struct *tty = tty_port_tty_get(port); unsigned int count; async_tx_ack(tup->rx_dma_desc); @@ -705,11 +716,7 @@ static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup, /* If we are here, DMA is stopped */ tegra_uart_copy_rx_to_tty(tup, port, count); - tegra_uart_handle_rx_pio(tup, port); - if (tty) { - tty_flip_buffer_push(port); - tty_kref_put(tty); - } + do_handle_rx_pio(tup); } static void tegra_uart_rx_dma_complete(void *args) @@ -749,8 +756,10 @@ static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup) { struct dma_tx_state state; - if (!tup->rx_dma_active) + if (!tup->rx_dma_active) { + do_handle_rx_pio(tup); return; + } dmaengine_terminate_all(tup->rx_dma_chan); dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state); @@ -816,18 +825,6 @@ static void tegra_uart_handle_modem_signal_change(struct uart_port *u) uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS); } -static void do_handle_rx_pio(struct tegra_uart_port *tup) -{ - struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port); - struct tty_port *port = &tup->uport.state->port; - - tegra_uart_handle_rx_pio(tup, port); - if (tty) { - tty_flip_buffer_push(port); - tty_kref_put(tty); - } -} - static irqreturn_t tegra_uart_isr(int irq, void *data) { struct tegra_uart_port *tup = data; diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index a1453fe10862..5a6f36b391d9 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1589,9 +1589,7 @@ void tty_kclose(struct tty_struct *tty) tty_debug_hangup(tty, "freeing structure\n"); /* * The release_tty function takes care of the details of clearing - * the slots and preserving the termios structure. The tty_unlock_pair - * should be safe as we keep a kref while the tty is locked (so the - * unlock never unlocks a freed tty). + * the slots and preserving the termios structure. */ mutex_lock(&tty_mutex); tty_port_set_kopened(tty->port, 0); @@ -1621,9 +1619,7 @@ void tty_release_struct(struct tty_struct *tty, int idx) tty_debug_hangup(tty, "freeing structure\n"); /* * The release_tty function takes care of the details of clearing - * the slots and preserving the termios structure. The tty_unlock_pair - * should be safe as we keep a kref while the tty is locked (so the - * unlock never unlocks a freed tty). + * the slots and preserving the termios structure. 
*/ mutex_lock(&tty_mutex); release_tty(tty, idx); @@ -2734,9 +2730,11 @@ static int compat_tty_tiocgserial(struct tty_struct *tty, struct serial_struct32 v32; struct serial_struct v; int err; - memset(&v, 0, sizeof(struct serial_struct)); - if (!tty->ops->set_serial) + memset(&v, 0, sizeof(v)); + memset(&v32, 0, sizeof(v32)); + + if (!tty->ops->get_serial) return -ENOTTY; err = tty->ops->get_serial(tty, &v); if (!err) { diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 044c3cbdcfa4..ea80bf872f54 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -52,10 +52,11 @@ static void tty_port_default_wakeup(struct tty_port *port) } } -static const struct tty_port_client_operations default_client_ops = { +const struct tty_port_client_operations tty_port_default_client_ops = { .receive_buf = tty_port_default_receive_buf, .write_wakeup = tty_port_default_wakeup, }; +EXPORT_SYMBOL_GPL(tty_port_default_client_ops); void tty_port_init(struct tty_port *port) { @@ -68,7 +69,7 @@ void tty_port_init(struct tty_port *port) spin_lock_init(&port->lock); port->close_delay = (50 * HZ) / 100; port->closing_wait = (3000 * HZ) / 100; - port->client_ops = &default_client_ops; + port->client_ops = &tty_port_default_client_ops; kref_init(&port->kref); } EXPORT_SYMBOL(tty_port_init); diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c index 78732feaf65b..d7d2e4b844bc 100644 --- a/drivers/tty/vt/selection.c +++ b/drivers/tty/vt/selection.c @@ -16,6 +16,7 @@ #include <linux/tty.h> #include <linux/sched.h> #include <linux/mm.h> +#include <linux/mutex.h> #include <linux/slab.h> #include <linux/types.h> @@ -29,6 +30,8 @@ #include <linux/console.h> #include <linux/tty_flip.h> +#include <linux/sched/signal.h> + /* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */ #define isspace(c) ((c) == ' ') @@ -43,6 +46,7 @@ static volatile int sel_start = -1; /* cleared by clear_selection */ static int sel_end; static int sel_buffer_lth; static char *sel_buffer; +static DEFINE_MUTEX(sel_lock); /* clear_selection, highlight and highlight_pointer can be called from interrupt (via scrollback/front) */ @@ -177,14 +181,14 @@ int set_selection_user(const struct tiocl_selection __user *sel, return set_selection_kernel(&v, tty); } -int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty) +static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty) { struct vc_data *vc = vc_cons[fg_console].d; int new_sel_start, new_sel_end, spc; char *bp, *obp; int i, ps, pe, multiplier; u32 c; - int mode; + int mode, ret = 0; poke_blanked_console(); @@ -332,7 +336,21 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty) } } sel_buffer_lth = bp - sel_buffer; - return 0; + + return ret; +} + +int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty) +{ + int ret; + + mutex_lock(&sel_lock); + console_lock(); + ret = __set_selection_kernel(v, tty); + console_unlock(); + mutex_unlock(&sel_lock); + + return ret; } EXPORT_SYMBOL_GPL(set_selection_kernel); @@ -350,6 +368,7 @@ int paste_selection(struct tty_struct *tty) unsigned int count; struct tty_ldisc *ld; DECLARE_WAITQUEUE(wait, current); + int ret = 0; console_lock(); poke_blanked_console(); @@ -361,10 +380,17 @@ int paste_selection(struct tty_struct *tty) tty_buffer_lock_exclusive(&vc->port); add_wait_queue(&vc->paste_wait, &wait); + mutex_lock(&sel_lock); while (sel_buffer && sel_buffer_lth > pasted) { set_current_state(TASK_INTERRUPTIBLE); + if 
(signal_pending(current)) { + ret = -EINTR; + break; + } if (tty_throttled(tty)) { + mutex_unlock(&sel_lock); schedule(); + mutex_lock(&sel_lock); continue; } __set_current_state(TASK_RUNNING); @@ -373,11 +399,12 @@ int paste_selection(struct tty_struct *tty) count); pasted += count; } + mutex_unlock(&sel_lock); remove_wait_queue(&vc->paste_wait, &wait); __set_current_state(TASK_RUNNING); tty_buffer_unlock_exclusive(&vc->port); tty_ldisc_deref(ld); - return 0; + return ret; } EXPORT_SYMBOL_GPL(paste_selection); diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 35d21cdb60d0..15d27698054a 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -936,10 +936,21 @@ static void flush_scrollback(struct vc_data *vc) WARN_CONSOLE_UNLOCKED(); set_origin(vc); - if (vc->vc_sw->con_flush_scrollback) + if (vc->vc_sw->con_flush_scrollback) { vc->vc_sw->con_flush_scrollback(vc); - else + } else if (con_is_visible(vc)) { + /* + * When no con_flush_scrollback method is provided then the + * legacy way for flushing the scrollback buffer is to use + * a side effect of the con_switch method. We do it only on + * the foreground console as background consoles have no + * scrollback buffers in that case and we obviously don't + * want to switch to them. + */ + hide_cursor(vc); vc->vc_sw->con_switch(vc); + set_cursor(vc); + } } /* @@ -3035,10 +3046,8 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) switch (type) { case TIOCL_SETSEL: - console_lock(); ret = set_selection_user((struct tiocl_selection __user *)(p+1), tty); - console_unlock(); break; case TIOCL_PASTESEL: ret = paste_selection(tty); diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 8b0ed139592f..ee6c91ef1f6c 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -876,15 +876,20 @@ int vt_ioctl(struct tty_struct *tty, return -EINVAL; for (i = 0; i < MAX_NR_CONSOLES; i++) { + struct vc_data *vcp; + if (!vc_cons[i].d) continue; console_lock(); - if (v.v_vlin) - vc_cons[i].d->vc_scan_lines = v.v_vlin; - if (v.v_clin) - vc_cons[i].d->vc_font.height = v.v_clin; - vc_cons[i].d->vc_resize_user = 1; - vc_resize(vc_cons[i].d, v.v_cols, v.v_rows); + vcp = vc_cons[i].d; + if (vcp) { + if (v.v_vlin) + vcp->vc_scan_lines = v.v_vlin; + if (v.v_clin) + vcp->vc_font.height = v.v_clin; + vcp->vc_resize_user = 1; + vc_resize(vcp, v.v_cols, v.v_rows); + } console_unlock(); } break; diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index 635cf0466b59..e9fed9a88737 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c @@ -350,7 +350,7 @@ struct l1_code { u8 string_header[E4_L1_STRING_HEADER]; u8 page_number_to_block_index[E4_MAX_PAGE_NUMBER]; struct block_index page_header[E4_NO_SWAPPAGE_HEADERS]; - u8 code[0]; + u8 code[]; } __packed; /* structures describing a block within a DSP page */ diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h index d3bdc4cc47aa..d96658e2e209 100644 --- a/drivers/usb/atm/usbatm.h +++ b/drivers/usb/atm/usbatm.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ /****************************************************************************** * usbatm.h - Generic USB xDSL driver core * @@ -164,7 +164,7 @@ struct usbatm_data { unsigned char *cell_buf; /* holds partial rx cell */ unsigned int buf_usage; - struct urb *urbs[0]; + struct urb *urbs[]; }; static inline void *to_usbatm_driver_data(struct usb_interface *intf) diff --git a/drivers/usb/c67x00/c67x00-hcd.h 
b/drivers/usb/c67x00/c67x00-hcd.h index 3b181d4c7a03..6b6b04a3fe0f 100644 --- a/drivers/usb/c67x00/c67x00-hcd.h +++ b/drivers/usb/c67x00/c67x00-hcd.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * c67x00-hcd.h: Cypress C67X00 USB HCD * diff --git a/drivers/usb/c67x00/c67x00.h b/drivers/usb/c67x00/c67x00.h index 7ce10928b037..a4456d0fffa9 100644 --- a/drivers/usb/c67x00/c67x00.h +++ b/drivers/usb/c67x00/c67x00.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * c67x00.h: Cypress C67X00 USB register and field definitions * diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c index b0a29efe7d31..deeea618ba33 100644 --- a/drivers/usb/cdns3/cdns3-pci-wrap.c +++ b/drivers/usb/cdns3/cdns3-pci-wrap.c @@ -201,4 +201,4 @@ MODULE_DEVICE_TABLE(pci, cdns3_pci_ids); MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>"); MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Cadence USBSS PCI wrapperr"); +MODULE_DESCRIPTION("Cadence USBSS PCI wrapper"); diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c index c6a79ca15858..5685ba11480b 100644 --- a/drivers/usb/cdns3/cdns3-ti.c +++ b/drivers/usb/cdns3/cdns3-ti.c @@ -52,8 +52,8 @@ enum modestrap_mode { USBSS_MODESTRAP_MODE_NONE, struct cdns_ti { struct device *dev; void __iomem *usbss; - int usb2_only:1; - int vbus_divider:1; + unsigned usb2_only:1; + unsigned vbus_divider:1; struct clk *usb2_refclk; struct clk *lpm_clk; }; diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c index c2123ef8d8a3..4aafba20f450 100644 --- a/drivers/usb/cdns3/core.c +++ b/drivers/usb/cdns3/core.c @@ -330,9 +330,9 @@ exit: * * Returns role */ -static enum usb_role cdns3_role_get(struct device *dev) +static enum usb_role cdns3_role_get(struct usb_role_switch *sw) { - struct cdns3 *cdns = dev_get_drvdata(dev); + struct cdns3 *cdns = usb_role_switch_get_drvdata(sw); return cdns->role; } @@ -346,9 +346,9 @@ static enum usb_role cdns3_role_get(struct device *dev) * - Role switch for dual-role devices * - USB_ROLE_GADGET <--> USB_ROLE_NONE for peripheral-only devices */ -static int cdns3_role_set(struct device *dev, enum usb_role role) +static int cdns3_role_set(struct usb_role_switch *sw, enum usb_role role) { - struct cdns3 *cdns = dev_get_drvdata(dev); + struct cdns3 *cdns = usb_role_switch_get_drvdata(sw); int ret = 0; pm_runtime_get_sync(cdns->dev); @@ -423,12 +423,6 @@ pm_put: return ret; } -static const struct usb_role_switch_desc cdns3_switch_desc = { - .set = cdns3_role_set, - .get = cdns3_role_get, - .allow_userspace_control = true, -}; - /** * cdns3_probe - probe for cdns3 core device * @pdev: Pointer to cdns3 core platform device @@ -437,6 +431,7 @@ static const struct usb_role_switch_desc cdns3_switch_desc = { */ static int cdns3_probe(struct platform_device *pdev) { + struct usb_role_switch_desc sw_desc = { }; struct device *dev = &pdev->dev; struct resource *res; struct cdns3 *cdns; @@ -529,7 +524,12 @@ static int cdns3_probe(struct platform_device *pdev) if (ret) goto err3; - cdns->role_sw = usb_role_switch_register(dev, &cdns3_switch_desc); + sw_desc.set = cdns3_role_set; + sw_desc.get = cdns3_role_get; + sw_desc.allow_userspace_control = true; + sw_desc.driver_data = cdns; + + cdns->role_sw = usb_role_switch_register(dev, &sw_desc); if (IS_ERR(cdns->role_sw)) { ret = PTR_ERR(cdns->role_sw); dev_warn(dev, "Unable to register Role Switch\n"); diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c 
index 736b0c6e27fe..372460ea4df9 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -1380,7 +1380,7 @@ static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep, struct cdns3_request *priv_req) { struct cdns3_device *priv_dev = priv_ep->cdns3_dev; - struct cdns3_trb *trb = priv_req->trb; + struct cdns3_trb *trb; int current_index = 0; int handled = 0; int doorbell; @@ -2550,7 +2550,7 @@ found: /* Update ring only if removed request is on pending_req_list list */ if (req_on_hw_ring) { link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma + - (priv_req->start_trb * TRB_SIZE)); + ((priv_req->end_trb + 1) * TRB_SIZE)); link_trb->control = (link_trb->control & TRB_CYCLE) | TRB_TYPE(TRB_LINK) | TRB_CHAIN; @@ -2595,11 +2595,21 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) { struct cdns3_device *priv_dev = priv_ep->cdns3_dev; struct usb_request *request; + struct cdns3_request *priv_req; + struct cdns3_trb *trb = NULL; int ret; int val; trace_cdns3_halt(priv_ep, 0, 0); + request = cdns3_next_request(&priv_ep->pending_req_list); + if (request) { + priv_req = to_cdns3_request(request); + trb = priv_req->trb; + if (trb) + trb->control = trb->control ^ TRB_CYCLE; + } + writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); /* wait for EPRST cleared */ @@ -2610,10 +2620,11 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING); - request = cdns3_next_request(&priv_ep->pending_req_list); - - if (request) + if (request) { + if (trb) + trb->control = trb->control ^ TRB_CYCLE; cdns3_rearm_transfer(priv_ep, 1); + } cdns3_start_all_request(priv_dev, priv_ep); return ret; diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h index f003a7801872..52765b098b9e 100644 --- a/drivers/usb/cdns3/gadget.h +++ b/drivers/usb/cdns3/gadget.h @@ -1199,7 +1199,7 @@ struct cdns3_aligned_buf { void *buf; dma_addr_t dma; u32 size; - int in_use:1; + unsigned in_use:1; struct list_head list; }; @@ -1308,8 +1308,8 @@ struct cdns3_device { unsigned u2_allowed:1; unsigned is_selfpowered:1; unsigned setup_pending:1; - int hw_configured_flag:1; - int wake_up_flag:1; + unsigned hw_configured_flag:1; + unsigned wake_up_flag:1; unsigned status_completion_no_call:1; unsigned using_streams:1; int out_mem_is_allocated; diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h index 98da99510be7..b1540ce93264 100644 --- a/drivers/usb/chipidea/bits.h +++ b/drivers/usb/chipidea/bits.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * bits.h - register bits of the ChipIdea USB IP core * diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h index d49d5e1235d0..644ecaef17ee 100644 --- a/drivers/usb/chipidea/ci.h +++ b/drivers/usb/chipidea/ci.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * ci.h - common structures, functions, and macros of the ChipIdea driver * diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index d8e7eb2f97b9..a479af3ae31d 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -393,8 +393,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) } if (pdata.flags & CI_HDRC_PMQOS) - pm_qos_add_request(&data->pm_qos_req, - PM_QOS_CPU_DMA_LATENCY, 0); + cpu_latency_qos_add_request(&data->pm_qos_req, 0); ret = imx_get_clks(dev); if (ret) @@ -478,7 +477,7 @@ disable_hsic_regulator: /* don't 
overwrite original ret (cf. EPROBE_DEFER) */ regulator_disable(data->hsic_pad_regulator); if (pdata.flags & CI_HDRC_PMQOS) - pm_qos_remove_request(&data->pm_qos_req); + cpu_latency_qos_remove_request(&data->pm_qos_req); data->ci_pdev = NULL; return ret; } @@ -499,7 +498,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev) if (data->ci_pdev) { imx_disable_unprepare_clks(&pdev->dev); if (data->plat_data->flags & CI_HDRC_PMQOS) - pm_qos_remove_request(&data->pm_qos_req); + cpu_latency_qos_remove_request(&data->pm_qos_req); if (data->hsic_pad_regulator) regulator_disable(data->hsic_pad_regulator); } @@ -527,7 +526,7 @@ static int __maybe_unused imx_controller_suspend(struct device *dev) imx_disable_unprepare_clks(dev); if (data->plat_data->flags & CI_HDRC_PMQOS) - pm_qos_remove_request(&data->pm_qos_req); + cpu_latency_qos_remove_request(&data->pm_qos_req); data->in_lpm = true; @@ -547,8 +546,7 @@ static int __maybe_unused imx_controller_resume(struct device *dev) } if (data->plat_data->flags & CI_HDRC_PMQOS) - pm_qos_add_request(&data->pm_qos_req, - PM_QOS_CPU_DMA_LATENCY, 0); + cpu_latency_qos_add_request(&data->pm_qos_req, 0); ret = imx_prepare_enable_clks(dev); if (ret) diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h index de2aac9a2868..c2051aeba13f 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.h +++ b/drivers/usb/chipidea/ci_hdrc_imx.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright 2012 Freescale Semiconductor, Inc. */ diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 52139c2a9924..ae0bdc036464 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -600,9 +600,9 @@ static int ci_cable_notifier(struct notifier_block *nb, unsigned long event, return NOTIFY_DONE; } -static enum usb_role ci_usb_role_switch_get(struct device *dev) +static enum usb_role ci_usb_role_switch_get(struct usb_role_switch *sw) { - struct ci_hdrc *ci = dev_get_drvdata(dev); + struct ci_hdrc *ci = usb_role_switch_get_drvdata(sw); enum usb_role role; unsigned long flags; @@ -613,9 +613,10 @@ static enum usb_role ci_usb_role_switch_get(struct device *dev) return role; } -static int ci_usb_role_switch_set(struct device *dev, enum usb_role role) +static int ci_usb_role_switch_set(struct usb_role_switch *sw, + enum usb_role role) { - struct ci_hdrc *ci = dev_get_drvdata(dev); + struct ci_hdrc *ci = usb_role_switch_get_drvdata(sw); struct ci_hdrc_cable *cable = NULL; enum usb_role current_role = ci_role_to_usb_role(ci); enum ci_role ci_role = usb_role_to_ci_role(role); @@ -1118,6 +1119,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) } if (ci_role_switch.fwnode) { + ci_role_switch.driver_data = ci; ci->role_switch = usb_role_switch_register(dev, &ci_role_switch); if (IS_ERR(ci->role_switch)) { diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c index fbfb02e05c97..be63924ea82e 100644 --- a/drivers/usb/chipidea/otg.c +++ b/drivers/usb/chipidea/otg.c @@ -170,6 +170,13 @@ static void ci_handle_id_switch(struct ci_hdrc *ci) dev_dbg(ci->dev, "switching from %s to %s\n", ci_role(ci)->name, ci->roles[role]->name); + if (ci->vbus_active && ci->role == CI_ROLE_GADGET) + /* + * The vbus disconnect event is lost because the role + * switch occurs during system suspend. 
+ */ + usb_gadget_vbus_disconnect(&ci->gadget); + ci_role_stop(ci); if (role == CI_ROLE_GADGET && diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h index 4f8b8179ec96..5e7a6e571dd2 100644 --- a/drivers/usb/chipidea/otg.h +++ b/drivers/usb/chipidea/otg.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2013-2014 Freescale Semiconductor, Inc. * diff --git a/drivers/usb/chipidea/otg_fsm.h b/drivers/usb/chipidea/otg_fsm.h index 2b49d29bf2fb..1f5c5ae0e71e 100644 --- a/drivers/usb/chipidea/otg_fsm.h +++ b/drivers/usb/chipidea/otg_fsm.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Freescale Semiconductor, Inc. * diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index ffaf46f5d062..921bcf14dc06 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1530,18 +1530,19 @@ static const struct usb_ep_ops usb_ep_ops = { static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active) { struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget); - unsigned long flags; if (is_active) { - pm_runtime_get_sync(&_gadget->dev); + pm_runtime_get_sync(ci->dev); hw_device_reset(ci); - spin_lock_irqsave(&ci->lock, flags); + spin_lock_irq(&ci->lock); if (ci->driver) { hw_device_state(ci, ci->ep0out->qh.dma); usb_gadget_set_state(_gadget, USB_STATE_POWERED); + spin_unlock_irq(&ci->lock); usb_udc_vbus_handler(_gadget, true); + } else { + spin_unlock_irq(&ci->lock); } - spin_unlock_irqrestore(&ci->lock, flags); } else { usb_udc_vbus_handler(_gadget, false); if (ci->driver) @@ -1551,7 +1552,7 @@ static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active) ci->platdata->notify_event(ci, CI_HDRC_CONTROLLER_STOPPED_EVENT); _gadget_stop_activity(&ci->gadget); - pm_runtime_put_sync(&_gadget->dev); + pm_runtime_put_sync(ci->dev); usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED); } } @@ -1636,12 +1637,12 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on) if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST) return 0; - pm_runtime_get_sync(&ci->gadget.dev); + pm_runtime_get_sync(ci->dev); if (is_on) hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS); else hw_write(ci, OP_USBCMD, USBCMD_RS, 0); - pm_runtime_put_sync(&ci->gadget.dev); + pm_runtime_put_sync(ci->dev); return 0; } @@ -1839,7 +1840,7 @@ static int ci_udc_stop(struct usb_gadget *gadget) CI_HDRC_CONTROLLER_STOPPED_EVENT); _gadget_stop_activity(&ci->gadget); spin_lock_irqsave(&ci->lock, flags); - pm_runtime_put(&ci->gadget.dev); + pm_runtime_put(ci->dev); } spin_unlock_irqrestore(&ci->lock, flags); @@ -1970,9 +1971,6 @@ static int udc_start(struct ci_hdrc *ci) if (retval) goto destroy_eps; - pm_runtime_no_callbacks(&ci->gadget.dev); - pm_runtime_enable(&ci->gadget.dev); - return retval; destroy_eps: diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h index e023735d94b7..ebb11b625bb8 100644 --- a/drivers/usb/chipidea/udc.h +++ b/drivers/usb/chipidea/udc.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * udc.h - ChipIdea UDC structures * diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 62f4fb9b362f..84d6f7df09a4 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -896,10 +896,10 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss) ss->xmit_fifo_size = acm->writesize; ss->baud_base = 
le32_to_cpu(acm->line.dwDTERate); - ss->close_delay = acm->port.close_delay / 10; + ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10; ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? ASYNC_CLOSING_WAIT_NONE : - acm->port.closing_wait / 10; + jiffies_to_msecs(acm->port.closing_wait) / 10; return 0; } @@ -907,17 +907,25 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss) { struct acm *acm = tty->driver_data; unsigned int closing_wait, close_delay; + unsigned int old_closing_wait, old_close_delay; int retval = 0; - close_delay = ss->close_delay * 10; + close_delay = msecs_to_jiffies(ss->close_delay * 10); closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ? - ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10; + ASYNC_CLOSING_WAIT_NONE : + msecs_to_jiffies(ss->closing_wait * 10); + + /* we must redo the rounding here, so that the values match */ + old_close_delay = jiffies_to_msecs(acm->port.close_delay) / 10; + old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? + ASYNC_CLOSING_WAIT_NONE : + jiffies_to_msecs(acm->port.closing_wait) / 10; mutex_lock(&acm->port.mutex); if (!capable(CAP_SYS_ADMIN)) { - if ((close_delay != acm->port.close_delay) || - (closing_wait != acm->port.closing_wait)) + if ((ss->close_delay != old_close_delay) || + (ss->closing_wait != old_closing_wait)) retval = -EPERM; else retval = -EOPNOTSUPP; diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 26bc05e48d8a..b7918f695434 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -256,6 +256,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, struct usb_host_interface *ifp, int num_ep, unsigned char *buffer, int size) { + struct usb_device *udev = to_usb_device(ddev); unsigned char *buffer0 = buffer; struct usb_endpoint_descriptor *d; struct usb_host_endpoint *endpoint; @@ -297,6 +298,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, goto skip_to_next_endpoint_or_interface_descriptor; } + /* Ignore blacklisted endpoints */ + if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) { + if (usb_endpoint_is_blacklisted(udev, ifp, d)) { + dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n", + cfgno, inum, asnum, + d->bEndpointAddress); + goto skip_to_next_endpoint_or_interface_descriptor; + } + } + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; ++ifp->desc.bNumEndpoints; @@ -311,7 +322,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, j = 255; if (usb_endpoint_xfer_int(d)) { i = 1; - switch (to_usb_device(ddev)->speed) { + switch (udev->speed) { case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: case USB_SPEED_HIGH: @@ -332,8 +343,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, /* * This quirk fixes bIntervals reported in ms. */ - if (to_usb_device(ddev)->quirks & - USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) { + if (udev->quirks & USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) { n = clamp(fls(d->bInterval) + 3, i, j); i = j = n; } @@ -341,8 +351,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, * This quirk fixes bIntervals reported in * linear microframes. 
*/ - if (to_usb_device(ddev)->quirks & - USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL) { + if (udev->quirks & USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL) { n = clamp(fls(d->bInterval), i, j); i = j = n; } @@ -359,7 +368,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, } else if (usb_endpoint_xfer_isoc(d)) { i = 1; j = 16; - switch (to_usb_device(ddev)->speed) { + switch (udev->speed) { case USB_SPEED_HIGH: n = 7; /* 8 ms = 2^(7-1) uframes */ break; @@ -381,8 +390,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, * explicitly forbidden by the USB spec. In an attempt to make * them usable, we will try treating them as Interrupt endpoints. */ - if (to_usb_device(ddev)->speed == USB_SPEED_LOW && - usb_endpoint_xfer_bulk(d)) { + if (udev->speed == USB_SPEED_LOW && usb_endpoint_xfer_bulk(d)) { dev_warn(ddev, "config %d interface %d altsetting %d " "endpoint 0x%X is Bulk; changing to Interrupt\n", cfgno, inum, asnum, d->bEndpointAddress); @@ -406,7 +414,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, /* Find the highest legal maxpacket size for this endpoint */ i = 0; /* additional transactions per microframe */ - switch (to_usb_device(ddev)->speed) { + switch (udev->speed) { case USB_SPEED_LOW: maxpacket_maxes = low_speed_maxpacket_maxes; break; @@ -442,8 +450,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, * maxpacket sizes other than 512. High speed HCDs may not * be able to handle that particular bug, so let's warn... */ - if (to_usb_device(ddev)->speed == USB_SPEED_HIGH - && usb_endpoint_xfer_bulk(d)) { + if (udev->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) { if (maxp != 512) dev_warn(ddev, "config %d interface %d altsetting %d " "bulk endpoint 0x%X has invalid maxpacket %d\n", @@ -452,7 +459,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, } /* Parse a possible SuperSpeed endpoint companion descriptor */ - if (to_usb_device(ddev)->speed >= USB_SPEED_SUPER) + if (udev->speed >= USB_SPEED_SUPER) usb_parse_ss_endpoint_companion(ddev, cfgno, inum, asnum, endpoint, buffer, size); diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 2b27d232d7a7..f81606c6a35b 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -261,9 +261,19 @@ static int usb_probe_device(struct device *dev) */ if (!udriver->supports_autosuspend) error = usb_autoresume_device(udev); + if (error) + return error; - if (!error) - error = udriver->probe(udev); + if (udriver->generic_subclass) + error = usb_generic_driver_probe(udev); + if (error) + return error; + + error = udriver->probe(udev); + if (error == -ENODEV && udriver != &usb_generic_driver) { + udev->use_generic_driver = 1; + return -EPROBE_DEFER; + } return error; } @@ -273,7 +283,10 @@ static int usb_unbind_device(struct device *dev) struct usb_device *udev = to_usb_device(dev); struct usb_device_driver *udriver = to_usb_device_driver(dev->driver); - udriver->disconnect(udev); + if (udriver->disconnect) + udriver->disconnect(udev); + if (udriver->generic_subclass) + usb_generic_driver_disconnect(udev); if (!udriver->supports_autosuspend) usb_autosuspend_device(udev); return 0; @@ -790,17 +803,42 @@ const struct usb_device_id *usb_match_id(struct usb_interface *interface, } EXPORT_SYMBOL_GPL(usb_match_id); +const struct usb_device_id *usb_device_match_id(struct usb_device *udev, + const struct usb_device_id *id) +{ + if (!id) + return NULL; + + for (; id->idVendor || id->idProduct ; id++) { + if (usb_match_device(udev, id)) + return id; + 
} + + return NULL; +} + static int usb_device_match(struct device *dev, struct device_driver *drv) { /* devices and interfaces are handled separately */ if (is_usb_device(dev)) { + struct usb_device *udev; + struct usb_device_driver *udrv; /* interface drivers never match devices */ if (!is_usb_device_driver(drv)) return 0; - /* TODO: Add real matching code */ - return 1; + udev = to_usb_device(dev); + udrv = to_usb_device_driver(drv); + + if (udrv->id_table && + usb_device_match_id(udev, udrv->id_table) != NULL) { + return 1; + } + + if (udrv->match) + return udrv->match(udev); + return 0; } else if (is_usb_interface(dev)) { struct usb_interface *intf; @@ -1149,7 +1187,10 @@ static int usb_suspend_device(struct usb_device *udev, pm_message_t msg) udev->do_remote_wakeup = 0; udriver = &usb_generic_driver; } - status = udriver->suspend(udev, msg); + if (udriver->suspend) + status = udriver->suspend(udev, msg); + if (status == 0 && udriver->generic_subclass) + status = usb_generic_driver_suspend(udev, msg); done: dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status); @@ -1181,7 +1222,10 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg) udev->reset_resume = 1; udriver = to_usb_device_driver(udev->dev.driver); - status = udriver->resume(udev, msg); + if (udriver->generic_subclass) + status = usb_generic_driver_resume(udev, msg); + if (status == 0 && udriver->resume) + status = udriver->resume(udev, msg); done: dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status); diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c index 38f8b3e31762..4626227a6dd2 100644 --- a/drivers/usb/core/generic.c +++ b/drivers/usb/core/generic.c @@ -195,7 +195,38 @@ int usb_choose_configuration(struct usb_device *udev) } EXPORT_SYMBOL_GPL(usb_choose_configuration); -static int generic_probe(struct usb_device *udev) +static int __check_usb_generic(struct device_driver *drv, void *data) +{ + struct usb_device *udev = data; + struct usb_device_driver *udrv; + + if (!is_usb_device_driver(drv)) + return 0; + udrv = to_usb_device_driver(drv); + if (udrv == &usb_generic_driver) + return 0; + if (!udrv->id_table) + return 0; + + return usb_device_match_id(udev, udrv->id_table) != NULL; +} + +static bool usb_generic_driver_match(struct usb_device *udev) +{ + if (udev->use_generic_driver) + return true; + + /* + * If any other driver wants the device, leave the device to this other + * driver. 
+ */ + if (bus_for_each_drv(&usb_bus_type, NULL, udev, __check_usb_generic)) + return false; + + return true; +} + +int usb_generic_driver_probe(struct usb_device *udev) { int err, c; @@ -222,7 +253,7 @@ static int generic_probe(struct usb_device *udev) return 0; } -static void generic_disconnect(struct usb_device *udev) +void usb_generic_driver_disconnect(struct usb_device *udev) { usb_notify_remove_device(udev); @@ -234,7 +265,7 @@ static void generic_disconnect(struct usb_device *udev) #ifdef CONFIG_PM -static int generic_suspend(struct usb_device *udev, pm_message_t msg) +int usb_generic_driver_suspend(struct usb_device *udev, pm_message_t msg) { int rc; @@ -262,7 +293,7 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg) return rc; } -static int generic_resume(struct usb_device *udev, pm_message_t msg) +int usb_generic_driver_resume(struct usb_device *udev, pm_message_t msg) { int rc; @@ -285,11 +316,12 @@ static int generic_resume(struct usb_device *udev, pm_message_t msg) struct usb_device_driver usb_generic_driver = { .name = "usb", - .probe = generic_probe, - .disconnect = generic_disconnect, + .match = usb_generic_driver_match, + .probe = usb_generic_driver_probe, + .disconnect = usb_generic_driver_disconnect, #ifdef CONFIG_PM - .suspend = generic_suspend, - .resume = generic_resume, + .suspend = usb_generic_driver_suspend, + .resume = usb_generic_driver_resume, #endif .supports_autosuspend = 1, }; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 3405b146edc9..54cd8ef795ec 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -38,7 +38,9 @@ #include "otg_whitelist.h" #define USB_VENDOR_GENESYS_LOGIC 0x05e3 +#define USB_VENDOR_SMSC 0x0424 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01 +#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02 #define USB_TP_TRANSMISSION_DELAY 40 /* ns */ #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */ @@ -986,13 +988,17 @@ int usb_remove_device(struct usb_device *udev) { struct usb_hub *hub; struct usb_interface *intf; + int ret; if (!udev->parent) /* Can't remove a root hub */ return -EINVAL; hub = usb_hub_to_struct_hub(udev->parent); intf = to_usb_interface(hub->intfdev); - usb_autopm_get_interface(intf); + ret = usb_autopm_get_interface(intf); + if (ret < 0) + return ret; + set_bit(udev->portnum, hub->removed_bits); hub_port_logical_disconnect(hub, udev->portnum); usb_autopm_put_interface(intf); @@ -1217,11 +1223,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) #ifdef CONFIG_PM udev->reset_resume = 1; #endif - /* Don't set the change_bits when the device - * was powered off. 
- */ - if (test_bit(port1, hub->power_bits)) - set_bit(port1, hub->change_bits); } else { /* The power session is gone; tell hub_wq */ @@ -1731,6 +1732,10 @@ static void hub_disconnect(struct usb_interface *intf) kfree(hub->buffer); pm_suspend_ignore_children(&intf->dev, false); + + if (hub->quirk_disable_autosuspend) + usb_autopm_put_interface(intf); + kref_put(&hub->kref, hub_release); } @@ -1863,6 +1868,11 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND) hub->quirk_check_port_auto_suspend = 1; + if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) { + hub->quirk_disable_autosuspend = 1; + usb_autopm_get_interface_no_resume(intf); + } + if (hub_configure(hub, &desc->endpoint[0].desc) >= 0) return 0; @@ -5599,6 +5609,10 @@ out_hdev_lock: } static const struct usb_device_id hub_id_table[] = { + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS, + .idVendor = USB_VENDOR_SMSC, + .bInterfaceClass = USB_CLASS_HUB, + .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND}, { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS, .idVendor = USB_VENDOR_GENESYS_LOGIC, diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h index a9e24e4b8df1..a97dd1ba964e 100644 --- a/drivers/usb/core/hub.h +++ b/drivers/usb/core/hub.h @@ -61,6 +61,7 @@ struct usb_hub { unsigned quiescing:1; unsigned disconnected:1; unsigned in_reset:1; + unsigned quirk_disable_autosuspend:1; unsigned quirk_check_port_auto_suspend:1; diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 5adf489428aa..d5f834f16993 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -5,6 +5,7 @@ * Released under the GPLv2 only. */ +#include <linux/acpi.h> #include <linux/pci.h> /* for scatterlist macros */ #include <linux/usb.h> #include <linux/module.h> @@ -1941,6 +1942,7 @@ free_interfaces: intf->dev.of_node = usb_of_get_interface_node(dev, configuration, ifnum); } + ACPI_COMPANION_SET(&intf->dev, ACPI_COMPANION(&dev->dev)); intf->dev.driver = NULL; intf->dev.bus = &usb_bus_type; intf->dev.type = &usb_if_device_type; diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index bbbb35fa639f..235a7c645503 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -213,7 +213,10 @@ static int usb_port_runtime_resume(struct device *dev) if (!port_dev->is_superspeed && peer) pm_runtime_get_sync(&peer->dev); - usb_autopm_get_interface(intf); + retval = usb_autopm_get_interface(intf); + if (retval < 0) + return retval; + retval = usb_hub_set_port_power(hdev, hub, port1, true); msleep(hub_power_on_good_delay(hub)); if (udev && !retval) { @@ -266,7 +269,10 @@ static int usb_port_runtime_suspend(struct device *dev) if (usb_port_block_power_off) return -EBUSY; - usb_autopm_get_interface(intf); + retval = usb_autopm_get_interface(intf); + if (retval < 0) + return retval; + retval = usb_hub_set_port_power(hdev, hub, port1, false); usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION); if (!port_dev->is_superspeed) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 6b6413073584..da30b5664ff3 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -231,6 +231,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Logitech PTZ Pro Camera */ { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Logitech Screen Share */ + { USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM }, + /* 
Logitech Quickcam Fusion */ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -354,6 +357,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0904, 0x6103), .driver_info = USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, + /* Sound Devices USBPre2 */ + { USB_DEVICE(0x0926, 0x0202), .driver_info = + USB_QUIRK_ENDPOINT_BLACKLIST }, + /* Keytouch QWERTY Panel keyboard */ { USB_DEVICE(0x0926, 0x3333), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, @@ -371,6 +378,12 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0b05, 0x17e0), .driver_info = USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* Realtek hub in Dell WD19 (Type-C) */ + { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM }, + + /* Generic RTL8153 based ethernet adapters */ + { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM }, + /* Action Semiconductor flash disk */ { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255 }, @@ -445,6 +458,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, + /* novation SoundControl XL */ + { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME }, + { } /* terminating entry must be last */ }; @@ -472,6 +488,39 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { { } /* terminating entry must be last */ }; +/* + * Entries for blacklisted endpoints that should be ignored when parsing + * configuration descriptors. + * + * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST. + */ +static const struct usb_device_id usb_endpoint_blacklist[] = { + { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 }, + { } +}; + +bool usb_endpoint_is_blacklisted(struct usb_device *udev, + struct usb_host_interface *intf, + struct usb_endpoint_descriptor *epd) +{ + const struct usb_device_id *id; + unsigned int address; + + for (id = usb_endpoint_blacklist; id->match_flags; ++id) { + if (!usb_match_device(udev, id)) + continue; + + if (!usb_match_one_id_intf(udev, intf, id)) + continue; + + address = id->driver_info; + if (address == epd->bEndpointAddress) + return true; + } + + return false; +} + static bool usb_match_any_interface(struct usb_device *udev, const struct usb_device_id *id) { diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index f19694e69f5c..9f4320b9d7fc 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -849,7 +849,7 @@ static struct attribute *dev_string_attrs[] = { static umode_t dev_string_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct usb_device *udev = to_usb_device(dev); if (a == &dev_attr_manufacturer.attr) { @@ -883,7 +883,7 @@ read_descriptors(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct usb_device *udev = to_usb_device(dev); size_t nleft = count; size_t srclen, n; @@ -1233,7 +1233,7 @@ static struct attribute *intf_assoc_attrs[] = { static umode_t intf_assoc_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct usb_interface *intf = to_usb_interface(dev); if (intf->intf_assoc == NULL) diff 
--git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 9043d7242d67..50b2fc7fcc0e 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -86,7 +86,7 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle, { enum usb_port_connect_type connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *upc; + union acpi_object *upc = NULL; acpi_status status; /* @@ -98,11 +98,12 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle, * no connectable, the port would be not used. */ status = acpi_evaluate_object(handle, "_UPC", NULL, &buffer); + if (ACPI_FAILURE(status)) + goto out; + upc = buffer.pointer; - if (!upc || (upc->type != ACPI_TYPE_PACKAGE) - || upc->package.count != 4) { + if (!upc || (upc->type != ACPI_TYPE_PACKAGE) || upc->package.count != 4) goto out; - } if (upc->package.elements[0].integer.value) if (pld->user_visible) @@ -186,7 +187,7 @@ usb_acpi_find_companion_for_port(struct usb_port *port_dev) handle = adev->handle; status = acpi_get_physical_device_location(handle, &pld); - if (!ACPI_FAILURE(status) && pld) { + if (ACPI_SUCCESS(status) && pld) { port_dev->location = USB_ACPI_LOCATION_VALID | pld->group_token << 8 | pld->group_position; port_dev->connect_type = usb_acpi_get_connect_type(handle, pld); diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index cf4783cf661a..64ed4023a8c8 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -37,6 +37,9 @@ extern void usb_authorize_interface(struct usb_interface *); extern void usb_detect_quirks(struct usb_device *udev); extern void usb_detect_interface_quirks(struct usb_device *udev); extern void usb_release_quirk_list(void); +extern bool usb_endpoint_is_blacklisted(struct usb_device *udev, + struct usb_host_interface *intf, + struct usb_endpoint_descriptor *epd); extern int usb_remove_device(struct usb_device *udev); extern int usb_get_device_descriptor(struct usb_device *dev, @@ -47,6 +50,12 @@ extern void usb_release_bos_descriptor(struct usb_device *dev); extern char *usb_cache_string(struct usb_device *udev, int index); extern int usb_set_configuration(struct usb_device *dev, int configuration); extern int usb_choose_configuration(struct usb_device *udev); +extern int usb_generic_driver_probe(struct usb_device *udev); +extern void usb_generic_driver_disconnect(struct usb_device *udev); +extern int usb_generic_driver_suspend(struct usb_device *udev, + pm_message_t msg); +extern int usb_generic_driver_resume(struct usb_device *udev, + pm_message_t msg); static inline unsigned usb_get_max_power(struct usb_device *udev, struct usb_host_config *c) @@ -63,6 +72,8 @@ extern int usb_match_one_id_intf(struct usb_device *dev, const struct usb_device_id *id); extern int usb_match_device(struct usb_device *dev, const struct usb_device_id *id); +extern const struct usb_device_id *usb_device_match_id(struct usb_device *udev, + const struct usb_device_id *id); extern void usb_forced_unbind_intf(struct usb_interface *intf); extern void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev); diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 968e03b89d04..99b0bdfe0012 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -411,6 +411,10 @@ enum dwc2_ep0_state { * register. 
* 0 - Deactivate the transceiver (default) * 1 - Activate the transceiver + * @activate_stm_id_vb_detection: Activate external ID pin and Vbus level + * detection using GGPIO register. + * 0 - Deactivate the external level detection (default) + * 1 - Activate the external level detection * @g_dma: Enables gadget dma usage (default: autodetect). * @g_dma_desc: Enables gadget descriptor DMA (default: autodetect). * @g_rx_fifo_size: The periodic rx fifo size for the device, in @@ -481,6 +485,7 @@ struct dwc2_core_params { bool service_interval; u8 hird_threshold; bool activate_stm_fs_transceiver; + bool activate_stm_id_vb_detection; bool ipg_isoc_en; u16 max_packet_count; u32 max_transfer_size; @@ -874,6 +879,8 @@ struct dwc2_hregs_backup { * removed once all SoCs support usb transceiver. * @supplies: Definition of USB power supplies * @vbus_supply: Regulator supplying vbus. + * @usb33d: Optional 3.3v regulator used on some stm32 devices to + * supply ID and VBUS detection hardware. * @lock: Spinlock that protects all the driver data structures * @priv: Stores a pointer to the struct usb_hcd * @queuing_high_bandwidth: True if multiple packets of a high-bandwidth @@ -1061,6 +1068,7 @@ struct dwc2_hsotg { struct dwc2_hsotg_plat *plat; struct regulator_bulk_data supplies[DWC2_NUM_SUPPLIES]; struct regulator *vbus_supply; + struct regulator *usb33d; spinlock_t lock; void *priv; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 88f7d6d4ff2d..12b98b466287 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -1083,11 +1083,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, else packets = 1; /* send one packet if length is zero. */ - if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) { - dev_err(hsotg->dev, "req length > maxpacket*mc\n"); - return; - } - if (dir_in && index != 0) if (hs_ep->isochronous) epsize = DXEPTSIZ_MC(packets); @@ -1391,6 +1386,13 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req, req->actual = 0; req->status = -EINPROGRESS; + /* Don't queue ISOC request if length greater than mps*mc */ + if (hs_ep->isochronous && + req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) { + dev_err(hs->dev, "req length > maxpacket*mc\n"); + return -EINVAL; + } + /* In DDMA mode for ISOC's don't queue request if length greater * than descriptor limits. 
*/ @@ -1632,6 +1634,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg, struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0]; struct dwc2_hsotg_ep *ep; __le16 reply; + u16 status; int ret; dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__); @@ -1643,11 +1646,11 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg, switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: - /* - * bit 0 => self powered - * bit 1 => remote wakeup - */ - reply = cpu_to_le16(0); + status = hsotg->gadget.is_selfpowered << + USB_DEVICE_SELF_POWERED; + status |= hsotg->remote_wakeup_allowed << + USB_DEVICE_REMOTE_WAKEUP; + reply = cpu_to_le16(status); break; case USB_RECIP_INTERFACE: @@ -1758,7 +1761,10 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg, case USB_RECIP_DEVICE: switch (wValue) { case USB_DEVICE_REMOTE_WAKEUP: - hsotg->remote_wakeup_allowed = 1; + if (set) + hsotg->remote_wakeup_allowed = 1; + else + hsotg->remote_wakeup_allowed = 0; break; case USB_DEVICE_TEST_MODE: @@ -1768,16 +1774,17 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg, return -EINVAL; hsotg->test_mode = wIndex >> 8; - ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0); - if (ret) { - dev_err(hsotg->dev, - "%s: failed to send reply\n", __func__); - return ret; - } break; default: return -ENOENT; } + + ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0); + if (ret) { + dev_err(hsotg->dev, + "%s: failed to send reply\n", __func__); + return ret; + } break; case USB_RECIP_ENDPOINT: @@ -4522,6 +4529,26 @@ static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget) } /** + * dwc2_hsotg_set_selfpowered - set if device is self/bus powered + * @gadget: The usb gadget state + * @is_selfpowered: Whether the device is self-powered + * + * Set if the device is self or bus powered. 
+ */ +static int dwc2_hsotg_set_selfpowered(struct usb_gadget *gadget, + int is_selfpowered) +{ + struct dwc2_hsotg *hsotg = to_hsotg(gadget); + unsigned long flags; + + spin_lock_irqsave(&hsotg->lock, flags); + gadget->is_selfpowered = !!is_selfpowered; + spin_unlock_irqrestore(&hsotg->lock, flags); + + return 0; +} + +/** * dwc2_hsotg_pullup - connect/disconnect the USB PHY * @gadget: The usb gadget state * @is_on: Current state of the USB PHY @@ -4612,6 +4639,7 @@ static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA) static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = { .get_frame = dwc2_hsotg_gadget_getframe, + .set_selfpowered = dwc2_hsotg_set_selfpowered, .udc_start = dwc2_hsotg_udc_start, .udc_stop = dwc2_hsotg_udc_stop, .pullup = dwc2_hsotg_pullup, diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index 8ca6d12a6f57..1224fa9df604 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -199,7 +199,7 @@ struct dwc2_hcd_urb { u32 flags; u16 interval; struct dwc2_hcd_pipe_info pipe_info; - struct dwc2_hcd_iso_packet_desc iso_descs[0]; + struct dwc2_hcd_iso_packet_desc iso_descs[]; }; /* Phases for control transfers */ diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h index 510e87ec0be8..c4027bbcedec 100644 --- a/drivers/usb/dwc2/hw.h +++ b/drivers/usb/dwc2/hw.h @@ -54,6 +54,12 @@ #define GOTGCTL_HSTSETHNPEN BIT(10) #define GOTGCTL_HNPREQ BIT(9) #define GOTGCTL_HSTNEGSCS BIT(8) +#define GOTGCTL_BVALOVAL BIT(7) +#define GOTGCTL_BVALOEN BIT(6) +#define GOTGCTL_AVALOVAL BIT(5) +#define GOTGCTL_AVALOEN BIT(4) +#define GOTGCTL_VBVALOVAL BIT(3) +#define GOTGCTL_VBVALOEN BIT(2) #define GOTGCTL_SESREQ BIT(1) #define GOTGCTL_SESREQSCS BIT(0) @@ -227,6 +233,8 @@ #define GPVNDCTL HSOTG_REG(0x0034) #define GGPIO HSOTG_REG(0x0038) #define GGPIO_STM32_OTG_GCCFG_PWRDWN BIT(16) +#define GGPIO_STM32_OTG_GCCFG_VBDEN BIT(21) +#define GGPIO_STM32_OTG_GCCFG_IDEN BIT(22) #define GUID HSOTG_REG(0x003c) #define GSNPSID HSOTG_REG(0x0040) diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 31e090ac9f1e..8ccc83f7eb3f 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -163,6 +163,35 @@ static void dwc2_set_stm32f7_hsotg_params(struct dwc2_hsotg *hsotg) p->host_perio_tx_fifo_size = 256; } +static void dwc2_set_stm32mp15_fsotg_params(struct dwc2_hsotg *hsotg) +{ + struct dwc2_core_params *p = &hsotg->params; + + p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; + p->speed = DWC2_SPEED_PARAM_FULL; + p->host_rx_fifo_size = 128; + p->host_nperio_tx_fifo_size = 96; + p->host_perio_tx_fifo_size = 96; + p->max_packet_count = 256; + p->phy_type = DWC2_PHY_TYPE_PARAM_FS; + p->i2c_enable = false; + p->activate_stm_fs_transceiver = true; + p->activate_stm_id_vb_detection = true; + p->power_down = DWC2_POWER_DOWN_PARAM_NONE; +} + +static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg) +{ + struct dwc2_core_params *p = &hsotg->params; + + p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; + p->activate_stm_id_vb_detection = true; + p->host_rx_fifo_size = 440; + p->host_nperio_tx_fifo_size = 256; + p->host_perio_tx_fifo_size = 256; + p->power_down = DWC2_POWER_DOWN_PARAM_NONE; +} + const struct of_device_id dwc2_of_match_table[] = { { .compatible = "brcm,bcm2835-usb", .data = dwc2_set_bcm_params }, { .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params }, @@ -186,6 +215,10 @@ const struct of_device_id dwc2_of_match_table[] = { { .compatible = "st,stm32f4x9-hsotg" }, { .compatible = "st,stm32f7-hsotg", 
.data = dwc2_set_stm32f7_hsotg_params }, + { .compatible = "st,stm32mp15-fsotg", + .data = dwc2_set_stm32mp15_fsotg_params }, + { .compatible = "st,stm32mp15-hsotg", + .data = dwc2_set_stm32mp15_hsotg_params }, {}, }; MODULE_DEVICE_TABLE(of, dwc2_of_match_table); diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 3c6ce09a6db5..69972750e161 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -285,7 +285,9 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg) ret = devm_regulator_bulk_get(hsotg->dev, ARRAY_SIZE(hsotg->supplies), hsotg->supplies); if (ret) { - dev_err(hsotg->dev, "failed to request supplies: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(hsotg->dev, "failed to request supplies: %d\n", + ret); return ret; } return 0; @@ -312,6 +314,9 @@ static int dwc2_driver_remove(struct platform_device *dev) if (hsotg->gadget_enabled) dwc2_hsotg_remove(hsotg); + if (hsotg->params.activate_stm_id_vb_detection) + regulator_disable(hsotg->usb33d); + if (hsotg->ll_hw_enabled) dwc2_lowlevel_hw_disable(hsotg); @@ -392,8 +397,7 @@ static int dwc2_driver_probe(struct platform_device *dev) return retval; } - res = platform_get_resource(dev, IORESOURCE_MEM, 0); - hsotg->regs = devm_ioremap_resource(&dev->dev, res); + hsotg->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res); if (IS_ERR(hsotg->regs)) return PTR_ERR(hsotg->regs); @@ -464,10 +468,35 @@ static int dwc2_driver_probe(struct platform_device *dev) if (retval) goto error; + if (hsotg->params.activate_stm_id_vb_detection) { + u32 ggpio; + + hsotg->usb33d = devm_regulator_get(hsotg->dev, "usb33d"); + if (IS_ERR(hsotg->usb33d)) { + retval = PTR_ERR(hsotg->usb33d); + if (retval != -EPROBE_DEFER) + dev_err(hsotg->dev, + "failed to request usb33d supply: %d\n", + retval); + goto error; + } + retval = regulator_enable(hsotg->usb33d); + if (retval) { + dev_err(hsotg->dev, + "failed to enable usb33d supply: %d\n", retval); + goto error; + } + + ggpio = dwc2_readl(hsotg, GGPIO); + ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN; + ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN; + dwc2_writel(hsotg, ggpio, GGPIO); + } + if (hsotg->dr_mode != USB_DR_MODE_HOST) { retval = dwc2_gadget_init(hsotg); if (retval) - goto error; + goto error_init; hsotg->gadget_enabled = 1; } @@ -493,7 +522,7 @@ static int dwc2_driver_probe(struct platform_device *dev) if (retval) { if (hsotg->gadget_enabled) dwc2_hsotg_remove(hsotg); - goto error; + goto error_init; } hsotg->hcd_enabled = 1; } @@ -509,6 +538,9 @@ static int dwc2_driver_probe(struct platform_device *dev) return 0; +error_init: + if (hsotg->params.activate_stm_id_vb_detection) + regulator_disable(hsotg->usb33d); error: dwc2_lowlevel_hw_disable(hsotg); return retval; @@ -523,6 +555,37 @@ static int __maybe_unused dwc2_suspend(struct device *dev) if (is_device_mode) dwc2_hsotg_suspend(dwc2); + if (dwc2->params.activate_stm_id_vb_detection) { + unsigned long flags; + u32 ggpio, gotgctl; + + /* + * Need to force the mode to the current mode to avoid Mode + * Mismatch Interrupt when ID detection will be disabled. 
+ */ + dwc2_force_mode(dwc2, !is_device_mode); + + spin_lock_irqsave(&dwc2->lock, flags); + gotgctl = dwc2_readl(dwc2, GOTGCTL); + /* bypass debounce filter, enable overrides */ + gotgctl |= GOTGCTL_DBNCE_FLTR_BYPASS; + gotgctl |= GOTGCTL_BVALOEN | GOTGCTL_AVALOEN; + /* Force A / B session if needed */ + if (gotgctl & GOTGCTL_ASESVLD) + gotgctl |= GOTGCTL_AVALOVAL; + if (gotgctl & GOTGCTL_BSESVLD) + gotgctl |= GOTGCTL_BVALOVAL; + dwc2_writel(dwc2, gotgctl, GOTGCTL); + spin_unlock_irqrestore(&dwc2->lock, flags); + + ggpio = dwc2_readl(dwc2, GGPIO); + ggpio &= ~GGPIO_STM32_OTG_GCCFG_IDEN; + ggpio &= ~GGPIO_STM32_OTG_GCCFG_VBDEN; + dwc2_writel(dwc2, ggpio, GGPIO); + + regulator_disable(dwc2->usb33d); + } + if (dwc2->ll_hw_enabled && (is_device_mode || dwc2_host_can_poweroff_phy(dwc2))) { ret = __dwc2_lowlevel_hw_disable(dwc2); @@ -544,6 +607,34 @@ static int __maybe_unused dwc2_resume(struct device *dev) } dwc2->phy_off_for_suspend = false; + if (dwc2->params.activate_stm_id_vb_detection) { + unsigned long flags; + u32 ggpio, gotgctl; + + ret = regulator_enable(dwc2->usb33d); + if (ret) + return ret; + + ggpio = dwc2_readl(dwc2, GGPIO); + ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN; + ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN; + dwc2_writel(dwc2, ggpio, GGPIO); + + /* ID/VBUS detection startup time */ + usleep_range(5000, 7000); + + spin_lock_irqsave(&dwc2->lock, flags); + gotgctl = dwc2_readl(dwc2, GOTGCTL); + gotgctl &= ~GOTGCTL_DBNCE_FLTR_BYPASS; + gotgctl &= ~(GOTGCTL_BVALOEN | GOTGCTL_AVALOEN | + GOTGCTL_BVALOVAL | GOTGCTL_AVALOVAL); + dwc2_writel(dwc2, gotgctl, GOTGCTL); + spin_unlock_irqrestore(&dwc2->lock, flags); + } + + /* Need to restore FORCEDEVMODE/FORCEHOSTMODE */ + dwc2_force_dr_mode(dwc2); + if (dwc2_is_device_mode(dwc2)) ret = dwc2_hsotg_resume(dwc2); diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 1d85c42b9c67..edc17155cb2b 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -289,12 +289,6 @@ done: return 0; } -static const struct clk_bulk_data dwc3_core_clks[] = { - { .id = "ref" }, - { .id = "bus_early" }, - { .id = "suspend" }, -}; - /* * dwc3_frame_length_adjustment - Adjusts frame length if required * @dwc3: Pointer to our controller context structure @@ -1029,6 +1023,9 @@ static int dwc3_core_init(struct dwc3 *dwc) if (dwc->dis_tx_ipgap_linecheck_quirk) reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS; + if (dwc->parkmode_disable_ss_quirk) + reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS; + dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); } @@ -1342,6 +1339,8 @@ static void dwc3_get_properties(struct dwc3 *dwc) "snps,dis-del-phy-power-chg-quirk"); dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev, "snps,dis-tx-ipgap-linecheck-quirk"); + dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev, + "snps,parkmode-disable-ss-quirk"); dwc->tx_de_emphasis_quirk = device_property_read_bool(dev, "snps,tx_de_emphasis_quirk"); @@ -1441,11 +1440,6 @@ static int dwc3_probe(struct platform_device *pdev) if (!dwc) return -ENOMEM; - dwc->clks = devm_kmemdup(dev, dwc3_core_clks, sizeof(dwc3_core_clks), - GFP_KERNEL); - if (!dwc->clks) - return -ENOMEM; - dwc->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1476,22 +1470,23 @@ static int dwc3_probe(struct platform_device *pdev) dwc3_get_properties(dwc); - dwc->reset = devm_reset_control_get_optional_shared(dev, NULL); + dwc->reset = devm_reset_control_array_get(dev, true, true); if (IS_ERR(dwc->reset)) return PTR_ERR(dwc->reset); if (dev->of_node) { - dwc->num_clks = 
ARRAY_SIZE(dwc3_core_clks); - - ret = devm_clk_bulk_get(dev, dwc->num_clks, dwc->clks); + ret = devm_clk_bulk_get_all(dev, &dwc->clks); if (ret == -EPROBE_DEFER) return ret; /* * Clocks are optional, but new DT platforms should support all * clocks as required by the DT-binding. */ - if (ret) + if (ret < 0) dwc->num_clks = 0; + else + dwc->num_clks = ret; + } ret = reset_control_deassert(dwc->reset); @@ -1637,6 +1632,8 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) switch (dwc->current_dr_role) { case DWC3_GCTL_PRTCAP_DEVICE: + if (pm_runtime_suspended(dwc->dev)) + break; spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_suspend(dwc); spin_unlock_irqrestore(&dwc->lock, flags); diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 77c4a9abe365..6846eb0cba13 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -25,6 +25,7 @@ #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/usb/otg.h> +#include <linux/usb/role.h> #include <linux/ulpi/interface.h> #include <linux/phy/phy.h> @@ -249,6 +250,7 @@ #define DWC3_GUCTL_HSTINAUTORETRY BIT(14) /* Global User Control 1 Register */ +#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17) #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28) #define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24) @@ -953,6 +955,9 @@ struct dwc3_scratchpad_array { * @hsphy_mode: UTMI phy mode, one of following: * - USBPHY_INTERFACE_MODE_UTMI * - USBPHY_INTERFACE_MODE_UTMIW + * @role_sw: usb_role_switch handle + * @role_switch_default_mode: default operation mode of controller while + * usb role is USB_ROLE_NONE. * @usb2_phy: pointer to USB2 PHY * @usb3_phy: pointer to USB3 PHY * @usb2_generic_phy: pointer to USB2 PHY @@ -1024,6 +1029,8 @@ struct dwc3_scratchpad_array { * change quirk. * @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate * check during HS transmit. + * @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed + * instances in park mode. * @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk * @tx_de_emphasis: Tx de-emphasis value * 0 - -6dB de-emphasis @@ -1086,6 +1093,8 @@ struct dwc3 { struct extcon_dev *edev; struct notifier_block edev_nb; enum usb_phy_interface hsphy_mode; + struct usb_role_switch *role_sw; + enum usb_dr_mode role_switch_default_mode; u32 fladj; u32 irq_gadget; @@ -1215,6 +1224,7 @@ struct dwc3 { unsigned dis_u2_freeclk_exists_quirk:1; unsigned dis_del_phy_power_chg_quirk:1; unsigned dis_tx_ipgap_linecheck_quirk:1; + unsigned parkmode_disable_ss_quirk:1; unsigned tx_de_emphasis_quirk:1; unsigned tx_de_emphasis:2; diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h index e56beb9d1e36..4a13ceaf4093 100644 --- a/drivers/usb/dwc3/debug.h +++ b/drivers/usb/dwc3/debug.h @@ -256,86 +256,77 @@ static inline const char *dwc3_ep_event_string(char *str, size_t size, u8 epnum = event->endpoint_number; size_t len; int status; - int ret; - ret = snprintf(str, size, "ep%d%s: ", epnum >> 1, + len = scnprintf(str, size, "ep%d%s: ", epnum >> 1, (epnum & 1) ? "in" : "out"); - if (ret < 0) - return "UNKNOWN"; status = event->status; switch (event->endpoint_event) { case DWC3_DEPEVT_XFERCOMPLETE: - len = strlen(str); - snprintf(str + len, size - len, "Transfer Complete (%c%c%c)", + len += scnprintf(str + len, size - len, + "Transfer Complete (%c%c%c)", status & DEPEVT_STATUS_SHORT ? 'S' : 's', status & DEPEVT_STATUS_IOC ? 'I' : 'i', status & DEPEVT_STATUS_LST ? 
'L' : 'l'); - len = strlen(str); - if (epnum <= 1) - snprintf(str + len, size - len, " [%s]", + scnprintf(str + len, size - len, " [%s]", dwc3_ep0_state_string(ep0state)); break; case DWC3_DEPEVT_XFERINPROGRESS: - len = strlen(str); - - snprintf(str + len, size - len, "Transfer In Progress [%d] (%c%c%c)", + scnprintf(str + len, size - len, + "Transfer In Progress [%d] (%c%c%c)", event->parameters, status & DEPEVT_STATUS_SHORT ? 'S' : 's', status & DEPEVT_STATUS_IOC ? 'I' : 'i', status & DEPEVT_STATUS_LST ? 'M' : 'm'); break; case DWC3_DEPEVT_XFERNOTREADY: - len = strlen(str); - - snprintf(str + len, size - len, "Transfer Not Ready [%d]%s", + len += scnprintf(str + len, size - len, + "Transfer Not Ready [%d]%s", event->parameters, status & DEPEVT_STATUS_TRANSFER_ACTIVE ? " (Active)" : " (Not Active)"); - len = strlen(str); - /* Control Endpoints */ if (epnum <= 1) { int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status); switch (phase) { case DEPEVT_STATUS_CONTROL_DATA: - snprintf(str + ret, size - ret, + scnprintf(str + len, size - len, " [Data Phase]"); break; case DEPEVT_STATUS_CONTROL_STATUS: - snprintf(str + ret, size - ret, + scnprintf(str + len, size - len, " [Status Phase]"); } } break; case DWC3_DEPEVT_RXTXFIFOEVT: - snprintf(str + ret, size - ret, "FIFO"); + scnprintf(str + len, size - len, "FIFO"); break; case DWC3_DEPEVT_STREAMEVT: status = event->status; switch (status) { case DEPEVT_STREAMEVT_FOUND: - snprintf(str + ret, size - ret, " Stream %d Found", + scnprintf(str + len, size - len, " Stream %d Found", event->parameters); break; case DEPEVT_STREAMEVT_NOTFOUND: default: - snprintf(str + ret, size - ret, " Stream Not Found"); + scnprintf(str + len, size - len, " Stream Not Found"); break; } break; case DWC3_DEPEVT_EPCMDCMPLT: - snprintf(str + ret, size - ret, "Endpoint Command Complete"); + scnprintf(str + len, size - len, "Endpoint Command Complete"); break; default: - snprintf(str, size, "UNKNOWN"); + scnprintf(str + len, size - len, "UNKNOWN"); } return str; diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c index c946d64142ad..7db1ffc92bbd 100644 --- a/drivers/usb/dwc3/drd.c +++ b/drivers/usb/dwc3/drd.c @@ -476,6 +476,94 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc) return edev; } +#if IS_ENABLED(CONFIG_USB_ROLE_SWITCH) +#define ROLE_SWITCH 1 +static int dwc3_usb_role_switch_set(struct usb_role_switch *sw, + enum usb_role role) +{ + struct dwc3 *dwc = usb_role_switch_get_drvdata(sw); + u32 mode; + + switch (role) { + case USB_ROLE_HOST: + mode = DWC3_GCTL_PRTCAP_HOST; + break; + case USB_ROLE_DEVICE: + mode = DWC3_GCTL_PRTCAP_DEVICE; + break; + default: + if (dwc->role_switch_default_mode == USB_DR_MODE_HOST) + mode = DWC3_GCTL_PRTCAP_HOST; + else + mode = DWC3_GCTL_PRTCAP_DEVICE; + break; + } + + dwc3_set_mode(dwc, mode); + return 0; +} + +static enum usb_role dwc3_usb_role_switch_get(struct usb_role_switch *sw) +{ + struct dwc3 *dwc = usb_role_switch_get_drvdata(sw); + unsigned long flags; + enum usb_role role; + + spin_lock_irqsave(&dwc->lock, flags); + switch (dwc->current_dr_role) { + case DWC3_GCTL_PRTCAP_HOST: + role = USB_ROLE_HOST; + break; + case DWC3_GCTL_PRTCAP_DEVICE: + role = USB_ROLE_DEVICE; + break; + case DWC3_GCTL_PRTCAP_OTG: + role = dwc->current_otg_role; + break; + default: + if (dwc->role_switch_default_mode == USB_DR_MODE_HOST) + role = USB_ROLE_HOST; + else + role = USB_ROLE_DEVICE; + break; + } + spin_unlock_irqrestore(&dwc->lock, flags); + return role; +} + +static int dwc3_setup_role_switch(struct dwc3 *dwc) +{ + 
struct usb_role_switch_desc dwc3_role_switch = {NULL}; + const char *str; + u32 mode; + int ret; + + ret = device_property_read_string(dwc->dev, "role-switch-default-mode", + &str); + if (ret >= 0 && !strncmp(str, "host", strlen("host"))) { + dwc->role_switch_default_mode = USB_DR_MODE_HOST; + mode = DWC3_GCTL_PRTCAP_HOST; + } else { + dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL; + mode = DWC3_GCTL_PRTCAP_DEVICE; + } + + dwc3_role_switch.fwnode = dev_fwnode(dwc->dev); + dwc3_role_switch.set = dwc3_usb_role_switch_set; + dwc3_role_switch.get = dwc3_usb_role_switch_get; + dwc3_role_switch.driver_data = dwc; + dwc->role_sw = usb_role_switch_register(dwc->dev, &dwc3_role_switch); + if (IS_ERR(dwc->role_sw)) + return PTR_ERR(dwc->role_sw); + + dwc3_set_mode(dwc, mode); + return 0; +} +#else +#define ROLE_SWITCH 0 +#define dwc3_setup_role_switch(x) 0 +#endif + int dwc3_drd_init(struct dwc3 *dwc) { int ret, irq; @@ -484,7 +572,12 @@ int dwc3_drd_init(struct dwc3 *dwc) if (IS_ERR(dwc->edev)) return PTR_ERR(dwc->edev); - if (dwc->edev) { + if (ROLE_SWITCH && + device_property_read_bool(dwc->dev, "usb-role-switch")) { + ret = dwc3_setup_role_switch(dwc); + if (ret < 0) + return ret; + } else if (dwc->edev) { dwc->edev_nb.notifier_call = dwc3_drd_notifier; ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST, &dwc->edev_nb); @@ -531,6 +624,9 @@ void dwc3_drd_exit(struct dwc3 *dwc) { unsigned long flags; + if (dwc->role_sw) + usb_role_switch_unregister(dwc->role_sw); + if (dwc->edev) extcon_unregister_notifier(dwc->edev, EXTCON_USB_HOST, &dwc->edev_nb); diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index 90bb022737da..48b68b6f0dc8 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -162,6 +162,12 @@ static const struct dwc3_exynos_driverdata exynos5250_drvdata = { .suspend_clk_idx = -1, }; +static const struct dwc3_exynos_driverdata exynos5420_drvdata = { + .clk_names = { "usbdrd30", "usbdrd30_susp_clk"}, + .num_clks = 2, + .suspend_clk_idx = 1, +}; + static const struct dwc3_exynos_driverdata exynos5433_drvdata = { .clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" }, .num_clks = 4, @@ -179,6 +185,9 @@ static const struct of_device_id exynos_dwc3_match[] = { .compatible = "samsung,exynos5250-dwusb3", .data = &exynos5250_drvdata, }, { + .compatible = "samsung,exynos5420-dwusb3", + .data = &exynos5420_drvdata, + }, { .compatible = "samsung,exynos5433-dwusb3", .data = &exynos5433_drvdata, }, { diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c index 8a3ec1a951fe..b81d085bc534 100644 --- a/drivers/usb/dwc3/dwc3-meson-g12a.c +++ b/drivers/usb/dwc3/dwc3-meson-g12a.c @@ -107,10 +107,37 @@ static const char *phy_names[PHY_COUNT] = { "usb2-phy0", "usb2-phy1", "usb3-phy0", }; +static struct clk_bulk_data meson_g12a_clocks[] = { + { .id = NULL }, +}; + +static struct clk_bulk_data meson_a1_clocks[] = { + { .id = "usb_ctrl" }, + { .id = "usb_bus" }, + { .id = "xtal_usb_ctrl" }, +}; + +struct dwc3_meson_g12a_drvdata { + bool otg_switch_supported; + struct clk_bulk_data *clks; + int num_clks; +}; + +static struct dwc3_meson_g12a_drvdata g12a_drvdata = { + .otg_switch_supported = true, + .clks = meson_g12a_clocks, + .num_clks = ARRAY_SIZE(meson_g12a_clocks), +}; + +static struct dwc3_meson_g12a_drvdata a1_drvdata = { + .otg_switch_supported = false, + .clks = meson_a1_clocks, + .num_clks = ARRAY_SIZE(meson_a1_clocks), +}; + struct dwc3_meson_g12a { struct device *dev; struct regmap *regmap; - 
struct clk *clk; struct reset_control *reset; struct phy *phys[PHY_COUNT]; enum usb_dr_mode otg_mode; @@ -120,6 +147,7 @@ struct dwc3_meson_g12a { struct regulator *vbus; struct usb_role_switch_desc switch_desc; struct usb_role_switch *role_switch; + const struct dwc3_meson_g12a_drvdata *drvdata; }; static void dwc3_meson_g12a_usb2_set_mode(struct dwc3_meson_g12a *priv, @@ -151,7 +179,7 @@ static int dwc3_meson_g12a_usb2_init(struct dwc3_meson_g12a *priv) U2P_R0_POWER_ON_RESET, U2P_R0_POWER_ON_RESET); - if (i == USB2_OTG_PHY) { + if (priv->drvdata->otg_switch_supported && i == USB2_OTG_PHY) { regmap_update_bits(priv->regmap, U2P_R0 + (U2P_REG_SIZE * i), U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS, @@ -295,7 +323,7 @@ static int dwc3_meson_g12a_otg_mode_set(struct dwc3_meson_g12a *priv, { int ret; - if (!priv->phys[USB2_OTG_PHY]) + if (!priv->drvdata->otg_switch_supported || !priv->phys[USB2_OTG_PHY]) return -EINVAL; if (mode == PHY_MODE_USB_HOST) @@ -321,9 +349,10 @@ static int dwc3_meson_g12a_otg_mode_set(struct dwc3_meson_g12a *priv, return 0; } -static int dwc3_meson_g12a_role_set(struct device *dev, enum usb_role role) +static int dwc3_meson_g12a_role_set(struct usb_role_switch *sw, + enum usb_role role) { - struct dwc3_meson_g12a *priv = dev_get_drvdata(dev); + struct dwc3_meson_g12a *priv = usb_role_switch_get_drvdata(sw); enum phy_mode mode; if (role == USB_ROLE_NONE) @@ -338,9 +367,9 @@ static int dwc3_meson_g12a_role_set(struct device *dev, enum usb_role role) return dwc3_meson_g12a_otg_mode_set(priv, mode); } -static enum usb_role dwc3_meson_g12a_role_get(struct device *dev) +static enum usb_role dwc3_meson_g12a_role_get(struct usb_role_switch *sw) { - struct dwc3_meson_g12a *priv = dev_get_drvdata(dev); + struct dwc3_meson_g12a *priv = usb_role_switch_get_drvdata(sw); return priv->otg_phy_mode == PHY_MODE_USB_HOST ? 
USB_ROLE_HOST : USB_ROLE_DEVICE; @@ -380,14 +409,61 @@ static struct device *dwc3_meson_g12_find_child(struct device *dev, return &pdev->dev; } +static int dwc3_meson_g12a_otg_init(struct platform_device *pdev, + struct dwc3_meson_g12a *priv) +{ + enum phy_mode otg_id; + int ret, irq; + struct device *dev = &pdev->dev; + + if (!priv->drvdata->otg_switch_supported) + return 0; + + if (priv->otg_mode == USB_DR_MODE_OTG) { + /* Ack irq before registering */ + regmap_update_bits(priv->regmap, USB_R5, + USB_R5_ID_DIG_IRQ, 0); + + irq = platform_get_irq(pdev, 0); + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, + dwc3_meson_g12a_irq_thread, + IRQF_ONESHOT, pdev->name, priv); + if (ret) + return ret; + } + + /* Setup OTG mode corresponding to the ID pin */ + if (priv->otg_mode == USB_DR_MODE_OTG) { + otg_id = dwc3_meson_g12a_get_id(priv); + if (otg_id != priv->otg_phy_mode) { + if (dwc3_meson_g12a_otg_mode_set(priv, otg_id)) + dev_warn(dev, "Failed to switch OTG mode\n"); + } + } + + /* Setup role switcher */ + priv->switch_desc.usb2_port = dwc3_meson_g12_find_child(dev, + "snps,dwc3"); + priv->switch_desc.udc = dwc3_meson_g12_find_child(dev, "snps,dwc2"); + priv->switch_desc.allow_userspace_control = true; + priv->switch_desc.set = dwc3_meson_g12a_role_set; + priv->switch_desc.get = dwc3_meson_g12a_role_get; + priv->switch_desc.driver_data = priv; + + priv->role_switch = usb_role_switch_register(dev, &priv->switch_desc); + if (IS_ERR(priv->role_switch)) + dev_warn(dev, "Unable to register Role Switch\n"); + + return 0; +} + static int dwc3_meson_g12a_probe(struct platform_device *pdev) { struct dwc3_meson_g12a *priv; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; void __iomem *base; - enum phy_mode otg_id; - int ret, i, irq; + int ret, i; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -409,17 +485,18 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev) priv->vbus = NULL; } - priv->clk = devm_clk_get(dev, NULL); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); + priv->drvdata = of_device_get_match_data(&pdev->dev); - ret = clk_prepare_enable(priv->clk); + ret = devm_clk_bulk_get(dev, + priv->drvdata->num_clks, + priv->drvdata->clks); if (ret) return ret; - devm_add_action_or_reset(dev, - (void(*)(void *))clk_disable_unprepare, - priv->clk); + ret = clk_bulk_prepare_enable(priv->drvdata->num_clks, + priv->drvdata->clks); + if (ret) + return ret; platform_set_drvdata(pdev, priv); priv->dev = dev; @@ -433,41 +510,28 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev) ret = reset_control_reset(priv->reset); if (ret) - return ret; + goto err_disable_clks; ret = dwc3_meson_g12a_get_phys(priv); if (ret) - return ret; + goto err_disable_clks; if (priv->vbus) { ret = regulator_enable(priv->vbus); if (ret) - return ret; + goto err_disable_clks; } /* Get dr_mode */ priv->otg_mode = usb_get_dr_mode(dev); - if (priv->otg_mode == USB_DR_MODE_OTG) { - /* Ack irq before registering */ - regmap_update_bits(priv->regmap, USB_R5, - USB_R5_ID_DIG_IRQ, 0); - - irq = platform_get_irq(pdev, 0); - ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, - dwc3_meson_g12a_irq_thread, - IRQF_ONESHOT, pdev->name, priv); - if (ret) - return ret; - } - dwc3_meson_g12a_usb_init(priv); /* Init PHYs */ for (i = 0 ; i < PHY_COUNT ; ++i) { ret = phy_init(priv->phys[i]); if (ret) - return ret; + goto err_disable_clks; } /* Set PHY Power */ @@ -478,31 +542,12 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev) } ret = 
of_platform_populate(np, NULL, NULL, dev); - if (ret) { - clk_disable_unprepare(priv->clk); + if (ret) goto err_phys_power; - } - /* Setup OTG mode corresponding to the ID pin */ - if (priv->otg_mode == USB_DR_MODE_OTG) { - otg_id = dwc3_meson_g12a_get_id(priv); - if (otg_id != priv->otg_phy_mode) { - if (dwc3_meson_g12a_otg_mode_set(priv, otg_id)) - dev_warn(dev, "Failed to switch OTG mode\n"); - } - } - - /* Setup role switcher */ - priv->switch_desc.usb2_port = dwc3_meson_g12_find_child(dev, - "snps,dwc3"); - priv->switch_desc.udc = dwc3_meson_g12_find_child(dev, "snps,dwc2"); - priv->switch_desc.allow_userspace_control = true; - priv->switch_desc.set = dwc3_meson_g12a_role_set; - priv->switch_desc.get = dwc3_meson_g12a_role_get; - - priv->role_switch = usb_role_switch_register(dev, &priv->switch_desc); - if (IS_ERR(priv->role_switch)) - dev_warn(dev, "Unable to register Role Switch\n"); + ret = dwc3_meson_g12a_otg_init(pdev, priv); + if (ret) + goto err_phys_power; pm_runtime_set_active(dev); pm_runtime_enable(dev); @@ -518,6 +563,10 @@ err_phys_exit: for (i = 0 ; i < PHY_COUNT ; ++i) phy_exit(priv->phys[i]); +err_disable_clks: + clk_bulk_disable_unprepare(priv->drvdata->num_clks, + priv->drvdata->clks); + return ret; } @@ -527,7 +576,8 @@ static int dwc3_meson_g12a_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; int i; - usb_role_switch_unregister(priv->role_switch); + if (priv->drvdata->otg_switch_supported) + usb_role_switch_unregister(priv->role_switch); of_platform_depopulate(dev); @@ -540,6 +590,9 @@ static int dwc3_meson_g12a_remove(struct platform_device *pdev) pm_runtime_put_noidle(dev); pm_runtime_set_suspended(dev); + clk_bulk_disable_unprepare(priv->drvdata->num_clks, + priv->drvdata->clks); + return 0; } @@ -547,7 +600,8 @@ static int __maybe_unused dwc3_meson_g12a_runtime_suspend(struct device *dev) { struct dwc3_meson_g12a *priv = dev_get_drvdata(dev); - clk_disable(priv->clk); + clk_bulk_disable_unprepare(priv->drvdata->num_clks, + priv->drvdata->clks); return 0; } @@ -556,7 +610,8 @@ static int __maybe_unused dwc3_meson_g12a_runtime_resume(struct device *dev) { struct dwc3_meson_g12a *priv = dev_get_drvdata(dev); - return clk_enable(priv->clk); + return clk_bulk_prepare_enable(priv->drvdata->num_clks, + priv->drvdata->clks); } static int __maybe_unused dwc3_meson_g12a_suspend(struct device *dev) @@ -619,7 +674,14 @@ static const struct dev_pm_ops dwc3_meson_g12a_dev_pm_ops = { }; static const struct of_device_id dwc3_meson_g12a_match[] = { - { .compatible = "amlogic,meson-g12a-usb-ctrl" }, + { + .compatible = "amlogic,meson-g12a-usb-ctrl", + .data = &g12a_drvdata, + }, + { + .compatible = "amlogic,meson-a1-usb-ctrl", + .data = &a1_drvdata, + }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, dwc3_meson_g12a_match); diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 261af9e38ddd..1dfd024cd06b 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -9,7 +9,7 @@ #include <linux/of.h> #include <linux/clk.h> #include <linux/irq.h> -#include <linux/clk-provider.h> +#include <linux/of_clk.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/extcon.h> diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 1b8014ab0b25..4d3c79d90a6e 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1071,7 +1071,14 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, unsigned int rem = length % maxp; unsigned chain = true; - if (sg_is_last(s)) + /* + * IOMMU 
driver is coalescing the list of sgs which shares a + * page boundary into one and giving it to USB driver. With + * this the number of sgs mapped is not equal to the number of + * sgs passed. So mark the chain bit to false if it isthe last + * mapped sg. + */ + if (i == remaining - 1) chain = false; if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { @@ -1514,7 +1521,7 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r for (i = 0; i < req->num_trbs; i++) { struct dwc3_trb *trb; - trb = req->trb + i; + trb = &dep->trb_pool[dep->trb_dequeue]; trb->ctrl &= ~DWC3_TRB_CTRL_HWO; dwc3_ep_inc_deq(dep); } @@ -2429,7 +2436,8 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep, if (event->status & DEPEVT_STATUS_SHORT && !chain) return 1; - if (event->status & DEPEVT_STATUS_IOC) + if ((trb->ctrl & DWC3_TRB_CTRL_IOC) || + (trb->ctrl & DWC3_TRB_CTRL_LST)) return 1; return 0; @@ -2562,10 +2570,8 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep, dwc3_gadget_ep_cleanup_completed_requests(dep, event, status); - if (stop) { + if (stop) dwc3_stop_active_transfer(dep, true, true); - dep->flags = DWC3_EP_ENABLED; - } /* * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index fa252870c926..86dbd012b984 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * host.c - DesignWare USB3 DRD Controller Host Glue * * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com @@ -7,6 +7,7 @@ * Authors: Felipe Balbi <balbi@ti.com>, */ +#include <linux/acpi.h> #include <linux/platform_device.h> #include "core.h" @@ -75,6 +76,7 @@ int dwc3_host_init(struct dwc3 *dwc) } xhci->dev.parent = dwc->dev; + ACPI_COMPANION_SET(&xhci->dev, ACPI_COMPANION(dwc->dev)); dwc->xhci = xhci; diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h index 9edff17111f7..3054b89512ff 100644 --- a/drivers/usb/dwc3/trace.h +++ b/drivers/usb/dwc3/trace.h @@ -227,6 +227,8 @@ DECLARE_EVENT_CLASS(dwc3_log_trb, __field(u32, size) __field(u32, ctrl) __field(u32, type) + __field(u32, enqueue) + __field(u32, dequeue) ), TP_fast_assign( __assign_str(name, dep->name); @@ -236,9 +238,12 @@ DECLARE_EVENT_CLASS(dwc3_log_trb, __entry->size = trb->size; __entry->ctrl = trb->ctrl; __entry->type = usb_endpoint_type(dep->endpoint.desc); + __entry->enqueue = dep->trb_enqueue; + __entry->dequeue = dep->trb_dequeue; ), - TP_printk("%s: trb %p buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)", - __get_str(name), __entry->trb, __entry->bph, __entry->bpl, + TP_printk("%s: trb %p (E%d:D%d) buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)", + __get_str(name), __entry->trb, __entry->enqueue, + __entry->dequeue, __entry->bph, __entry->bpl, ({char *s; int pcm = ((__entry->size >> 24) & 3) + 1; switch (__entry->type) { diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 3b4f67000315..cb4950cf1cdc 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -437,12 +437,14 @@ static u8 encode_bMaxPower(enum usb_device_speed speed, val = CONFIG_USB_GADGET_VBUS_DRAW; if (!val) return 0; - switch (speed) { - case USB_SPEED_SUPER: - return DIV_ROUND_UP(val, 8); - default: - return DIV_ROUND_UP(val, 2); - } + if (speed < USB_SPEED_SUPER) + return min(val, 500U) / 2; + else + /* + * USB 3.x supports up to 900mA, but since 900 isn't divisible + * by 8 the integral 
division will effectively cap to 896mA. + */ + return min(val, 900U) / 8; } static int config_buf(struct usb_configuration *config, @@ -854,7 +856,16 @@ static int set_config(struct usb_composite_dev *cdev, /* when we return, be sure our power usage is valid */ power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW; + if (gadget->speed < USB_SPEED_SUPER) + power = min(power, 500U); + else + power = min(power, 900U); done: + if (power <= USB_SELF_POWER_VBUS_MAX_DRAW) + usb_gadget_set_selfpowered(gadget); + else + usb_gadget_clear_selfpowered(gadget); + usb_gadget_vbus_draw(gadget, power); if (result >= 0 && cdev->delayed_status) result = USB_GADGET_DELAYED_STATUS; @@ -2273,6 +2284,7 @@ void composite_suspend(struct usb_gadget *gadget) cdev->suspended = 1; + usb_gadget_set_selfpowered(gadget); usb_gadget_vbus_draw(gadget, 2); } @@ -2280,7 +2292,7 @@ void composite_resume(struct usb_gadget *gadget) { struct usb_composite_dev *cdev = get_gadget_data(gadget); struct usb_function *f; - u16 maxpower; + unsigned maxpower; /* REVISIT: should we have config level * suspend/resume callbacks? @@ -2294,10 +2306,17 @@ void composite_resume(struct usb_gadget *gadget) f->resume(f); } - maxpower = cdev->config->MaxPower; + maxpower = cdev->config->MaxPower ? + cdev->config->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW; + if (gadget->speed < USB_SPEED_SUPER) + maxpower = min(maxpower, 500U); + else + maxpower = min(maxpower, 900U); + + if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW) + usb_gadget_clear_selfpowered(gadget); - usb_gadget_vbus_draw(gadget, maxpower ? - maxpower : CONFIG_USB_GADGET_VBUS_DRAW); + usb_gadget_vbus_draw(gadget, maxpower); } cdev->suspended = 0; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 6171d28331e6..c81023b195c3 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1120,6 +1120,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); if (unlikely(ret)) { + io_data->req = NULL; usb_ep_free_request(ep->ep, req); goto error_lock; } @@ -1162,18 +1163,19 @@ static int ffs_aio_cancel(struct kiocb *kiocb) { struct ffs_io_data *io_data = kiocb->private; struct ffs_epfile *epfile = kiocb->ki_filp->private_data; + unsigned long flags; int value; ENTER(); - spin_lock_irq(&epfile->ffs->eps_lock); + spin_lock_irqsave(&epfile->ffs->eps_lock, flags); if (likely(io_data && io_data->ep && io_data->req)) value = usb_ep_dequeue(io_data->ep, io_data->req); else value = -EINVAL; - spin_unlock_irq(&epfile->ffs->eps_lock); + spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags); return value; } @@ -1702,7 +1704,7 @@ static void ffs_data_put(struct ffs_data *ffs) pr_info("%s(): freeing\n", __func__); ffs_data_clear(ffs); BUG_ON(waitqueue_active(&ffs->ev.waitq) || - waitqueue_active(&ffs->ep0req_completion.wait) || + swait_active(&ffs->ep0req_completion.wait) || waitqueue_active(&ffs->wait)); destroy_workqueue(ffs->io_completion_wq); kfree(ffs->dev_name); diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index 8b72b192c747..d7f6cc51b7ec 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c @@ -48,7 +48,7 @@ struct f_phonet { struct usb_ep *in_ep, *out_ep; struct usb_request *in_req; - struct usb_request *out_reqv[0]; + struct usb_request *out_reqv[]; }; static int phonet_rxq_size = 17; diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c 
b/drivers/usb/gadget/function/f_uac1_legacy.c index 6677ae932de0..349deae7cabd 100644 --- a/drivers/usb/gadget/function/f_uac1_legacy.c +++ b/drivers/usb/gadget/function/f_uac1_legacy.c @@ -752,8 +752,6 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f) audio->out_ep = ep; audio->out_ep->desc = &as_out_ep_desc; - status = -ENOMEM; - /* copy descriptors, and track endpoint copies */ status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL, NULL); diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index fb0a892687c0..0b9712616455 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -428,7 +428,7 @@ uvc_register_video(struct uvc_device *uvc) video_set_drvdata(&uvc->vdev, uvc); - ret = video_register_device(&uvc->vdev, VFL_TYPE_GRABBER, -1); + ret = video_register_device(&uvc->vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) return ret; diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c index 6d956f190f5a..e6d32c536781 100644 --- a/drivers/usb/gadget/function/u_audio.c +++ b/drivers/usb/gadget/function/u_audio.c @@ -361,7 +361,7 @@ int u_audio_start_capture(struct g_audio *audio_dev) ep = audio_dev->out_ep; prm = &uac->c_prm; config_ep_by_speed(gadget, &audio_dev->func, ep); - req_len = prm->max_psize; + req_len = ep->maxpacket; prm->ep_enabled = true; usb_ep_enable(ep); @@ -379,7 +379,7 @@ int u_audio_start_capture(struct g_audio *audio_dev) req->context = &prm->ureq[i]; req->length = req_len; req->complete = u_audio_iso_complete; - req->buf = prm->rbuf + i * prm->max_psize; + req->buf = prm->rbuf + i * ep->maxpacket; } if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC)) @@ -430,9 +430,9 @@ int u_audio_start_playback(struct g_audio *audio_dev) uac->p_pktsize = min_t(unsigned int, uac->p_framesize * (params->p_srate / uac->p_interval), - prm->max_psize); + ep->maxpacket); - if (uac->p_pktsize < prm->max_psize) + if (uac->p_pktsize < ep->maxpacket) uac->p_pktsize_residue = uac->p_framesize * (params->p_srate % uac->p_interval); else @@ -457,7 +457,7 @@ int u_audio_start_playback(struct g_audio *audio_dev) req->context = &prm->ureq[i]; req->length = req_len; req->complete = u_audio_iso_complete; - req->buf = prm->rbuf + i * prm->max_psize; + req->buf = prm->rbuf + i * ep->maxpacket; } if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC)) diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index f986e5c55974..8167d379e115 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -561,8 +561,10 @@ static int gs_start_io(struct gs_port *port) port->n_read = 0; started = gs_start_rx(port); - /* unblock any pending writes into our circular buffer */ if (started) { + gs_start_tx(port); + /* Unblock any pending writes into our circular buffer, in case + * we didn't in gs_start_tx() */ tty_wakeup(port->port.tty); } else { gs_free_requests(ep, head, &port->read_allocated); diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig index 6e7e1a9202e6..f02c38b32a2b 100644 --- a/drivers/usb/gadget/legacy/Kconfig +++ b/drivers/usb/gadget/legacy/Kconfig @@ -13,32 +13,28 @@ # With help from a special transceiver and a "Mini-AB" jack, systems with # both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG). # +# A Linux "Gadget Driver" talks to the USB Peripheral Controller +# driver through the abstract "gadget" API. 
Some other operating +# systems call these "client" drivers, of which "class drivers" +# are a subset (implementing a USB device class specification). +# A gadget driver implements one or more USB functions using +# the peripheral hardware. +# +# Gadget drivers are hardware-neutral, or "platform independent", +# except that they sometimes must understand quirks or limitations +# of the particular controllers they work with. For example, when +# a controller doesn't support alternate configurations or provide +# enough of the right types of endpoints, the gadget driver might +# not be able work with that controller, or might need to implement +# a less common variant of a device class protocol. +# +# The available choices each represent a single precomposed USB +# gadget configuration. In the device model, each option contains +# both the device instantiation as a child for a USB gadget +# controller, and the relevant drivers for each function declared +# by the device. -choice - tristate "USB Gadget precomposed configurations" - default USB_ETH - optional - help - A Linux "Gadget Driver" talks to the USB Peripheral Controller - driver through the abstract "gadget" API. Some other operating - systems call these "client" drivers, of which "class drivers" - are a subset (implementing a USB device class specification). - A gadget driver implements one or more USB functions using - the peripheral hardware. - - Gadget drivers are hardware-neutral, or "platform independent", - except that they sometimes must understand quirks or limitations - of the particular controllers they work with. For example, when - a controller doesn't support alternate configurations or provide - enough of the right types of endpoints, the gadget driver might - not be able work with that controller, or might need to implement - a less common variant of a device class protocol. - - The available choices each represent a single precomposed USB - gadget configuration. In the device model, each option contains - both the device instantiation as a child for a USB gadget - controller, and the relevant drivers for each function declared - by the device. +menu "USB Gadget precomposed configurations" config USB_ZERO tristate "Gadget Zero (DEVELOPMENT)" @@ -516,4 +512,15 @@ config USB_G_WEBCAM Say "y" to link the driver statically, or "m" to build a dynamically linked module called "g_webcam". -endchoice +config USB_RAW_GADGET + tristate "USB Raw Gadget" + help + USB Raw Gadget is a kernel module that provides a userspace interface + for the USB Gadget subsystem. Essentially it allows to emulate USB + devices from userspace. See Documentation/usb/raw-gadget.rst for + details. + + Say "y" to link the driver statically, or "m" to build a + dynamically linked module called "raw_gadget". 
+ +endmenu diff --git a/drivers/usb/gadget/legacy/Makefile b/drivers/usb/gadget/legacy/Makefile index abd0c3e66a05..4d864bf82799 100644 --- a/drivers/usb/gadget/legacy/Makefile +++ b/drivers/usb/gadget/legacy/Makefile @@ -43,3 +43,4 @@ obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o obj-$(CONFIG_USB_G_NCM) += g_ncm.o obj-$(CONFIG_USB_G_ACM_MS) += g_acm_ms.o obj-$(CONFIG_USB_GADGET_TARGET) += tcm_usb_gadget.o +obj-$(CONFIG_USB_RAW_GADGET) += raw_gadget.o diff --git a/drivers/usb/gadget/legacy/gmidi.c b/drivers/usb/gadget/legacy/gmidi.c index 9eea2d18f2bf..265c392810d7 100644 --- a/drivers/usb/gadget/legacy/gmidi.c +++ b/drivers/usb/gadget/legacy/gmidi.c @@ -174,7 +174,7 @@ put: } static struct usb_composite_driver midi_driver = { - .name = (char *) longname, + .name = longname, .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_HIGH, diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index b47938dff1a2..aa0de9e35afa 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -344,7 +344,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) spin_unlock_irq (&epdata->dev->lock); if (likely (value == 0)) { - value = wait_event_interruptible (done.wait, done.done); + value = wait_for_completion_interruptible(&done); if (value != 0) { spin_lock_irq (&epdata->dev->lock); if (likely (epdata->ep != NULL)) { @@ -353,7 +353,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) usb_ep_dequeue (epdata->ep, epdata->req); spin_unlock_irq (&epdata->dev->lock); - wait_event (done.wait, done.done); + wait_for_completion(&done); if (epdata->status == -ECONNRESET) epdata->status = -EINTR; } else { @@ -1736,7 +1736,7 @@ static struct usb_gadget_driver gadgetfs_driver = { .suspend = gadgetfs_suspend, .driver = { - .name = (char *) shortname, + .name = shortname, }, }; diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c new file mode 100644 index 000000000000..76406343fbe5 --- /dev/null +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -0,0 +1,1078 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * USB Raw Gadget driver. + * See Documentation/usb/raw-gadget.rst for more details. + * + * Andrey Konovalov <andreyknvl@gmail.com> + */ + +#include <linux/compiler.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/kref.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/semaphore.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/wait.h> + +#include <linux/usb.h> +#include <linux/usb/ch9.h> +#include <linux/usb/ch11.h> +#include <linux/usb/gadget.h> + +#include <uapi/linux/usb/raw_gadget.h> + +#define DRIVER_DESC "USB Raw Gadget" +#define DRIVER_NAME "raw-gadget" + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("Andrey Konovalov"); +MODULE_LICENSE("GPL"); + +/*----------------------------------------------------------------------*/ + +#define RAW_EVENT_QUEUE_SIZE 16 + +struct raw_event_queue { + /* See the comment in raw_event_queue_fetch() for locking details. 
*/ + spinlock_t lock; + struct semaphore sema; + struct usb_raw_event *events[RAW_EVENT_QUEUE_SIZE]; + int size; +}; + +static void raw_event_queue_init(struct raw_event_queue *queue) +{ + spin_lock_init(&queue->lock); + sema_init(&queue->sema, 0); + queue->size = 0; +} + +static int raw_event_queue_add(struct raw_event_queue *queue, + enum usb_raw_event_type type, size_t length, const void *data) +{ + unsigned long flags; + struct usb_raw_event *event; + + spin_lock_irqsave(&queue->lock, flags); + if (WARN_ON(queue->size >= RAW_EVENT_QUEUE_SIZE)) { + spin_unlock_irqrestore(&queue->lock, flags); + return -ENOMEM; + } + event = kmalloc(sizeof(*event) + length, GFP_ATOMIC); + if (!event) { + spin_unlock_irqrestore(&queue->lock, flags); + return -ENOMEM; + } + event->type = type; + event->length = length; + if (event->length) + memcpy(&event->data[0], data, length); + queue->events[queue->size] = event; + queue->size++; + up(&queue->sema); + spin_unlock_irqrestore(&queue->lock, flags); + return 0; +} + +static struct usb_raw_event *raw_event_queue_fetch( + struct raw_event_queue *queue) +{ + unsigned long flags; + struct usb_raw_event *event; + + /* + * This function can be called concurrently. We first check that + * there's at least one event queued by decrementing the semaphore, + * and then take the lock to protect queue struct fields. + */ + if (down_interruptible(&queue->sema)) + return NULL; + spin_lock_irqsave(&queue->lock, flags); + if (WARN_ON(!queue->size)) + return NULL; + event = queue->events[0]; + queue->size--; + memmove(&queue->events[0], &queue->events[1], + queue->size * sizeof(queue->events[0])); + spin_unlock_irqrestore(&queue->lock, flags); + return event; +} + +static void raw_event_queue_destroy(struct raw_event_queue *queue) +{ + int i; + + for (i = 0; i < queue->size; i++) + kfree(queue->events[i]); + queue->size = 0; +} + +/*----------------------------------------------------------------------*/ + +struct raw_dev; + +#define USB_RAW_MAX_ENDPOINTS 32 + +enum ep_state { + STATE_EP_DISABLED, + STATE_EP_ENABLED, +}; + +struct raw_ep { + struct raw_dev *dev; + enum ep_state state; + struct usb_ep *ep; + struct usb_request *req; + bool urb_queued; + bool disabling; + ssize_t status; +}; + +enum dev_state { + STATE_DEV_INVALID = 0, + STATE_DEV_OPENED, + STATE_DEV_INITIALIZED, + STATE_DEV_RUNNING, + STATE_DEV_CLOSED, + STATE_DEV_FAILED +}; + +struct raw_dev { + struct kref count; + spinlock_t lock; + + const char *udc_name; + struct usb_gadget_driver driver; + + /* Reference to misc device: */ + struct device *dev; + + /* Protected by lock: */ + enum dev_state state; + bool gadget_registered; + struct usb_gadget *gadget; + struct usb_request *req; + bool ep0_in_pending; + bool ep0_out_pending; + bool ep0_urb_queued; + ssize_t ep0_status; + struct raw_ep eps[USB_RAW_MAX_ENDPOINTS]; + + struct completion ep0_done; + struct raw_event_queue queue; +}; + +static struct raw_dev *dev_new(void) +{ + struct raw_dev *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return NULL; + /* Matches kref_put() in raw_release(). 
*/ + kref_init(&dev->count); + spin_lock_init(&dev->lock); + init_completion(&dev->ep0_done); + raw_event_queue_init(&dev->queue); + return dev; +} + +static void dev_free(struct kref *kref) +{ + struct raw_dev *dev = container_of(kref, struct raw_dev, count); + int i; + + kfree(dev->udc_name); + kfree(dev->driver.udc_name); + if (dev->req) { + if (dev->ep0_urb_queued) + usb_ep_dequeue(dev->gadget->ep0, dev->req); + usb_ep_free_request(dev->gadget->ep0, dev->req); + } + raw_event_queue_destroy(&dev->queue); + for (i = 0; i < USB_RAW_MAX_ENDPOINTS; i++) { + if (dev->eps[i].state != STATE_EP_ENABLED) + continue; + usb_ep_disable(dev->eps[i].ep); + usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req); + kfree(dev->eps[i].ep->desc); + dev->eps[i].state = STATE_EP_DISABLED; + } + kfree(dev); +} + +/*----------------------------------------------------------------------*/ + +static int raw_queue_event(struct raw_dev *dev, + enum usb_raw_event_type type, size_t length, const void *data) +{ + int ret = 0; + unsigned long flags; + + ret = raw_event_queue_add(&dev->queue, type, length, data); + if (ret < 0) { + spin_lock_irqsave(&dev->lock, flags); + dev->state = STATE_DEV_FAILED; + spin_unlock_irqrestore(&dev->lock, flags); + } + return ret; +} + +static void gadget_ep0_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct raw_dev *dev = req->context; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + if (req->status) + dev->ep0_status = req->status; + else + dev->ep0_status = req->actual; + if (dev->ep0_in_pending) + dev->ep0_in_pending = false; + else + dev->ep0_out_pending = false; + spin_unlock_irqrestore(&dev->lock, flags); + + complete(&dev->ep0_done); +} + +static int gadget_bind(struct usb_gadget *gadget, + struct usb_gadget_driver *driver) +{ + int ret = 0; + struct raw_dev *dev = container_of(driver, struct raw_dev, driver); + struct usb_request *req; + unsigned long flags; + + if (strcmp(gadget->name, dev->udc_name) != 0) + return -ENODEV; + + set_gadget_data(gadget, dev); + req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL); + if (!req) { + dev_err(&gadget->dev, "usb_ep_alloc_request failed\n"); + set_gadget_data(gadget, NULL); + return -ENOMEM; + } + + spin_lock_irqsave(&dev->lock, flags); + dev->req = req; + dev->req->context = dev; + dev->req->complete = gadget_ep0_complete; + dev->gadget = gadget; + spin_unlock_irqrestore(&dev->lock, flags); + + /* Matches kref_put() in gadget_unbind(). */ + kref_get(&dev->count); + + ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL); + if (ret < 0) + dev_err(&gadget->dev, "failed to queue event\n"); + + return ret; +} + +static void gadget_unbind(struct usb_gadget *gadget) +{ + struct raw_dev *dev = get_gadget_data(gadget); + + set_gadget_data(gadget, NULL); + /* Matches kref_get() in gadget_bind(). 
*/ + kref_put(&dev->count, dev_free); +} + +static int gadget_setup(struct usb_gadget *gadget, + const struct usb_ctrlrequest *ctrl) +{ + int ret = 0; + struct raw_dev *dev = get_gadget_data(gadget); + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_RUNNING) { + dev_err(&gadget->dev, "ignoring, device is not running\n"); + ret = -ENODEV; + goto out_unlock; + } + if (dev->ep0_in_pending || dev->ep0_out_pending) { + dev_dbg(&gadget->dev, "stalling, request already pending\n"); + ret = -EBUSY; + goto out_unlock; + } + if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength) + dev->ep0_in_pending = true; + else + dev->ep0_out_pending = true; + spin_unlock_irqrestore(&dev->lock, flags); + + ret = raw_queue_event(dev, USB_RAW_EVENT_CONTROL, sizeof(*ctrl), ctrl); + if (ret < 0) + dev_err(&gadget->dev, "failed to queue event\n"); + goto out; + +out_unlock: + spin_unlock_irqrestore(&dev->lock, flags); +out: + return ret; +} + +/* These are currently unused but present in case UDC driver requires them. */ +static void gadget_disconnect(struct usb_gadget *gadget) { } +static void gadget_suspend(struct usb_gadget *gadget) { } +static void gadget_resume(struct usb_gadget *gadget) { } +static void gadget_reset(struct usb_gadget *gadget) { } + +/*----------------------------------------------------------------------*/ + +static struct miscdevice raw_misc_device; + +static int raw_open(struct inode *inode, struct file *fd) +{ + struct raw_dev *dev; + + /* Nonblocking I/O is not supported yet. */ + if (fd->f_flags & O_NONBLOCK) + return -EINVAL; + + dev = dev_new(); + if (!dev) + return -ENOMEM; + fd->private_data = dev; + dev->state = STATE_DEV_OPENED; + dev->dev = raw_misc_device.this_device; + return 0; +} + +static int raw_release(struct inode *inode, struct file *fd) +{ + int ret = 0; + struct raw_dev *dev = fd->private_data; + unsigned long flags; + bool unregister = false; + + spin_lock_irqsave(&dev->lock, flags); + dev->state = STATE_DEV_CLOSED; + if (!dev->gadget) { + spin_unlock_irqrestore(&dev->lock, flags); + goto out_put; + } + if (dev->gadget_registered) + unregister = true; + dev->gadget_registered = false; + spin_unlock_irqrestore(&dev->lock, flags); + + if (unregister) { + ret = usb_gadget_unregister_driver(&dev->driver); + if (ret != 0) + dev_err(dev->dev, + "usb_gadget_unregister_driver() failed with %d\n", + ret); + /* Matches kref_get() in raw_ioctl_run(). */ + kref_put(&dev->count, dev_free); + } + +out_put: + /* Matches dev_new() in raw_open(). 
*/ + kref_put(&dev->count, dev_free); + return ret; +} + +/*----------------------------------------------------------------------*/ + +static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) +{ + int ret = 0; + struct usb_raw_init arg; + char *udc_driver_name; + char *udc_device_name; + unsigned long flags; + + ret = copy_from_user(&arg, (void __user *)value, sizeof(arg)); + if (ret) + return ret; + + switch (arg.speed) { + case USB_SPEED_UNKNOWN: + arg.speed = USB_SPEED_HIGH; + break; + case USB_SPEED_LOW: + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + case USB_SPEED_SUPER: + break; + default: + return -EINVAL; + } + + udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL); + if (!udc_driver_name) + return -ENOMEM; + ret = strscpy(udc_driver_name, &arg.driver_name[0], + UDC_NAME_LENGTH_MAX); + if (ret < 0) { + kfree(udc_driver_name); + return ret; + } + ret = 0; + + udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL); + if (!udc_device_name) { + kfree(udc_driver_name); + return -ENOMEM; + } + ret = strscpy(udc_device_name, &arg.device_name[0], + UDC_NAME_LENGTH_MAX); + if (ret < 0) { + kfree(udc_driver_name); + kfree(udc_device_name); + return ret; + } + ret = 0; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_OPENED) { + dev_dbg(dev->dev, "fail, device is not opened\n"); + kfree(udc_driver_name); + kfree(udc_device_name); + ret = -EINVAL; + goto out_unlock; + } + dev->udc_name = udc_driver_name; + + dev->driver.function = DRIVER_DESC; + dev->driver.max_speed = arg.speed; + dev->driver.setup = gadget_setup; + dev->driver.disconnect = gadget_disconnect; + dev->driver.bind = gadget_bind; + dev->driver.unbind = gadget_unbind; + dev->driver.suspend = gadget_suspend; + dev->driver.resume = gadget_resume; + dev->driver.reset = gadget_reset; + dev->driver.driver.name = DRIVER_NAME; + dev->driver.udc_name = udc_device_name; + dev->driver.match_existing_only = 1; + + dev->state = STATE_DEV_INITIALIZED; + +out_unlock: + spin_unlock_irqrestore(&dev->lock, flags); + return ret; +} + +static int raw_ioctl_run(struct raw_dev *dev, unsigned long value) +{ + int ret = 0; + unsigned long flags; + + if (value) + return -EINVAL; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_INITIALIZED) { + dev_dbg(dev->dev, "fail, device is not initialized\n"); + ret = -EINVAL; + goto out_unlock; + } + spin_unlock_irqrestore(&dev->lock, flags); + + ret = usb_gadget_probe_driver(&dev->driver); + + spin_lock_irqsave(&dev->lock, flags); + if (ret) { + dev_err(dev->dev, + "fail, usb_gadget_probe_driver returned %d\n", ret); + dev->state = STATE_DEV_FAILED; + goto out_unlock; + } + dev->gadget_registered = true; + dev->state = STATE_DEV_RUNNING; + /* Matches kref_put() in raw_release(). 
*/ + kref_get(&dev->count); + +out_unlock: + spin_unlock_irqrestore(&dev->lock, flags); + return ret; +} + +static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value) +{ + int ret = 0; + struct usb_raw_event arg; + unsigned long flags; + struct usb_raw_event *event; + uint32_t length; + + ret = copy_from_user(&arg, (void __user *)value, sizeof(arg)); + if (ret) + return ret; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_RUNNING) { + dev_dbg(dev->dev, "fail, device is not running\n"); + spin_unlock_irqrestore(&dev->lock, flags); + return -EINVAL; + } + if (!dev->gadget) { + dev_dbg(dev->dev, "fail, gadget is not bound\n"); + spin_unlock_irqrestore(&dev->lock, flags); + return -EBUSY; + } + spin_unlock_irqrestore(&dev->lock, flags); + + event = raw_event_queue_fetch(&dev->queue); + if (!event) { + dev_dbg(&dev->gadget->dev, "event fetching interrupted\n"); + return -EINTR; + } + length = min(arg.length, event->length); + ret = copy_to_user((void __user *)value, event, + sizeof(*event) + length); + return ret; +} + +static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr, + bool get_from_user) +{ + int ret; + void *data; + + ret = copy_from_user(io, ptr, sizeof(*io)); + if (ret) + return ERR_PTR(ret); + if (io->ep >= USB_RAW_MAX_ENDPOINTS) + return ERR_PTR(-EINVAL); + if (!usb_raw_io_flags_valid(io->flags)) + return ERR_PTR(-EINVAL); + if (io->length > PAGE_SIZE) + return ERR_PTR(-EINVAL); + if (get_from_user) + data = memdup_user(ptr + sizeof(*io), io->length); + else { + data = kmalloc(io->length, GFP_KERNEL); + if (!data) + data = ERR_PTR(-ENOMEM); + } + return data; +} + +static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io, + void *data, bool in) +{ + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_RUNNING) { + dev_dbg(dev->dev, "fail, device is not running\n"); + ret = -EINVAL; + goto out_unlock; + } + if (!dev->gadget) { + dev_dbg(dev->dev, "fail, gadget is not bound\n"); + ret = -EBUSY; + goto out_unlock; + } + if (dev->ep0_urb_queued) { + dev_dbg(&dev->gadget->dev, "fail, urb already queued\n"); + ret = -EBUSY; + goto out_unlock; + } + if ((in && !dev->ep0_in_pending) || + (!in && !dev->ep0_out_pending)) { + dev_dbg(&dev->gadget->dev, "fail, wrong direction\n"); + ret = -EBUSY; + goto out_unlock; + } + if (WARN_ON(in && dev->ep0_out_pending)) { + ret = -ENODEV; + dev->state = STATE_DEV_FAILED; + goto out_done; + } + if (WARN_ON(!in && dev->ep0_in_pending)) { + ret = -ENODEV; + dev->state = STATE_DEV_FAILED; + goto out_done; + } + + dev->req->buf = data; + dev->req->length = io->length; + dev->req->zero = usb_raw_io_flags_zero(io->flags); + dev->ep0_urb_queued = true; + spin_unlock_irqrestore(&dev->lock, flags); + + ret = usb_ep_queue(dev->gadget->ep0, dev->req, GFP_KERNEL); + if (ret) { + dev_err(&dev->gadget->dev, + "fail, usb_ep_queue returned %d\n", ret); + spin_lock_irqsave(&dev->lock, flags); + dev->state = STATE_DEV_FAILED; + goto out_done; + } + + ret = wait_for_completion_interruptible(&dev->ep0_done); + if (ret) { + dev_dbg(&dev->gadget->dev, "wait interrupted\n"); + usb_ep_dequeue(dev->gadget->ep0, dev->req); + wait_for_completion(&dev->ep0_done); + spin_lock_irqsave(&dev->lock, flags); + goto out_done; + } + + spin_lock_irqsave(&dev->lock, flags); + ret = dev->ep0_status; + +out_done: + dev->ep0_urb_queued = false; +out_unlock: + spin_unlock_irqrestore(&dev->lock, flags); + return ret; +} + +static int raw_ioctl_ep0_write(struct 
raw_dev *dev, unsigned long value) +{ + int ret = 0; + void *data; + struct usb_raw_ep_io io; + + data = raw_alloc_io_data(&io, (void __user *)value, true); + if (IS_ERR(data)) + return PTR_ERR(data); + ret = raw_process_ep0_io(dev, &io, data, true); + kfree(data); + return ret; +} + +static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value) +{ + int ret = 0; + void *data; + struct usb_raw_ep_io io; + unsigned int length; + + data = raw_alloc_io_data(&io, (void __user *)value, false); + if (IS_ERR(data)) + return PTR_ERR(data); + ret = raw_process_ep0_io(dev, &io, data, false); + if (ret < 0) { + kfree(data); + return ret; + } + length = min(io.length, (unsigned int)ret); + ret = copy_to_user((void __user *)(value + sizeof(io)), data, length); + kfree(data); + return ret; +} + +static bool check_ep_caps(struct usb_ep *ep, + struct usb_endpoint_descriptor *desc) +{ + switch (usb_endpoint_type(desc)) { + case USB_ENDPOINT_XFER_ISOC: + if (!ep->caps.type_iso) + return false; + break; + case USB_ENDPOINT_XFER_BULK: + if (!ep->caps.type_bulk) + return false; + break; + case USB_ENDPOINT_XFER_INT: + if (!ep->caps.type_int) + return false; + break; + default: + return false; + } + + if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in) + return false; + if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out) + return false; + + return true; +} + +static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value) +{ + int ret = 0, i; + unsigned long flags; + struct usb_endpoint_descriptor *desc; + struct usb_ep *ep = NULL; + + desc = memdup_user((void __user *)value, sizeof(*desc)); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + /* + * Endpoints with a maxpacket length of 0 can cause crashes in UDC + * drivers. + */ + if (usb_endpoint_maxp(desc) == 0) { + dev_dbg(dev->dev, "fail, bad endpoint maxpacket\n"); + kfree(desc); + return -EINVAL; + } + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_RUNNING) { + dev_dbg(dev->dev, "fail, device is not running\n"); + ret = -EINVAL; + goto out_free; + } + if (!dev->gadget) { + dev_dbg(dev->dev, "fail, gadget is not bound\n"); + ret = -EBUSY; + goto out_free; + } + + for (i = 0; i < USB_RAW_MAX_ENDPOINTS; i++) { + if (dev->eps[i].state == STATE_EP_ENABLED) + continue; + break; + } + if (i == USB_RAW_MAX_ENDPOINTS) { + dev_dbg(&dev->gadget->dev, + "fail, no device endpoints available\n"); + ret = -EBUSY; + goto out_free; + } + + gadget_for_each_ep(ep, dev->gadget) { + if (ep->enabled) + continue; + if (!check_ep_caps(ep, desc)) + continue; + ep->desc = desc; + ret = usb_ep_enable(ep); + if (ret < 0) { + dev_err(&dev->gadget->dev, + "fail, usb_ep_enable returned %d\n", ret); + goto out_free; + } + dev->eps[i].req = usb_ep_alloc_request(ep, GFP_ATOMIC); + if (!dev->eps[i].req) { + dev_err(&dev->gadget->dev, + "fail, usb_ep_alloc_request failed\n"); + usb_ep_disable(ep); + ret = -ENOMEM; + goto out_free; + } + dev->eps[i].ep = ep; + dev->eps[i].state = STATE_EP_ENABLED; + ep->driver_data = &dev->eps[i]; + ret = i; + goto out_unlock; + } + + dev_dbg(&dev->gadget->dev, "fail, no gadget endpoints available\n"); + ret = -EBUSY; + +out_free: + kfree(desc); +out_unlock: + spin_unlock_irqrestore(&dev->lock, flags); + return ret; +} + +static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value) +{ + int ret = 0, i = value; + unsigned long flags; + const void *desc; + + if (i < 0 || i >= USB_RAW_MAX_ENDPOINTS) + return -EINVAL; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_RUNNING) { + 
dev_dbg(dev->dev, "fail, device is not running\n"); + ret = -EINVAL; + goto out_unlock; + } + if (!dev->gadget) { + dev_dbg(dev->dev, "fail, gadget is not bound\n"); + ret = -EBUSY; + goto out_unlock; + } + if (dev->eps[i].state != STATE_EP_ENABLED) { + dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n"); + ret = -EINVAL; + goto out_unlock; + } + if (dev->eps[i].disabling) { + dev_dbg(&dev->gadget->dev, + "fail, disable already in progress\n"); + ret = -EINVAL; + goto out_unlock; + } + if (dev->eps[i].urb_queued) { + dev_dbg(&dev->gadget->dev, + "fail, waiting for urb completion\n"); + ret = -EINVAL; + goto out_unlock; + } + dev->eps[i].disabling = true; + spin_unlock_irqrestore(&dev->lock, flags); + + usb_ep_disable(dev->eps[i].ep); + + spin_lock_irqsave(&dev->lock, flags); + usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req); + desc = dev->eps[i].ep->desc; + dev->eps[i].ep = NULL; + dev->eps[i].state = STATE_EP_DISABLED; + kfree(desc); + dev->eps[i].disabling = false; + +out_unlock: + spin_unlock_irqrestore(&dev->lock, flags); + return ret; +} + +static void gadget_ep_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct raw_ep *r_ep = (struct raw_ep *)ep->driver_data; + struct raw_dev *dev = r_ep->dev; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + if (req->status) + r_ep->status = req->status; + else + r_ep->status = req->actual; + spin_unlock_irqrestore(&dev->lock, flags); + + complete((struct completion *)req->context); +} + +static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io, + void *data, bool in) +{ + int ret = 0; + unsigned long flags; + struct raw_ep *ep = &dev->eps[io->ep]; + DECLARE_COMPLETION_ONSTACK(done); + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_RUNNING) { + dev_dbg(dev->dev, "fail, device is not running\n"); + ret = -EINVAL; + goto out_unlock; + } + if (!dev->gadget) { + dev_dbg(dev->dev, "fail, gadget is not bound\n"); + ret = -EBUSY; + goto out_unlock; + } + if (ep->state != STATE_EP_ENABLED) { + dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n"); + ret = -EBUSY; + goto out_unlock; + } + if (ep->disabling) { + dev_dbg(&dev->gadget->dev, + "fail, endpoint is already being disabled\n"); + ret = -EBUSY; + goto out_unlock; + } + if (ep->urb_queued) { + dev_dbg(&dev->gadget->dev, "fail, urb already queued\n"); + ret = -EBUSY; + goto out_unlock; + } + if ((in && !ep->ep->caps.dir_in) || (!in && ep->ep->caps.dir_in)) { + dev_dbg(&dev->gadget->dev, "fail, wrong direction\n"); + ret = -EINVAL; + goto out_unlock; + } + + ep->dev = dev; + ep->req->context = &done; + ep->req->complete = gadget_ep_complete; + ep->req->buf = data; + ep->req->length = io->length; + ep->req->zero = usb_raw_io_flags_zero(io->flags); + ep->urb_queued = true; + spin_unlock_irqrestore(&dev->lock, flags); + + ret = usb_ep_queue(ep->ep, ep->req, GFP_KERNEL); + if (ret) { + dev_err(&dev->gadget->dev, + "fail, usb_ep_queue returned %d\n", ret); + spin_lock_irqsave(&dev->lock, flags); + dev->state = STATE_DEV_FAILED; + goto out_done; + } + + ret = wait_for_completion_interruptible(&done); + if (ret) { + dev_dbg(&dev->gadget->dev, "wait interrupted\n"); + usb_ep_dequeue(ep->ep, ep->req); + wait_for_completion(&done); + spin_lock_irqsave(&dev->lock, flags); + goto out_done; + } + + spin_lock_irqsave(&dev->lock, flags); + ret = ep->status; + +out_done: + ep->urb_queued = false; +out_unlock: + spin_unlock_irqrestore(&dev->lock, flags); + return ret; +} + +static int raw_ioctl_ep_write(struct raw_dev *dev, 
unsigned long value) +{ + int ret = 0; + char *data; + struct usb_raw_ep_io io; + + data = raw_alloc_io_data(&io, (void __user *)value, true); + if (IS_ERR(data)) + return PTR_ERR(data); + ret = raw_process_ep_io(dev, &io, data, true); + kfree(data); + return ret; +} + +static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value) +{ + int ret = 0; + char *data; + struct usb_raw_ep_io io; + unsigned int length; + + data = raw_alloc_io_data(&io, (void __user *)value, false); + if (IS_ERR(data)) + return PTR_ERR(data); + ret = raw_process_ep_io(dev, &io, data, false); + if (ret < 0) { + kfree(data); + return ret; + } + length = min(io.length, (unsigned int)ret); + ret = copy_to_user((void __user *)(value + sizeof(io)), data, length); + kfree(data); + return ret; +} + +static int raw_ioctl_configure(struct raw_dev *dev, unsigned long value) +{ + int ret = 0; + unsigned long flags; + + if (value) + return -EINVAL; + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_RUNNING) { + dev_dbg(dev->dev, "fail, device is not running\n"); + ret = -EINVAL; + goto out_unlock; + } + if (!dev->gadget) { + dev_dbg(dev->dev, "fail, gadget is not bound\n"); + ret = -EBUSY; + goto out_unlock; + } + usb_gadget_set_state(dev->gadget, USB_STATE_CONFIGURED); + +out_unlock: + spin_unlock_irqrestore(&dev->lock, flags); + return ret; +} + +static int raw_ioctl_vbus_draw(struct raw_dev *dev, unsigned long value) +{ + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_RUNNING) { + dev_dbg(dev->dev, "fail, device is not running\n"); + ret = -EINVAL; + goto out_unlock; + } + if (!dev->gadget) { + dev_dbg(dev->dev, "fail, gadget is not bound\n"); + ret = -EBUSY; + goto out_unlock; + } + usb_gadget_vbus_draw(dev->gadget, 2 * value); + +out_unlock: + spin_unlock_irqrestore(&dev->lock, flags); + return ret; +} + +static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value) +{ + struct raw_dev *dev = fd->private_data; + int ret = 0; + + if (!dev) + return -EBUSY; + + switch (cmd) { + case USB_RAW_IOCTL_INIT: + ret = raw_ioctl_init(dev, value); + break; + case USB_RAW_IOCTL_RUN: + ret = raw_ioctl_run(dev, value); + break; + case USB_RAW_IOCTL_EVENT_FETCH: + ret = raw_ioctl_event_fetch(dev, value); + break; + case USB_RAW_IOCTL_EP0_WRITE: + ret = raw_ioctl_ep0_write(dev, value); + break; + case USB_RAW_IOCTL_EP0_READ: + ret = raw_ioctl_ep0_read(dev, value); + break; + case USB_RAW_IOCTL_EP_ENABLE: + ret = raw_ioctl_ep_enable(dev, value); + break; + case USB_RAW_IOCTL_EP_DISABLE: + ret = raw_ioctl_ep_disable(dev, value); + break; + case USB_RAW_IOCTL_EP_WRITE: + ret = raw_ioctl_ep_write(dev, value); + break; + case USB_RAW_IOCTL_EP_READ: + ret = raw_ioctl_ep_read(dev, value); + break; + case USB_RAW_IOCTL_CONFIGURE: + ret = raw_ioctl_configure(dev, value); + break; + case USB_RAW_IOCTL_VBUS_DRAW: + ret = raw_ioctl_vbus_draw(dev, value); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +/*----------------------------------------------------------------------*/ + +static const struct file_operations raw_fops = { + .open = raw_open, + .unlocked_ioctl = raw_ioctl, + .compat_ioctl = raw_ioctl, + .release = raw_release, + .llseek = no_llseek, +}; + +static struct miscdevice raw_misc_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = DRIVER_NAME, + .fops = &raw_fops, +}; + +module_misc_device(raw_misc_device); diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index 797d6ace8994..3a7179e90f4e 
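The raw_ioctl() dispatch table above is the entire userspace surface of the raw-gadget interface: everything goes through ioctls on the misc device, and calls such as USB_RAW_IOCTL_EVENT_FETCH block until the UDC reports an event or the transfer completes. Below is a minimal, hypothetical userspace sketch of fetching one event; the /dev/raw-gadget node name and the <linux/usb/raw_gadget.h> uapi header are assumptions inferred from the misc-device registration, not something visible in this hunk.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usb/raw_gadget.h>

static int fetch_one_event(int fd)
{
	size_t payload = 256;	/* room for event data, e.g. a SETUP packet */
	struct usb_raw_event *ev;
	int ret;

	ev = malloc(sizeof(*ev) + payload);
	if (!ev)
		return -1;
	memset(ev, 0, sizeof(*ev) + payload);
	ev->length = payload;	/* the driver copies at most this many data bytes */

	/* Blocks until the UDC queues an event; fails with EINTR if interrupted. */
	ret = ioctl(fd, USB_RAW_IOCTL_EVENT_FETCH, ev);
	if (ret == 0)
		printf("event type %u with %u byte(s) of data\n",
		       ev->type, ev->length);
	free(ev);
	return ret;
}

int main(void)
{
	int fd = open("/dev/raw-gadget", O_RDWR);

	if (fd < 0)
		return 1;
	/* USB_RAW_IOCTL_INIT and USB_RAW_IOCTL_RUN must be issued first
	 * (omitted here); after that, events can be fetched in a loop. */
	fetch_one_event(fd);
	close(fd);
	return 0;
}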
100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -441,11 +441,20 @@ config USB_GADGET_XILINX dynamically linked module called "udc-xilinx" and force all gadget drivers to also be dynamically linked. +config USB_MAX3420_UDC + tristate "MAX3420 (USB-over-SPI) support" + depends on SPI + help + The Maxim MAX3420 chip supports USB2.0 full-speed peripheral mode. + The MAX3420 is run by SPI interface, and hence the dependency. + + To compile this driver as a module, choose M here: the module will + be called max3420_udc + config USB_TEGRA_XUDC tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller" depends on ARCH_TEGRA || COMPILE_TEST depends on PHY_TEGRA_XUSB - select USB_ROLE_SWITCH help Enables NVIDIA Tegra USB 3.0 device mode controller driver. diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile index f6777e654a8e..f5a7ce28aecd 100644 --- a/drivers/usb/gadget/udc/Makefile +++ b/drivers/usb/gadget/udc/Makefile @@ -42,3 +42,4 @@ obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o obj-$(CONFIG_USB_SNP_UDC_PLAT) += snps_udc_plat.o obj-$(CONFIG_USB_ASPEED_VHUB) += aspeed-vhub/ obj-$(CONFIG_USB_BDC_UDC) += bdc/ +obj-$(CONFIG_USB_MAX3420_UDC) += max3420_udc.o diff --git a/drivers/usb/gadget/udc/amd5536udc.h b/drivers/usb/gadget/udc/amd5536udc.h index dfdef6a28904..0262383f8c79 100644 --- a/drivers/usb/gadget/udc/amd5536udc.h +++ b/drivers/usb/gadget/udc/amd5536udc.h @@ -440,7 +440,7 @@ struct udc_ep_regs { /* endpoint data descriptor pointer */ u32 desptr; - /* reserverd */ + /* reserved */ u32 reserved; /* write/read confirmation */ diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c index bfd1c9e80a1f..80685e4306f3 100644 --- a/drivers/usb/gadget/udc/amd5536udc_pci.c +++ b/drivers/usb/gadget/udc/amd5536udc_pci.c @@ -202,7 +202,7 @@ MODULE_DEVICE_TABLE(pci, pci_id); /* PCI functions */ static struct pci_driver udc_pci_driver = { - .name = (char *) name, + .name = name, .id_table = pci_id, .probe = udc_pci_probe, .remove = udc_pci_remove, diff --git a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig index 83ba8a2eb6af..605500b19cf3 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig +++ b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig @@ -4,5 +4,5 @@ config USB_ASPEED_VHUB depends on ARCH_ASPEED || COMPILE_TEST depends on USB_LIBCOMPOSITE help - USB peripheral controller for the Aspeed AST2500 family - SoCs supporting the "vHub" functionality and USB2.0 + USB peripheral controller for the Aspeed AST2400, AST2500 and + AST2600 family SoCs supporting the "vHub" functionality and USB2.0 diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c index 90b134d5dca9..f8d35dd60c34 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/core.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c @@ -99,7 +99,7 @@ static irqreturn_t ast_vhub_irq(int irq, void *data) { struct ast_vhub *vhub = data; irqreturn_t iret = IRQ_NONE; - u32 istat; + u32 i, istat; /* Stale interrupt while tearing down */ if (!vhub->ep0_bufs) @@ -121,10 +121,10 @@ static irqreturn_t ast_vhub_irq(int irq, void *data) /* Handle generic EPs first */ if (istat & VHUB_IRQ_EP_POOL_ACK_STALL) { - u32 i, ep_acks = readl(vhub->regs + AST_VHUB_EP_ACK_ISR); + u32 ep_acks = readl(vhub->regs + AST_VHUB_EP_ACK_ISR); writel(ep_acks, vhub->regs + AST_VHUB_EP_ACK_ISR); - for (i = 0; ep_acks && i < AST_VHUB_NUM_GEN_EPs; i++) { + for (i = 0; ep_acks && i < vhub->max_epns; i++) { u32 
mask = VHUB_EP_IRQ(i); if (ep_acks & mask) { ast_vhub_epn_ack_irq(&vhub->epns[i]); @@ -134,21 +134,11 @@ static irqreturn_t ast_vhub_irq(int irq, void *data) } /* Handle device interrupts */ - if (istat & (VHUB_IRQ_DEVICE1 | - VHUB_IRQ_DEVICE2 | - VHUB_IRQ_DEVICE3 | - VHUB_IRQ_DEVICE4 | - VHUB_IRQ_DEVICE5)) { - if (istat & VHUB_IRQ_DEVICE1) - ast_vhub_dev_irq(&vhub->ports[0].dev); - if (istat & VHUB_IRQ_DEVICE2) - ast_vhub_dev_irq(&vhub->ports[1].dev); - if (istat & VHUB_IRQ_DEVICE3) - ast_vhub_dev_irq(&vhub->ports[2].dev); - if (istat & VHUB_IRQ_DEVICE4) - ast_vhub_dev_irq(&vhub->ports[3].dev); - if (istat & VHUB_IRQ_DEVICE5) - ast_vhub_dev_irq(&vhub->ports[4].dev); + for (i = 0; i < vhub->max_ports; i++) { + u32 dev_mask = VHUB_IRQ_DEVICE1 << i; + + if (istat & dev_mask) + ast_vhub_dev_irq(&vhub->ports[i].dev); } /* Handle top-level vHub EP0 interrupts */ @@ -182,7 +172,7 @@ static irqreturn_t ast_vhub_irq(int irq, void *data) void ast_vhub_init_hw(struct ast_vhub *vhub) { - u32 ctrl; + u32 ctrl, port_mask, epn_mask; UDCDBG(vhub,"(Re)Starting HW ...\n"); @@ -222,15 +212,20 @@ void ast_vhub_init_hw(struct ast_vhub *vhub) } /* Reset all devices */ - writel(VHUB_SW_RESET_ALL, vhub->regs + AST_VHUB_SW_RESET); + port_mask = GENMASK(vhub->max_ports, 1); + writel(VHUB_SW_RESET_ROOT_HUB | + VHUB_SW_RESET_DMA_CONTROLLER | + VHUB_SW_RESET_EP_POOL | + port_mask, vhub->regs + AST_VHUB_SW_RESET); udelay(1); writel(0, vhub->regs + AST_VHUB_SW_RESET); /* Disable and cleanup EP ACK/NACK interrupts */ + epn_mask = GENMASK(vhub->max_epns - 1, 0); writel(0, vhub->regs + AST_VHUB_EP_ACK_IER); writel(0, vhub->regs + AST_VHUB_EP_NACK_IER); - writel(VHUB_EP_IRQ_ALL, vhub->regs + AST_VHUB_EP_ACK_ISR); - writel(VHUB_EP_IRQ_ALL, vhub->regs + AST_VHUB_EP_NACK_ISR); + writel(epn_mask, vhub->regs + AST_VHUB_EP_ACK_ISR); + writel(epn_mask, vhub->regs + AST_VHUB_EP_NACK_ISR); /* Default settings for EP0, enable HW hub EP1 */ writel(0, vhub->regs + AST_VHUB_EP0_CTRL); @@ -273,7 +268,7 @@ static int ast_vhub_remove(struct platform_device *pdev) return 0; /* Remove devices */ - for (i = 0; i < AST_VHUB_NUM_PORTS; i++) + for (i = 0; i < vhub->max_ports; i++) ast_vhub_del_dev(&vhub->ports[i].dev); spin_lock_irqsave(&vhub->lock, flags); @@ -295,7 +290,7 @@ static int ast_vhub_remove(struct platform_device *pdev) if (vhub->ep0_bufs) dma_free_coherent(&pdev->dev, AST_VHUB_EP0_MAX_PACKET * - (AST_VHUB_NUM_PORTS + 1), + (vhub->max_ports + 1), vhub->ep0_bufs, vhub->ep0_bufs_dma); vhub->ep0_bufs = NULL; @@ -309,11 +304,32 @@ static int ast_vhub_probe(struct platform_device *pdev) struct ast_vhub *vhub; struct resource *res; int i, rc = 0; + const struct device_node *np = pdev->dev.of_node; vhub = devm_kzalloc(&pdev->dev, sizeof(*vhub), GFP_KERNEL); if (!vhub) return -ENOMEM; + rc = of_property_read_u32(np, "aspeed,vhub-downstream-ports", + &vhub->max_ports); + if (rc < 0) + vhub->max_ports = AST_VHUB_NUM_PORTS; + + vhub->ports = devm_kcalloc(&pdev->dev, vhub->max_ports, + sizeof(*vhub->ports), GFP_KERNEL); + if (!vhub->ports) + return -ENOMEM; + + rc = of_property_read_u32(np, "aspeed,vhub-generic-endpoints", + &vhub->max_epns); + if (rc < 0) + vhub->max_epns = AST_VHUB_NUM_GEN_EPs; + + vhub->epns = devm_kcalloc(&pdev->dev, vhub->max_epns, + sizeof(*vhub->epns), GFP_KERNEL); + if (!vhub->epns) + return -ENOMEM; + spin_lock_init(&vhub->lock); vhub->pdev = pdev; @@ -366,7 +382,7 @@ static int ast_vhub_probe(struct platform_device *pdev) */ vhub->ep0_bufs = dma_alloc_coherent(&pdev->dev, AST_VHUB_EP0_MAX_PACKET * - 
(AST_VHUB_NUM_PORTS + 1), + (vhub->max_ports + 1), &vhub->ep0_bufs_dma, GFP_KERNEL); if (!vhub->ep0_bufs) { dev_err(&pdev->dev, "Failed to allocate EP0 DMA buffers\n"); @@ -380,7 +396,7 @@ static int ast_vhub_probe(struct platform_device *pdev) ast_vhub_init_ep0(vhub, &vhub->ep0, NULL); /* Init devices */ - for (i = 0; i < AST_VHUB_NUM_PORTS && rc == 0; i++) + for (i = 0; i < vhub->max_ports && rc == 0; i++) rc = ast_vhub_init_dev(vhub, i); if (rc) goto err; @@ -407,6 +423,9 @@ static const struct of_device_id ast_vhub_dt_ids[] = { { .compatible = "aspeed,ast2500-usb-vhub", }, + { + .compatible = "aspeed,ast2600-usb-vhub", + }, { } }; MODULE_DEVICE_TABLE(of, ast_vhub_dt_ids); diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c index 4008e7a51188..d268306a7bfe 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c @@ -77,7 +77,7 @@ static void ast_vhub_dev_enable(struct ast_vhub_dev *d) writel(d->ep0.buf_dma, d->regs + AST_VHUB_DEV_EP0_DATA); /* Clear stall on all EPs */ - for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) { + for (i = 0; i < d->max_epns; i++) { struct ast_vhub_ep *ep = d->epns[i]; if (ep && (ep->epn.stalled || ep->epn.wedged)) { @@ -137,7 +137,7 @@ static int ast_vhub_ep_feature(struct ast_vhub_dev *d, is_set ? "SET" : "CLEAR", ep_num, wValue); if (ep_num == 0) return std_req_complete; - if (ep_num >= AST_VHUB_NUM_GEN_EPs || !d->epns[ep_num - 1]) + if (ep_num >= d->max_epns || !d->epns[ep_num - 1]) return std_req_stall; if (wValue != USB_ENDPOINT_HALT) return std_req_driver; @@ -181,7 +181,7 @@ static int ast_vhub_ep_status(struct ast_vhub_dev *d, DDBG(d, "GET_STATUS(ep%d)\n", ep_num); - if (ep_num >= AST_VHUB_NUM_GEN_EPs) + if (ep_num >= d->max_epns) return std_req_stall; if (ep_num != 0) { ep = d->epns[ep_num - 1]; @@ -299,7 +299,7 @@ static void ast_vhub_dev_nuke(struct ast_vhub_dev *d) { unsigned int i; - for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) { + for (i = 0; i < d->max_epns; i++) { if (!d->epns[i]) continue; ast_vhub_nuke(d->epns[i], -ESHUTDOWN); @@ -416,10 +416,10 @@ static struct usb_ep *ast_vhub_udc_match_ep(struct usb_gadget *gadget, * that will allow the generic code to use our * assigned address. */ - for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) + for (i = 0; i < d->max_epns; i++) if (d->epns[i] == NULL) break; - if (i >= AST_VHUB_NUM_GEN_EPs) + if (i >= d->max_epns) return NULL; addr = i + 1; @@ -526,6 +526,7 @@ void ast_vhub_del_dev(struct ast_vhub_dev *d) usb_del_gadget_udc(&d->gadget); device_unregister(d->port_dev); + kfree(d->epns); } static void ast_vhub_dev_release(struct device *dev) @@ -547,13 +548,24 @@ int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx) ast_vhub_init_ep0(vhub, &d->ep0, d); /* + * A USB device can have up to 30 endpoints besides control + * endpoint 0. 
+ */ + d->max_epns = min_t(u32, vhub->max_epns, 30); + d->epns = kcalloc(d->max_epns, sizeof(*d->epns), GFP_KERNEL); + if (!d->epns) + return -ENOMEM; + + /* * The UDC core really needs us to have separate and uniquely * named "parent" devices for each port so we create a sub device * here for that purpose */ d->port_dev = kzalloc(sizeof(struct device), GFP_KERNEL); - if (!d->port_dev) - return -ENOMEM; + if (!d->port_dev) { + rc = -ENOMEM; + goto fail_alloc; + } device_initialize(d->port_dev); d->port_dev->release = ast_vhub_dev_release; d->port_dev->parent = parent; @@ -584,6 +596,8 @@ int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx) device_del(d->port_dev); fail_add: put_device(d->port_dev); + fail_alloc: + kfree(d->epns); return rc; } diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c index 7475c74aa5c5..0bd6b20435b8 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c @@ -800,10 +800,10 @@ struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr) /* Find a free one (no device) */ spin_lock_irqsave(&vhub->lock, flags); - for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) + for (i = 0; i < vhub->max_epns; i++) if (vhub->epns[i].dev == NULL) break; - if (i >= AST_VHUB_NUM_GEN_EPs) { + if (i >= vhub->max_epns) { spin_unlock_irqrestore(&vhub->lock, flags); return NULL; } diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c index 19b3517e04c0..6e565c3dbb5b 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c @@ -93,11 +93,7 @@ static void ast_vhub_patch_dev_desc_usb1(struct usb_device_descriptor *desc) USB_DT_INTERFACE_SIZE + \ USB_DT_ENDPOINT_SIZE) -static const struct ast_vhub_full_cdesc { - struct usb_config_descriptor cfg; - struct usb_interface_descriptor intf; - struct usb_endpoint_descriptor ep; -} __attribute__ ((packed)) ast_vhub_conf_desc = { +static const struct ast_vhub_full_cdesc ast_vhub_conf_desc = { .cfg = { .bLength = USB_DT_CONFIG_SIZE, .bDescriptorType = USB_DT_CONFIG, @@ -266,6 +262,7 @@ static int ast_vhub_rep_desc(struct ast_vhub_ep *ep, u8 desc_type, u16 len) { size_t dsize; + struct ast_vhub *vhub = ep->vhub; EPDBG(ep, "GET_DESCRIPTOR(type:%d)\n", desc_type); @@ -281,20 +278,20 @@ static int ast_vhub_rep_desc(struct ast_vhub_ep *ep, switch(desc_type) { case USB_DT_DEVICE: dsize = USB_DT_DEVICE_SIZE; - memcpy(ep->buf, &ast_vhub_dev_desc, dsize); - BUILD_BUG_ON(dsize > sizeof(ast_vhub_dev_desc)); + memcpy(ep->buf, &vhub->vhub_dev_desc, dsize); + BUILD_BUG_ON(dsize > sizeof(vhub->vhub_dev_desc)); BUILD_BUG_ON(USB_DT_DEVICE_SIZE >= AST_VHUB_EP0_MAX_PACKET); break; case USB_DT_CONFIG: dsize = AST_VHUB_CONF_DESC_SIZE; - memcpy(ep->buf, &ast_vhub_conf_desc, dsize); - BUILD_BUG_ON(dsize > sizeof(ast_vhub_conf_desc)); + memcpy(ep->buf, &vhub->vhub_conf_desc, dsize); + BUILD_BUG_ON(dsize > sizeof(vhub->vhub_conf_desc)); BUILD_BUG_ON(AST_VHUB_CONF_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET); break; case USB_DT_HUB: dsize = AST_VHUB_HUB_DESC_SIZE; - memcpy(ep->buf, &ast_vhub_hub_desc, dsize); - BUILD_BUG_ON(dsize > sizeof(ast_vhub_hub_desc)); + memcpy(ep->buf, &vhub->vhub_hub_desc, dsize); + BUILD_BUG_ON(dsize > sizeof(vhub->vhub_hub_desc)); BUILD_BUG_ON(AST_VHUB_HUB_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET); break; default: @@ -317,7 +314,8 @@ static int ast_vhub_rep_string(struct ast_vhub_ep *ep, u8 string_id, u16 lang_id, u16 len) { - int rc = usb_gadget_get_string 
(&ast_vhub_strings, string_id, ep->buf); + int rc = usb_gadget_get_string(&ep->vhub->vhub_str_desc, + string_id, ep->buf); /* * This should never happen unless we put too big strings in @@ -504,7 +502,7 @@ static void ast_vhub_wake_work(struct work_struct *work) * we let the normal host wake path deal with it later. */ spin_lock_irqsave(&vhub->lock, flags); - for (i = 0; i < AST_VHUB_NUM_PORTS; i++) { + for (i = 0; i < vhub->max_ports; i++) { struct ast_vhub_port *p = &vhub->ports[i]; if (!(p->status & USB_PORT_STAT_SUSPEND)) @@ -587,7 +585,7 @@ static enum std_req_rc ast_vhub_set_port_feature(struct ast_vhub_ep *ep, struct ast_vhub *vhub = ep->vhub; struct ast_vhub_port *p; - if (port == 0 || port > AST_VHUB_NUM_PORTS) + if (port == 0 || port > vhub->max_ports) return std_req_stall; port--; p = &vhub->ports[port]; @@ -630,7 +628,7 @@ static enum std_req_rc ast_vhub_clr_port_feature(struct ast_vhub_ep *ep, struct ast_vhub *vhub = ep->vhub; struct ast_vhub_port *p; - if (port == 0 || port > AST_VHUB_NUM_PORTS) + if (port == 0 || port > vhub->max_ports) return std_req_stall; port--; p = &vhub->ports[port]; @@ -676,7 +674,7 @@ static enum std_req_rc ast_vhub_get_port_stat(struct ast_vhub_ep *ep, struct ast_vhub *vhub = ep->vhub; u16 stat, chg; - if (port == 0 || port > AST_VHUB_NUM_PORTS) + if (port == 0 || port > vhub->max_ports) return std_req_stall; port--; @@ -757,7 +755,7 @@ void ast_vhub_hub_suspend(struct ast_vhub *vhub) * Forward to unsuspended ports without changing * their connection status. */ - for (i = 0; i < AST_VHUB_NUM_PORTS; i++) { + for (i = 0; i < vhub->max_ports; i++) { struct ast_vhub_port *p = &vhub->ports[i]; if (!(p->status & USB_PORT_STAT_SUSPEND)) @@ -780,7 +778,7 @@ void ast_vhub_hub_resume(struct ast_vhub *vhub) * Forward to unsuspended ports without changing * their connection status. */ - for (i = 0; i < AST_VHUB_NUM_PORTS; i++) { + for (i = 0; i < vhub->max_ports; i++) { struct ast_vhub_port *p = &vhub->ports[i]; if (!(p->status & USB_PORT_STAT_SUSPEND)) @@ -814,7 +812,7 @@ void ast_vhub_hub_reset(struct ast_vhub *vhub) * Clear all port status, disable gadgets and "suspend" * them. They will be woken up by a port reset. */ - for (i = 0; i < AST_VHUB_NUM_PORTS; i++) { + for (i = 0; i < vhub->max_ports; i++) { struct ast_vhub_port *p = &vhub->ports[i]; /* Only keep the connected flag */ @@ -834,9 +832,31 @@ void ast_vhub_hub_reset(struct ast_vhub *vhub) writel(0, vhub->regs + AST_VHUB_EP1_STS_CHG); } +static void ast_vhub_init_desc(struct ast_vhub *vhub) +{ + /* Initialize vhub Device Descriptor. */ + memcpy(&vhub->vhub_dev_desc, &ast_vhub_dev_desc, + sizeof(vhub->vhub_dev_desc)); + + /* Initialize vhub Configuration Descriptor. */ + memcpy(&vhub->vhub_conf_desc, &ast_vhub_conf_desc, + sizeof(vhub->vhub_conf_desc)); + + /* Initialize vhub Hub Descriptor. */ + memcpy(&vhub->vhub_hub_desc, &ast_vhub_hub_desc, + sizeof(vhub->vhub_hub_desc)); + vhub->vhub_hub_desc.bNbrPorts = vhub->max_ports; + + /* Initialize vhub String Descriptors. 
*/ + memcpy(&vhub->vhub_str_desc, &ast_vhub_strings, + sizeof(vhub->vhub_str_desc)); +} + void ast_vhub_init_hub(struct ast_vhub *vhub) { vhub->speed = USB_SPEED_UNKNOWN; INIT_WORK(&vhub->wake_work, ast_vhub_wake_work); + + ast_vhub_init_desc(vhub); } diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h index 761919e220d3..fac79ef6d669 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h +++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h @@ -2,6 +2,9 @@ #ifndef __ASPEED_VHUB_H #define __ASPEED_VHUB_H +#include <linux/usb.h> +#include <linux/usb/ch11.h> + /***************************** * * * VHUB register definitions * @@ -76,17 +79,9 @@ #define VHUB_SW_RESET_DEVICE2 (1 << 2) #define VHUB_SW_RESET_DEVICE1 (1 << 1) #define VHUB_SW_RESET_ROOT_HUB (1 << 0) -#define VHUB_SW_RESET_ALL (VHUB_SW_RESET_EP_POOL | \ - VHUB_SW_RESET_DMA_CONTROLLER | \ - VHUB_SW_RESET_DEVICE5 | \ - VHUB_SW_RESET_DEVICE4 | \ - VHUB_SW_RESET_DEVICE3 | \ - VHUB_SW_RESET_DEVICE2 | \ - VHUB_SW_RESET_DEVICE1 | \ - VHUB_SW_RESET_ROOT_HUB) + /* EP ACK/NACK IRQ masks */ #define VHUB_EP_IRQ(n) (1 << (n)) -#define VHUB_EP_IRQ_ALL 0x7fff /* 15 EPs */ /* USB status reg */ #define VHUB_USBSTS_HISPEED (1 << 27) @@ -210,6 +205,11 @@ * * ****************************************/ +/* + * AST_VHUB_NUM_GEN_EPs and AST_VHUB_NUM_PORTS are kept to avoid breaking + * existing AST2400/AST2500 platforms. AST2600 and future vhub revisions + * should define number of downstream ports and endpoints in device tree. + */ #define AST_VHUB_NUM_GEN_EPs 15 /* Generic non-0 EPs */ #define AST_VHUB_NUM_PORTS 5 /* vHub ports */ #define AST_VHUB_EP0_MAX_PACKET 64 /* EP0's max packet size */ @@ -312,7 +312,7 @@ struct ast_vhub_ep { /* Registers */ void __iomem *regs; - /* Index in global pool (0..14) */ + /* Index in global pool (zero-based) */ unsigned int g_idx; /* DMA Descriptors */ @@ -342,7 +342,7 @@ struct ast_vhub_dev { struct ast_vhub *vhub; void __iomem *regs; - /* Device index (0...4) and name string */ + /* Device index (zero-based) and name string */ unsigned int index; const char *name; @@ -358,7 +358,8 @@ struct ast_vhub_dev { /* Endpoint structures */ struct ast_vhub_ep ep0; - struct ast_vhub_ep *epns[AST_VHUB_NUM_GEN_EPs]; + struct ast_vhub_ep **epns; + u32 max_epns; }; #define to_ast_dev(__g) container_of(__g, struct ast_vhub_dev, gadget) @@ -373,6 +374,12 @@ struct ast_vhub_port { struct ast_vhub_dev dev; }; +struct ast_vhub_full_cdesc { + struct usb_config_descriptor cfg; + struct usb_interface_descriptor intf; + struct usb_endpoint_descriptor ep; +} __packed; + /* Global vhub structure */ struct ast_vhub { struct platform_device *pdev; @@ -393,10 +400,12 @@ struct ast_vhub { bool ep1_stalled : 1; /* Per-port info */ - struct ast_vhub_port ports[AST_VHUB_NUM_PORTS]; + struct ast_vhub_port *ports; + u32 max_ports; /* Generic EP data structures */ - struct ast_vhub_ep epns[AST_VHUB_NUM_GEN_EPs]; + struct ast_vhub_ep *epns; + u32 max_epns; /* Upstream bus is suspended ? */ bool suspended : 1; @@ -409,6 +418,12 @@ struct ast_vhub { /* Upstream bus speed captured at bus reset */ unsigned int speed; + + /* Standard USB Descriptors of the vhub. 
*/ + struct usb_device_descriptor vhub_dev_desc; + struct ast_vhub_full_cdesc vhub_conf_desc; + struct usb_hub_descriptor vhub_hub_desc; + struct usb_gadget_strings vhub_str_desc; }; /* Standard request handlers result codes */ diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c index 1b2b548c59a0..eede5cedacb4 100644 --- a/drivers/usb/gadget/udc/at91_udc.c +++ b/drivers/usb/gadget/udc/at91_udc.c @@ -2021,7 +2021,7 @@ static struct platform_driver at91_udc_driver = { .suspend = at91udc_suspend, .resume = at91udc_resume, .driver = { - .name = (char *) driver_name, + .name = driver_name, .of_match_table = at91_udc_dt_ids, }, }; diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 4c9d1e49d5ed..6e3e3ebf715f 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -1134,7 +1134,7 @@ static struct platform_driver dummy_udc_driver = { .suspend = dummy_udc_suspend, .resume = dummy_udc_resume, .driver = { - .name = (char *) gadget_name, + .name = gadget_name, }, }; @@ -2720,7 +2720,7 @@ static struct platform_driver dummy_hcd_driver = { .suspend = dummy_hcd_suspend, .resume = dummy_hcd_resume, .driver = { - .name = (char *) driver_name, + .name = driver_name, }, }; diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c index 21f3e6c4e4d6..d6ca50f01985 100644 --- a/drivers/usb/gadget/udc/fotg210-udc.c +++ b/drivers/usb/gadget/udc/fotg210-udc.c @@ -1199,7 +1199,7 @@ err: static struct platform_driver fotg210_driver = { .driver = { - .name = (char *)udc_name, + .name = udc_name, }, .probe = fotg210_udc_probe, .remove = fotg210_udc_remove, diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index ec6eda426223..febabde62f71 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -53,7 +53,6 @@ #define DMA_ADDR_INVALID (~(dma_addr_t)0) static const char driver_name[] = "fsl-usb2-udc"; -static const char driver_desc[] = DRIVER_DESC; static struct usb_dr_device __iomem *dr_regs; diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c index 00e3f66836a9..9af8b415f303 100644 --- a/drivers/usb/gadget/udc/fusb300_udc.c +++ b/drivers/usb/gadget/udc/fusb300_udc.c @@ -1507,7 +1507,7 @@ clean_up: static struct platform_driver fusb300_driver = { .remove = fusb300_remove, .driver = { - .name = (char *) udc_name, + .name = udc_name, }, }; diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c index 4a46f661d0e4..91dcb1995c27 100644 --- a/drivers/usb/gadget/udc/goku_udc.c +++ b/drivers/usb/gadget/udc/goku_udc.c @@ -1844,7 +1844,7 @@ static const struct pci_device_id pci_ids[] = { { MODULE_DEVICE_TABLE (pci, pci_ids); static struct pci_driver goku_pci_driver = { - .name = (char *) driver_name, + .name = driver_name, .id_table = pci_ids, .probe = goku_probe, diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index d14b2bb3f67c..cb997b82c008 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -3267,7 +3267,7 @@ static struct platform_driver lpc32xx_udc_driver = { .suspend = lpc32xx_udc_suspend, .resume = lpc32xx_udc_resume, .driver = { - .name = (char *) driver_name, + .name = driver_name, .of_match_table = of_match_ptr(lpc32xx_udc_of_match), }, }; diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c index 
a8288df6aadf..75d16a8902e6 100644 --- a/drivers/usb/gadget/udc/m66592-udc.c +++ b/drivers/usb/gadget/udc/m66592-udc.c @@ -1691,7 +1691,7 @@ clean_up: static struct platform_driver m66592_driver = { .remove = m66592_remove, .driver = { - .name = (char *) udc_name, + .name = udc_name, }, }; diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c new file mode 100644 index 000000000000..8fbc083b6732 --- /dev/null +++ b/drivers/usb/gadget/udc/max3420_udc.c @@ -0,0 +1,1331 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * MAX3420 Device Controller driver for USB. + * + * Author: Jaswinder Singh Brar <jaswinder.singh@linaro.org> + * (C) Copyright 2019-2020 Linaro Ltd + * + * Based on: + * o MAX3420E datasheet + * http://datasheets.maximintegrated.com/en/ds/MAX3420E.pdf + * o MAX342{0,1}E Programming Guides + * https://pdfserv.maximintegrated.com/en/an/AN3598.pdf + * https://pdfserv.maximintegrated.com/en/an/AN3785.pdf + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/bitfield.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> +#include <linux/prefetch.h> +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> +#include <linux/spi/spi.h> +#include <linux/gpio/consumer.h> + +#define MAX3420_MAX_EPS 4 +#define MAX3420_EP_MAX_PACKET 64 /* Same for all Endpoints */ +#define MAX3420_EPNAME_SIZE 16 /* Buffer size for endpoint name */ + +#define MAX3420_ACKSTAT BIT(0) + +#define MAX3420_SPI_DIR_RD 0 /* read register from MAX3420 */ +#define MAX3420_SPI_DIR_WR 1 /* write register to MAX3420 */ + +/* SPI commands: */ +#define MAX3420_SPI_DIR_SHIFT 1 +#define MAX3420_SPI_REG_SHIFT 3 + +#define MAX3420_REG_EP0FIFO 0 +#define MAX3420_REG_EP1FIFO 1 +#define MAX3420_REG_EP2FIFO 2 +#define MAX3420_REG_EP3FIFO 3 +#define MAX3420_REG_SUDFIFO 4 +#define MAX3420_REG_EP0BC 5 +#define MAX3420_REG_EP1BC 6 +#define MAX3420_REG_EP2BC 7 +#define MAX3420_REG_EP3BC 8 + +#define MAX3420_REG_EPSTALLS 9 + #define ACKSTAT BIT(6) + #define STLSTAT BIT(5) + #define STLEP3IN BIT(4) + #define STLEP2IN BIT(3) + #define STLEP1OUT BIT(2) + #define STLEP0OUT BIT(1) + #define STLEP0IN BIT(0) + +#define MAX3420_REG_CLRTOGS 10 + #define EP3DISAB BIT(7) + #define EP2DISAB BIT(6) + #define EP1DISAB BIT(5) + #define CTGEP3IN BIT(4) + #define CTGEP2IN BIT(3) + #define CTGEP1OUT BIT(2) + +#define MAX3420_REG_EPIRQ 11 +#define MAX3420_REG_EPIEN 12 + #define SUDAVIRQ BIT(5) + #define IN3BAVIRQ BIT(4) + #define IN2BAVIRQ BIT(3) + #define OUT1DAVIRQ BIT(2) + #define OUT0DAVIRQ BIT(1) + #define IN0BAVIRQ BIT(0) + +#define MAX3420_REG_USBIRQ 13 +#define MAX3420_REG_USBIEN 14 + #define OSCOKIRQ BIT(0) + #define RWUDNIRQ BIT(1) + #define BUSACTIRQ BIT(2) + #define URESIRQ BIT(3) + #define SUSPIRQ BIT(4) + #define NOVBUSIRQ BIT(5) + #define VBUSIRQ BIT(6) + #define URESDNIRQ BIT(7) + +#define MAX3420_REG_USBCTL 15 + #define HOSCSTEN BIT(7) + #define VBGATE BIT(6) + #define CHIPRES BIT(5) + #define PWRDOWN BIT(4) + #define CONNECT BIT(3) + #define SIGRWU BIT(2) + +#define MAX3420_REG_CPUCTL 16 + #define IE BIT(0) + +#define MAX3420_REG_PINCTL 17 + #define EP3INAK BIT(7) + #define EP2INAK BIT(6) + #define EP0INAK BIT(5) + #define FDUPSPI BIT(4) + #define INTLEVEL BIT(3) + #define POSINT BIT(2) + #define GPXB BIT(1) + #define GPXA BIT(0) + +#define MAX3420_REG_REVISION 18 + +#define MAX3420_REG_FNADDR 19 + #define FNADDR_MASK 0x7f + +#define 
MAX3420_REG_IOPINS 20 +#define MAX3420_REG_IOPINS2 21 +#define MAX3420_REG_GPINIRQ 22 +#define MAX3420_REG_GPINIEN 23 +#define MAX3420_REG_GPINPOL 24 +#define MAX3420_REG_HIRQ 25 +#define MAX3420_REG_HIEN 26 +#define MAX3420_REG_MODE 27 +#define MAX3420_REG_PERADDR 28 +#define MAX3420_REG_HCTL 29 +#define MAX3420_REG_HXFR 30 +#define MAX3420_REG_HRSL 31 + +#define ENABLE_IRQ BIT(0) +#define IOPIN_UPDATE BIT(1) +#define REMOTE_WAKEUP BIT(2) +#define CONNECT_HOST GENMASK(4, 3) +#define HCONNECT (1 << 3) +#define HDISCONNECT (3 << 3) +#define UDC_START GENMASK(6, 5) +#define START (1 << 5) +#define STOP (3 << 5) +#define ENABLE_EP GENMASK(8, 7) +#define ENABLE (1 << 7) +#define DISABLE (3 << 7) +#define STALL_EP GENMASK(10, 9) +#define STALL (1 << 9) +#define UNSTALL (3 << 9) + +#define MAX3420_CMD(c) FIELD_PREP(GENMASK(7, 3), c) +#define MAX3420_SPI_CMD_RD(c) (MAX3420_CMD(c) | (0 << 1)) +#define MAX3420_SPI_CMD_WR(c) (MAX3420_CMD(c) | (1 << 1)) + +struct max3420_req { + struct usb_request usb_req; + struct list_head queue; + struct max3420_ep *ep; +}; + +struct max3420_ep { + struct usb_ep ep_usb; + struct max3420_udc *udc; + struct list_head queue; + char name[MAX3420_EPNAME_SIZE]; + unsigned int maxpacket; + spinlock_t lock; + int halted; + u32 todo; + int id; +}; + +struct max3420_udc { + struct usb_gadget gadget; + struct max3420_ep ep[MAX3420_MAX_EPS]; + struct usb_gadget_driver *driver; + struct task_struct *thread_task; + int remote_wkp, is_selfpowered; + bool vbus_active, softconnect; + struct usb_ctrlrequest setup; + struct mutex spi_bus_mutex; + struct max3420_req ep0req; + struct spi_device *spi; + struct device *dev; + spinlock_t lock; + bool suspended; + u8 ep0buf[64]; + u32 todo; +}; + +#define to_max3420_req(r) container_of((r), struct max3420_req, usb_req) +#define to_max3420_ep(e) container_of((e), struct max3420_ep, ep_usb) +#define to_udc(g) container_of((g), struct max3420_udc, gadget) + +#define DRIVER_DESC "MAX3420 USB Device-Mode Driver" +static const char driver_name[] = "max3420-udc"; + +/* Control endpoint configuration.*/ +static const struct usb_endpoint_descriptor ep0_desc = { + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_CONTROL, + .wMaxPacketSize = cpu_to_le16(MAX3420_EP_MAX_PACKET), +}; + +static void spi_ack_ctrl(struct max3420_udc *udc) +{ + struct spi_device *spi = udc->spi; + struct spi_transfer transfer; + struct spi_message msg; + u8 txdata[1]; + + memset(&transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + + txdata[0] = MAX3420_ACKSTAT; + transfer.tx_buf = txdata; + transfer.len = 1; + + spi_message_add_tail(&transfer, &msg); + spi_sync(spi, &msg); +} + +static u8 spi_rd8_ack(struct max3420_udc *udc, u8 reg, int actstat) +{ + struct spi_device *spi = udc->spi; + struct spi_transfer transfer; + struct spi_message msg; + u8 txdata[2], rxdata[2]; + + memset(&transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + + txdata[0] = MAX3420_SPI_CMD_RD(reg) | (actstat ? 
MAX3420_ACKSTAT : 0); + transfer.tx_buf = txdata; + transfer.rx_buf = rxdata; + transfer.len = 2; + + spi_message_add_tail(&transfer, &msg); + spi_sync(spi, &msg); + + return rxdata[1]; +} + +static u8 spi_rd8(struct max3420_udc *udc, u8 reg) +{ + return spi_rd8_ack(udc, reg, 0); +} + +static void spi_wr8_ack(struct max3420_udc *udc, u8 reg, u8 val, int actstat) +{ + struct spi_device *spi = udc->spi; + struct spi_transfer transfer; + struct spi_message msg; + u8 txdata[2]; + + memset(&transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + + txdata[0] = MAX3420_SPI_CMD_WR(reg) | (actstat ? MAX3420_ACKSTAT : 0); + txdata[1] = val; + + transfer.tx_buf = txdata; + transfer.len = 2; + + spi_message_add_tail(&transfer, &msg); + spi_sync(spi, &msg); +} + +static void spi_wr8(struct max3420_udc *udc, u8 reg, u8 val) +{ + spi_wr8_ack(udc, reg, val, 0); +} + +static void spi_rd_buf(struct max3420_udc *udc, u8 reg, void *buf, u8 len) +{ + struct spi_device *spi = udc->spi; + struct spi_transfer transfer; + struct spi_message msg; + u8 local_buf[MAX3420_EP_MAX_PACKET + 1] = {}; + + memset(&transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + + local_buf[0] = MAX3420_SPI_CMD_RD(reg); + transfer.tx_buf = &local_buf[0]; + transfer.rx_buf = &local_buf[0]; + transfer.len = len + 1; + + spi_message_add_tail(&transfer, &msg); + spi_sync(spi, &msg); + + memcpy(buf, &local_buf[1], len); +} + +static void spi_wr_buf(struct max3420_udc *udc, u8 reg, void *buf, u8 len) +{ + struct spi_device *spi = udc->spi; + struct spi_transfer transfer; + struct spi_message msg; + u8 local_buf[MAX3420_EP_MAX_PACKET + 1] = {}; + + memset(&transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + + local_buf[0] = MAX3420_SPI_CMD_WR(reg); + memcpy(&local_buf[1], buf, len); + + transfer.tx_buf = local_buf; + transfer.len = len + 1; + + spi_message_add_tail(&transfer, &msg); + spi_sync(spi, &msg); +} + +static int spi_max3420_enable(struct max3420_ep *ep) +{ + struct max3420_udc *udc = ep->udc; + unsigned long flags; + u8 epdis, epien; + int todo; + + spin_lock_irqsave(&ep->lock, flags); + todo = ep->todo & ENABLE_EP; + ep->todo &= ~ENABLE_EP; + spin_unlock_irqrestore(&ep->lock, flags); + + if (!todo || ep->id == 0) + return false; + + epien = spi_rd8(udc, MAX3420_REG_EPIEN); + epdis = spi_rd8(udc, MAX3420_REG_CLRTOGS); + + if (todo == ENABLE) { + epdis &= ~BIT(ep->id + 4); + epien |= BIT(ep->id + 1); + } else { + epdis |= BIT(ep->id + 4); + epien &= ~BIT(ep->id + 1); + } + + spi_wr8(udc, MAX3420_REG_CLRTOGS, epdis); + spi_wr8(udc, MAX3420_REG_EPIEN, epien); + + return true; +} + +static int spi_max3420_stall(struct max3420_ep *ep) +{ + struct max3420_udc *udc = ep->udc; + unsigned long flags; + u8 epstalls; + int todo; + + spin_lock_irqsave(&ep->lock, flags); + todo = ep->todo & STALL_EP; + ep->todo &= ~STALL_EP; + spin_unlock_irqrestore(&ep->lock, flags); + + if (!todo || ep->id == 0) + return false; + + epstalls = spi_rd8(udc, MAX3420_REG_EPSTALLS); + if (todo == STALL) { + ep->halted = 1; + epstalls |= BIT(ep->id + 1); + } else { + u8 clrtogs; + + ep->halted = 0; + epstalls &= ~BIT(ep->id + 1); + clrtogs = spi_rd8(udc, MAX3420_REG_CLRTOGS); + clrtogs |= BIT(ep->id + 1); + spi_wr8(udc, MAX3420_REG_CLRTOGS, clrtogs); + } + spi_wr8(udc, MAX3420_REG_EPSTALLS, epstalls | ACKSTAT); + + return true; +} + +static int spi_max3420_rwkup(struct max3420_udc *udc) +{ + unsigned long flags; + int wake_remote; + u8 usbctl; + + spin_lock_irqsave(&udc->lock, flags); + wake_remote = udc->todo & REMOTE_WAKEUP; + udc->todo &= 
~REMOTE_WAKEUP; + spin_unlock_irqrestore(&udc->lock, flags); + + if (!wake_remote || !udc->suspended) + return false; + + /* Set Remote-WkUp Signal*/ + usbctl = spi_rd8(udc, MAX3420_REG_USBCTL); + usbctl |= SIGRWU; + spi_wr8(udc, MAX3420_REG_USBCTL, usbctl); + + msleep_interruptible(5); + + /* Clear Remote-WkUp Signal*/ + usbctl = spi_rd8(udc, MAX3420_REG_USBCTL); + usbctl &= ~SIGRWU; + spi_wr8(udc, MAX3420_REG_USBCTL, usbctl); + + udc->suspended = false; + + return true; +} + +static void max3420_nuke(struct max3420_ep *ep, int status); +static void __max3420_stop(struct max3420_udc *udc) +{ + u8 val; + int i; + + /* clear all pending requests */ + for (i = 1; i < MAX3420_MAX_EPS; i++) + max3420_nuke(&udc->ep[i], -ECONNRESET); + + /* Disable IRQ to CPU */ + spi_wr8(udc, MAX3420_REG_CPUCTL, 0); + + val = spi_rd8(udc, MAX3420_REG_USBCTL); + val |= PWRDOWN; + if (udc->is_selfpowered) + val &= ~HOSCSTEN; + else + val |= HOSCSTEN; + spi_wr8(udc, MAX3420_REG_USBCTL, val); +} + +static void __max3420_start(struct max3420_udc *udc) +{ + u8 val; + + /* Need this delay if bus-powered, + * but even for self-powered it helps stability + */ + msleep_interruptible(250); + + /* configure SPI */ + spi_wr8(udc, MAX3420_REG_PINCTL, FDUPSPI); + + /* Chip Reset */ + spi_wr8(udc, MAX3420_REG_USBCTL, CHIPRES); + msleep_interruptible(5); + spi_wr8(udc, MAX3420_REG_USBCTL, 0); + + /* Poll for OSC to stabilize */ + while (1) { + val = spi_rd8(udc, MAX3420_REG_USBIRQ); + if (val & OSCOKIRQ) + break; + cond_resched(); + } + + /* Enable PULL-UP only when Vbus detected */ + val = spi_rd8(udc, MAX3420_REG_USBCTL); + val |= VBGATE | CONNECT; + spi_wr8(udc, MAX3420_REG_USBCTL, val); + + val = URESDNIRQ | URESIRQ; + if (udc->is_selfpowered) + val |= NOVBUSIRQ; + spi_wr8(udc, MAX3420_REG_USBIEN, val); + + /* Enable only EP0 interrupts */ + val = IN0BAVIRQ | OUT0DAVIRQ | SUDAVIRQ; + spi_wr8(udc, MAX3420_REG_EPIEN, val); + + /* Enable IRQ to CPU */ + spi_wr8(udc, MAX3420_REG_CPUCTL, IE); +} + +static int max3420_start(struct max3420_udc *udc) +{ + unsigned long flags; + int todo; + + spin_lock_irqsave(&udc->lock, flags); + todo = udc->todo & UDC_START; + udc->todo &= ~UDC_START; + spin_unlock_irqrestore(&udc->lock, flags); + + if (!todo) + return false; + + if (udc->vbus_active && udc->softconnect) + __max3420_start(udc); + else + __max3420_stop(udc); + + return true; +} + +static irqreturn_t max3420_vbus_handler(int irq, void *dev_id) +{ + struct max3420_udc *udc = dev_id; + unsigned long flags; + + spin_lock_irqsave(&udc->lock, flags); + /* its a vbus change interrupt */ + udc->vbus_active = !udc->vbus_active; + udc->todo |= UDC_START; + usb_udc_vbus_handler(&udc->gadget, udc->vbus_active); + usb_gadget_set_state(&udc->gadget, udc->vbus_active + ? 
USB_STATE_POWERED : USB_STATE_NOTATTACHED); + spin_unlock_irqrestore(&udc->lock, flags); + + if (udc->thread_task && + udc->thread_task->state != TASK_RUNNING) + wake_up_process(udc->thread_task); + + return IRQ_HANDLED; +} + +static irqreturn_t max3420_irq_handler(int irq, void *dev_id) +{ + struct max3420_udc *udc = dev_id; + struct spi_device *spi = udc->spi; + unsigned long flags; + + spin_lock_irqsave(&udc->lock, flags); + if ((udc->todo & ENABLE_IRQ) == 0) { + disable_irq_nosync(spi->irq); + udc->todo |= ENABLE_IRQ; + } + spin_unlock_irqrestore(&udc->lock, flags); + + if (udc->thread_task && + udc->thread_task->state != TASK_RUNNING) + wake_up_process(udc->thread_task); + + return IRQ_HANDLED; +} + +static void max3420_getstatus(struct max3420_udc *udc) +{ + struct max3420_ep *ep; + u16 status = 0; + + switch (udc->setup.bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + /* Get device status */ + status = udc->gadget.is_selfpowered << USB_DEVICE_SELF_POWERED; + status |= (udc->remote_wkp << USB_DEVICE_REMOTE_WAKEUP); + break; + case USB_RECIP_INTERFACE: + if (udc->driver->setup(&udc->gadget, &udc->setup) < 0) + goto stall; + break; + case USB_RECIP_ENDPOINT: + ep = &udc->ep[udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK]; + if (udc->setup.wIndex & USB_DIR_IN) { + if (!ep->ep_usb.caps.dir_in) + goto stall; + } else { + if (!ep->ep_usb.caps.dir_out) + goto stall; + } + if (ep->halted) + status = 1 << USB_ENDPOINT_HALT; + break; + default: + goto stall; + } + + status = cpu_to_le16(status); + spi_wr_buf(udc, MAX3420_REG_EP0FIFO, &status, 2); + spi_wr8_ack(udc, MAX3420_REG_EP0BC, 2, 1); + return; +stall: + dev_err(udc->dev, "Can't respond to getstatus request\n"); + spi_wr8(udc, MAX3420_REG_EPSTALLS, STLEP0IN | STLEP0OUT | STLSTAT); +} + +static void max3420_set_clear_feature(struct max3420_udc *udc) +{ + struct max3420_ep *ep; + int set = udc->setup.bRequest == USB_REQ_SET_FEATURE; + unsigned long flags; + int id; + + switch (udc->setup.bRequestType) { + case USB_RECIP_DEVICE: + if (udc->setup.wValue != USB_DEVICE_REMOTE_WAKEUP) + break; + + if (udc->setup.bRequest == USB_REQ_SET_FEATURE) + udc->remote_wkp = 1; + else + udc->remote_wkp = 0; + + return spi_ack_ctrl(udc); + + case USB_RECIP_ENDPOINT: + if (udc->setup.wValue != USB_ENDPOINT_HALT) + break; + + id = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; + ep = &udc->ep[id]; + + spin_lock_irqsave(&ep->lock, flags); + ep->todo &= ~STALL_EP; + if (set) + ep->todo |= STALL; + else + ep->todo |= UNSTALL; + spin_unlock_irqrestore(&ep->lock, flags); + + spi_max3420_stall(ep); + return; + default: + break; + } + + dev_err(udc->dev, "Can't respond to SET/CLEAR FEATURE\n"); + spi_wr8(udc, MAX3420_REG_EPSTALLS, STLEP0IN | STLEP0OUT | STLSTAT); +} + +static void max3420_handle_setup(struct max3420_udc *udc) +{ + struct usb_ctrlrequest setup; + u8 addr; + + spi_rd_buf(udc, MAX3420_REG_SUDFIFO, (void *)&setup, 8); + + udc->setup = setup; + udc->setup.wValue = cpu_to_le16(setup.wValue); + udc->setup.wIndex = cpu_to_le16(setup.wIndex); + udc->setup.wLength = cpu_to_le16(setup.wLength); + + switch (udc->setup.bRequest) { + case USB_REQ_GET_STATUS: + /* Data+Status phase form udc */ + if ((udc->setup.bRequestType & + (USB_DIR_IN | USB_TYPE_MASK)) != + (USB_DIR_IN | USB_TYPE_STANDARD)) { + break; + } + return max3420_getstatus(udc); + case USB_REQ_SET_ADDRESS: + /* Status phase from udc */ + if (udc->setup.bRequestType != (USB_DIR_OUT | + USB_TYPE_STANDARD | USB_RECIP_DEVICE)) { + break; + } + addr = spi_rd8_ack(udc, MAX3420_REG_FNADDR, 1); + 
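The ACKSTAT flag passed to spi_rd8_ack() above ends up as bit 0 of the SPI command byte, next to the register number in bits 7:3 and the read/write direction in bit 1 (the MAX3420_SPI_CMD_RD()/MAX3420_SPI_CMD_WR() macros defined earlier). A minimal sketch of how such a command byte is assembled, using the register and shift macros from this file and a hypothetical helper name:

/* Hypothetical helper mirroring MAX3420_CMD()/MAX3420_SPI_CMD_RD()/_WR(). */
static inline u8 max3420_cmd_byte(u8 reg, bool write, bool ackstat)
{
	u8 cmd = reg << MAX3420_SPI_REG_SHIFT;		/* register number, bits 7:3 */

	if (write)
		cmd |= BIT(MAX3420_SPI_DIR_SHIFT);	/* bit 1: 1 = write, 0 = read */
	if (ackstat)
		cmd |= MAX3420_ACKSTAT;			/* bit 0: acknowledge the status stage */
	return cmd;
}

/*
 * For example, reading FNADDR while acknowledging the status stage, as the
 * SET_ADDRESS handling above does:
 *   max3420_cmd_byte(MAX3420_REG_FNADDR, false, true) == (19 << 3) | 1 == 0x99
 */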
dev_dbg(udc->dev, "Assigned Address=%d\n", udc->setup.wValue); + return; + case USB_REQ_CLEAR_FEATURE: + case USB_REQ_SET_FEATURE: + /* Requests with no data phase, status phase from udc */ + if ((udc->setup.bRequestType & USB_TYPE_MASK) + != USB_TYPE_STANDARD) + break; + return max3420_set_clear_feature(udc); + default: + break; + } + + if (udc->driver->setup(&udc->gadget, &setup) < 0) { + /* Stall EP0 */ + spi_wr8(udc, MAX3420_REG_EPSTALLS, + STLEP0IN | STLEP0OUT | STLSTAT); + } +} + +static void max3420_req_done(struct max3420_req *req, int status) +{ + struct max3420_ep *ep = req->ep; + struct max3420_udc *udc = ep->udc; + + if (req->usb_req.status == -EINPROGRESS) + req->usb_req.status = status; + else + status = req->usb_req.status; + + if (status && status != -ESHUTDOWN) + dev_err(udc->dev, "%s done %p, status %d\n", + ep->ep_usb.name, req, status); + + if (req->usb_req.complete) + req->usb_req.complete(&ep->ep_usb, &req->usb_req); +} + +static int max3420_do_data(struct max3420_udc *udc, int ep_id, int in) +{ + struct max3420_ep *ep = &udc->ep[ep_id]; + struct max3420_req *req; + int done, length, psz; + void *buf; + + if (list_empty(&ep->queue)) + return false; + + req = list_first_entry(&ep->queue, struct max3420_req, queue); + buf = req->usb_req.buf + req->usb_req.actual; + + psz = ep->ep_usb.maxpacket; + length = req->usb_req.length - req->usb_req.actual; + length = min(length, psz); + + if (length == 0) { + done = 1; + goto xfer_done; + } + + done = 0; + if (in) { + prefetch(buf); + spi_wr_buf(udc, MAX3420_REG_EP0FIFO + ep_id, buf, length); + spi_wr8(udc, MAX3420_REG_EP0BC + ep_id, length); + if (length < psz) + done = 1; + } else { + psz = spi_rd8(udc, MAX3420_REG_EP0BC + ep_id); + length = min(length, psz); + prefetchw(buf); + spi_rd_buf(udc, MAX3420_REG_EP0FIFO + ep_id, buf, length); + if (length < ep->ep_usb.maxpacket) + done = 1; + } + + req->usb_req.actual += length; + + if (req->usb_req.actual == req->usb_req.length) + done = 1; + +xfer_done: + if (done) { + unsigned long flags; + + spin_lock_irqsave(&ep->lock, flags); + list_del_init(&req->queue); + spin_unlock_irqrestore(&ep->lock, flags); + + if (ep_id == 0) + spi_ack_ctrl(udc); + + max3420_req_done(req, 0); + } + + return true; +} + +static int max3420_handle_irqs(struct max3420_udc *udc) +{ + u8 epien, epirq, usbirq, usbien, reg[4]; + bool ret = false; + + spi_rd_buf(udc, MAX3420_REG_EPIRQ, reg, 4); + epirq = reg[0]; + epien = reg[1]; + usbirq = reg[2]; + usbien = reg[3]; + + usbirq &= usbien; + epirq &= epien; + + if (epirq & SUDAVIRQ) { + spi_wr8(udc, MAX3420_REG_EPIRQ, SUDAVIRQ); + max3420_handle_setup(udc); + return true; + } + + if (usbirq & VBUSIRQ) { + spi_wr8(udc, MAX3420_REG_USBIRQ, VBUSIRQ); + dev_dbg(udc->dev, "Cable plugged in\n"); + return true; + } + + if (usbirq & NOVBUSIRQ) { + spi_wr8(udc, MAX3420_REG_USBIRQ, NOVBUSIRQ); + dev_dbg(udc->dev, "Cable pulled out\n"); + return true; + } + + if (usbirq & URESIRQ) { + spi_wr8(udc, MAX3420_REG_USBIRQ, URESIRQ); + dev_dbg(udc->dev, "USB Reset - Start\n"); + return true; + } + + if (usbirq & URESDNIRQ) { + spi_wr8(udc, MAX3420_REG_USBIRQ, URESDNIRQ); + dev_dbg(udc->dev, "USB Reset - END\n"); + spi_wr8(udc, MAX3420_REG_USBIEN, URESDNIRQ | URESIRQ); + spi_wr8(udc, MAX3420_REG_EPIEN, SUDAVIRQ | IN0BAVIRQ + | OUT0DAVIRQ); + return true; + } + + if (usbirq & SUSPIRQ) { + spi_wr8(udc, MAX3420_REG_USBIRQ, SUSPIRQ); + dev_dbg(udc->dev, "USB Suspend - Enter\n"); + udc->suspended = true; + return true; + } + + if (usbirq & BUSACTIRQ) { + spi_wr8(udc, 
MAX3420_REG_USBIRQ, BUSACTIRQ); + dev_dbg(udc->dev, "USB Suspend - Exit\n"); + udc->suspended = false; + return true; + } + + if (usbirq & RWUDNIRQ) { + spi_wr8(udc, MAX3420_REG_USBIRQ, RWUDNIRQ); + dev_dbg(udc->dev, "Asked Host to wakeup\n"); + return true; + } + + if (usbirq & OSCOKIRQ) { + spi_wr8(udc, MAX3420_REG_USBIRQ, OSCOKIRQ); + dev_dbg(udc->dev, "Osc stabilized, start work\n"); + return true; + } + + if (epirq & OUT0DAVIRQ && max3420_do_data(udc, 0, 0)) { + spi_wr8_ack(udc, MAX3420_REG_EPIRQ, OUT0DAVIRQ, 1); + ret = true; + } + + if (epirq & IN0BAVIRQ && max3420_do_data(udc, 0, 1)) + ret = true; + + if (epirq & OUT1DAVIRQ && max3420_do_data(udc, 1, 0)) { + spi_wr8_ack(udc, MAX3420_REG_EPIRQ, OUT1DAVIRQ, 1); + ret = true; + } + + if (epirq & IN2BAVIRQ && max3420_do_data(udc, 2, 1)) + ret = true; + + if (epirq & IN3BAVIRQ && max3420_do_data(udc, 3, 1)) + ret = true; + + return ret; +} + +static int max3420_thread(void *dev_id) +{ + struct max3420_udc *udc = dev_id; + struct spi_device *spi = udc->spi; + int i, loop_again = 1; + unsigned long flags; + + while (!kthread_should_stop()) { + if (!loop_again) { + ktime_t kt = ns_to_ktime(1000 * 1000 * 250); /* 250ms */ + + set_current_state(TASK_INTERRUPTIBLE); + + spin_lock_irqsave(&udc->lock, flags); + if (udc->todo & ENABLE_IRQ) { + enable_irq(spi->irq); + udc->todo &= ~ENABLE_IRQ; + } + spin_unlock_irqrestore(&udc->lock, flags); + + schedule_hrtimeout(&kt, HRTIMER_MODE_REL); + } + loop_again = 0; + + mutex_lock(&udc->spi_bus_mutex); + + /* If bus-vbus_active and disconnected */ + if (!udc->vbus_active || !udc->softconnect) + goto loop; + + if (max3420_start(udc)) { + loop_again = 1; + goto loop; + } + + if (max3420_handle_irqs(udc)) { + loop_again = 1; + goto loop; + } + + if (spi_max3420_rwkup(udc)) { + loop_again = 1; + goto loop; + } + + max3420_do_data(udc, 0, 1); /* get done with the EP0 ZLP */ + + for (i = 1; i < MAX3420_MAX_EPS; i++) { + struct max3420_ep *ep = &udc->ep[i]; + + if (spi_max3420_enable(ep)) + loop_again = 1; + if (spi_max3420_stall(ep)) + loop_again = 1; + } +loop: + mutex_unlock(&udc->spi_bus_mutex); + } + + set_current_state(TASK_RUNNING); + dev_info(udc->dev, "SPI thread exiting"); + return 0; +} + +static int max3420_ep_set_halt(struct usb_ep *_ep, int stall) +{ + struct max3420_ep *ep = to_max3420_ep(_ep); + struct max3420_udc *udc = ep->udc; + unsigned long flags; + + spin_lock_irqsave(&ep->lock, flags); + + ep->todo &= ~STALL_EP; + if (stall) + ep->todo |= STALL; + else + ep->todo |= UNSTALL; + + spin_unlock_irqrestore(&ep->lock, flags); + + wake_up_process(udc->thread_task); + + dev_dbg(udc->dev, "%sStall %s\n", stall ? 
"" : "Un", ep->name); + return 0; +} + +static int __max3420_ep_enable(struct max3420_ep *ep, + const struct usb_endpoint_descriptor *desc) +{ + unsigned int maxp = usb_endpoint_maxp(desc); + unsigned long flags; + + spin_lock_irqsave(&ep->lock, flags); + ep->ep_usb.desc = desc; + ep->ep_usb.maxpacket = maxp; + + ep->todo &= ~ENABLE_EP; + ep->todo |= ENABLE; + spin_unlock_irqrestore(&ep->lock, flags); + + return 0; +} + +static int max3420_ep_enable(struct usb_ep *_ep, + const struct usb_endpoint_descriptor *desc) +{ + struct max3420_ep *ep = to_max3420_ep(_ep); + struct max3420_udc *udc = ep->udc; + + __max3420_ep_enable(ep, desc); + + wake_up_process(udc->thread_task); + + return 0; +} + +static void max3420_nuke(struct max3420_ep *ep, int status) +{ + struct max3420_req *req, *r; + unsigned long flags; + + spin_lock_irqsave(&ep->lock, flags); + + list_for_each_entry_safe(req, r, &ep->queue, queue) { + list_del_init(&req->queue); + + spin_unlock_irqrestore(&ep->lock, flags); + max3420_req_done(req, status); + spin_lock_irqsave(&ep->lock, flags); + } + + spin_unlock_irqrestore(&ep->lock, flags); +} + +static void __max3420_ep_disable(struct max3420_ep *ep) +{ + struct max3420_udc *udc = ep->udc; + unsigned long flags; + + spin_lock_irqsave(&ep->lock, flags); + + ep->ep_usb.desc = NULL; + + ep->todo &= ~ENABLE_EP; + ep->todo |= DISABLE; + + spin_unlock_irqrestore(&ep->lock, flags); + + dev_dbg(udc->dev, "Disabled %s\n", ep->name); +} + +static int max3420_ep_disable(struct usb_ep *_ep) +{ + struct max3420_ep *ep = to_max3420_ep(_ep); + struct max3420_udc *udc = ep->udc; + + max3420_nuke(ep, -ESHUTDOWN); + + __max3420_ep_disable(ep); + + wake_up_process(udc->thread_task); + + return 0; +} + +static struct usb_request *max3420_alloc_request(struct usb_ep *_ep, + gfp_t gfp_flags) +{ + struct max3420_ep *ep = to_max3420_ep(_ep); + struct max3420_req *req; + + req = kzalloc(sizeof(*req), gfp_flags); + if (!req) + return NULL; + + req->ep = ep; + + return &req->usb_req; +} + +static void max3420_free_request(struct usb_ep *_ep, struct usb_request *_req) +{ + kfree(to_max3420_req(_req)); +} + +static int max3420_ep_queue(struct usb_ep *_ep, struct usb_request *_req, + gfp_t ignored) +{ + struct max3420_req *req = to_max3420_req(_req); + struct max3420_ep *ep = to_max3420_ep(_ep); + struct max3420_udc *udc = ep->udc; + unsigned long flags; + + _req->status = -EINPROGRESS; + _req->actual = 0; + + spin_lock_irqsave(&ep->lock, flags); + list_add_tail(&req->queue, &ep->queue); + spin_unlock_irqrestore(&ep->lock, flags); + + wake_up_process(udc->thread_task); + return 0; +} + +static int max3420_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) +{ + struct max3420_req *t, *req = to_max3420_req(_req); + struct max3420_ep *ep = to_max3420_ep(_ep); + unsigned long flags; + + spin_lock_irqsave(&ep->lock, flags); + + /* Pluck the descriptor from queue */ + list_for_each_entry(t, &ep->queue, queue) + if (t == req) { + list_del_init(&req->queue); + break; + } + + spin_unlock_irqrestore(&ep->lock, flags); + + if (t == req) + max3420_req_done(req, -ECONNRESET); + + return 0; +} + +static const struct usb_ep_ops max3420_ep_ops = { + .enable = max3420_ep_enable, + .disable = max3420_ep_disable, + .alloc_request = max3420_alloc_request, + .free_request = max3420_free_request, + .queue = max3420_ep_queue, + .dequeue = max3420_ep_dequeue, + .set_halt = max3420_ep_set_halt, +}; + +static int max3420_wakeup(struct usb_gadget *gadget) +{ + struct max3420_udc *udc = to_udc(gadget); + unsigned long flags; + int 
ret = -EINVAL; + + spin_lock_irqsave(&udc->lock, flags); + + /* Only if wakeup allowed by host */ + if (udc->remote_wkp) { + udc->todo |= REMOTE_WAKEUP; + ret = 0; + } + + spin_unlock_irqrestore(&udc->lock, flags); + + if (udc->thread_task && + udc->thread_task->state != TASK_RUNNING) + wake_up_process(udc->thread_task); + return ret; +} + +static int max3420_udc_start(struct usb_gadget *gadget, + struct usb_gadget_driver *driver) +{ + struct max3420_udc *udc = to_udc(gadget); + unsigned long flags; + + spin_lock_irqsave(&udc->lock, flags); + /* hook up the driver */ + driver->driver.bus = NULL; + udc->driver = driver; + udc->gadget.speed = USB_SPEED_FULL; + + udc->gadget.is_selfpowered = udc->is_selfpowered; + udc->remote_wkp = 0; + udc->softconnect = true; + udc->todo |= UDC_START; + spin_unlock_irqrestore(&udc->lock, flags); + + if (udc->thread_task && + udc->thread_task->state != TASK_RUNNING) + wake_up_process(udc->thread_task); + + return 0; +} + +static int max3420_udc_stop(struct usb_gadget *gadget) +{ + struct max3420_udc *udc = to_udc(gadget); + unsigned long flags; + + spin_lock_irqsave(&udc->lock, flags); + udc->is_selfpowered = udc->gadget.is_selfpowered; + udc->gadget.speed = USB_SPEED_UNKNOWN; + udc->driver = NULL; + udc->softconnect = false; + udc->todo |= UDC_START; + spin_unlock_irqrestore(&udc->lock, flags); + + if (udc->thread_task && + udc->thread_task->state != TASK_RUNNING) + wake_up_process(udc->thread_task); + + return 0; +} + +static const struct usb_gadget_ops max3420_udc_ops = { + .udc_start = max3420_udc_start, + .udc_stop = max3420_udc_stop, + .wakeup = max3420_wakeup, +}; + +static void max3420_eps_init(struct max3420_udc *udc) +{ + int idx; + + INIT_LIST_HEAD(&udc->gadget.ep_list); + + for (idx = 0; idx < MAX3420_MAX_EPS; idx++) { + struct max3420_ep *ep = &udc->ep[idx]; + + spin_lock_init(&ep->lock); + INIT_LIST_HEAD(&ep->queue); + + ep->udc = udc; + ep->id = idx; + ep->halted = 0; + ep->maxpacket = 0; + ep->ep_usb.name = ep->name; + ep->ep_usb.ops = &max3420_ep_ops; + usb_ep_set_maxpacket_limit(&ep->ep_usb, MAX3420_EP_MAX_PACKET); + + if (idx == 0) { /* For EP0 */ + ep->ep_usb.desc = &ep0_desc; + ep->ep_usb.maxpacket = usb_endpoint_maxp(&ep0_desc); + ep->ep_usb.caps.type_control = true; + ep->ep_usb.caps.dir_in = true; + ep->ep_usb.caps.dir_out = true; + snprintf(ep->name, MAX3420_EPNAME_SIZE, "ep0"); + continue; + } + + if (idx == 1) { /* EP1 is OUT */ + ep->ep_usb.caps.dir_in = false; + ep->ep_usb.caps.dir_out = true; + snprintf(ep->name, MAX3420_EPNAME_SIZE, "ep1-bulk-out"); + } else { /* EP2 & EP3 are IN */ + ep->ep_usb.caps.dir_in = true; + ep->ep_usb.caps.dir_out = false; + snprintf(ep->name, MAX3420_EPNAME_SIZE, + "ep%d-bulk-in", idx); + } + ep->ep_usb.caps.type_iso = false; + ep->ep_usb.caps.type_int = false; + ep->ep_usb.caps.type_bulk = true; + + list_add_tail(&ep->ep_usb.ep_list, + &udc->gadget.ep_list); + } +} + +static int max3420_probe(struct spi_device *spi) +{ + struct max3420_udc *udc; + int err, irq; + u8 reg[8]; + + if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) { + dev_err(&spi->dev, "UDC needs full duplex to work\n"); + return -EINVAL; + } + + spi->mode = SPI_MODE_3; + spi->bits_per_word = 8; + + err = spi_setup(spi); + if (err) { + dev_err(&spi->dev, "Unable to setup SPI bus\n"); + return -EFAULT; + } + + udc = devm_kzalloc(&spi->dev, sizeof(*udc), GFP_KERNEL); + if (!udc) + return -ENOMEM; + + udc->spi = spi; + + udc->remote_wkp = 0; + + /* Setup gadget structure */ + udc->gadget.ops = &max3420_udc_ops; + udc->gadget.max_speed = 
USB_SPEED_FULL; + udc->gadget.speed = USB_SPEED_UNKNOWN; + udc->gadget.ep0 = &udc->ep[0].ep_usb; + udc->gadget.name = driver_name; + + spin_lock_init(&udc->lock); + mutex_init(&udc->spi_bus_mutex); + + udc->ep0req.ep = &udc->ep[0]; + udc->ep0req.usb_req.buf = udc->ep0buf; + INIT_LIST_HEAD(&udc->ep0req.queue); + + /* setup Endpoints */ + max3420_eps_init(udc); + + /* configure SPI */ + spi_rd_buf(udc, MAX3420_REG_EPIRQ, reg, 8); + spi_wr8(udc, MAX3420_REG_PINCTL, FDUPSPI); + + err = usb_add_gadget_udc(&spi->dev, &udc->gadget); + if (err) + return err; + + udc->dev = &udc->gadget.dev; + + spi_set_drvdata(spi, udc); + + irq = of_irq_get_byname(spi->dev.of_node, "udc"); + err = devm_request_irq(&spi->dev, irq, max3420_irq_handler, 0, + "max3420", udc); + if (err < 0) + return err; + + udc->thread_task = kthread_create(max3420_thread, udc, + "max3420-thread"); + if (IS_ERR(udc->thread_task)) + return PTR_ERR(udc->thread_task); + + irq = of_irq_get_byname(spi->dev.of_node, "vbus"); + if (irq <= 0) { /* no vbus irq implies self-powered design */ + udc->is_selfpowered = 1; + udc->vbus_active = true; + udc->todo |= UDC_START; + usb_udc_vbus_handler(&udc->gadget, udc->vbus_active); + usb_gadget_set_state(&udc->gadget, USB_STATE_POWERED); + max3420_start(udc); + } else { + udc->is_selfpowered = 0; + /* Detect current vbus status */ + spi_rd_buf(udc, MAX3420_REG_EPIRQ, reg, 8); + if (reg[7] != 0xff) + udc->vbus_active = true; + + err = devm_request_irq(&spi->dev, irq, + max3420_vbus_handler, 0, "vbus", udc); + if (err < 0) + return err; + } + + return 0; +} + +static int max3420_remove(struct spi_device *spi) +{ + struct max3420_udc *udc = spi_get_drvdata(spi); + unsigned long flags; + + usb_del_gadget_udc(&udc->gadget); + + spin_lock_irqsave(&udc->lock, flags); + + kthread_stop(udc->thread_task); + + spin_unlock_irqrestore(&udc->lock, flags); + + return 0; +} + +static const struct of_device_id max3420_udc_of_match[] = { + { .compatible = "maxim,max3420-udc"}, + { .compatible = "maxim,max3421-udc"}, + {}, +}; +MODULE_DEVICE_TABLE(of, max3420_udc_of_match); + +static struct spi_driver max3420_driver = { + .driver = { + .name = "max3420-udc", + .of_match_table = of_match_ptr(max3420_udc_of_match), + }, + .probe = max3420_probe, + .remove = max3420_remove, +}; + +module_spi_driver(max3420_driver); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 1fd1b9186e46..5eff85eeaa5a 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -2861,6 +2861,8 @@ static void ep_clear_seqnum(struct net2280_ep *ep) static void handle_stat0_irqs_superspeed(struct net2280 *dev, struct net2280_ep *ep, struct usb_ctrlrequest r) { + struct net2280_ep *e; + u16 status; int tmp = 0; #define w_value le16_to_cpu(r.wValue) @@ -2868,9 +2870,6 @@ static void handle_stat0_irqs_superspeed(struct net2280 *dev, #define w_length le16_to_cpu(r.wLength) switch (r.bRequest) { - struct net2280_ep *e; - u16 status; - case USB_REQ_SET_CONFIGURATION: dev->addressed_state = !w_value; goto usb3_delegate; @@ -3857,7 +3856,7 @@ MODULE_DEVICE_TABLE(pci, pci_ids); /* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver net2280_pci_driver = { - .name = (char *) driver_name, + .name = driver_name, .id_table = pci_ids, .probe = net2280_probe, diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c index 
bd12417996db..bf87c6c0d7f6 100644 --- a/drivers/usb/gadget/udc/omap_udc.c +++ b/drivers/usb/gadget/udc/omap_udc.c @@ -3001,7 +3001,7 @@ static struct platform_driver udc_driver = { .suspend = omap_udc_suspend, .resume = omap_udc_resume, .driver = { - .name = (char *) driver_name, + .name = driver_name, }, }; diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c index 582a16165ea9..537094b485bf 100644 --- a/drivers/usb/gadget/udc/r8a66597-udc.c +++ b/drivers/usb/gadget/udc/r8a66597-udc.c @@ -1968,7 +1968,7 @@ clean_up2: static struct platform_driver r8a66597_driver = { .remove = r8a66597_remove, .driver = { - .name = (char *) udc_name, + .name = udc_name, }, }; diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index c5c3c14df67a..0c418ce50ba0 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -2355,14 +2355,14 @@ static const struct usb_gadget_ops renesas_usb3_gadget_ops = { .set_selfpowered = renesas_usb3_set_selfpowered, }; -static enum usb_role renesas_usb3_role_switch_get(struct device *dev) +static enum usb_role renesas_usb3_role_switch_get(struct usb_role_switch *sw) { - struct renesas_usb3 *usb3 = dev_get_drvdata(dev); + struct renesas_usb3 *usb3 = usb_role_switch_get_drvdata(sw); enum usb_role cur_role; - pm_runtime_get_sync(dev); + pm_runtime_get_sync(usb3_to_dev(usb3)); cur_role = usb3_is_host(usb3) ? USB_ROLE_HOST : USB_ROLE_DEVICE; - pm_runtime_put(dev); + pm_runtime_put(usb3_to_dev(usb3)); return cur_role; } @@ -2372,7 +2372,7 @@ static void handle_ext_role_switch_states(struct device *dev, { struct renesas_usb3 *usb3 = dev_get_drvdata(dev); struct device *host = usb3->host_dev; - enum usb_role cur_role = renesas_usb3_role_switch_get(dev); + enum usb_role cur_role = renesas_usb3_role_switch_get(usb3->role_sw); switch (role) { case USB_ROLE_NONE: @@ -2424,7 +2424,7 @@ static void handle_role_switch_states(struct device *dev, { struct renesas_usb3 *usb3 = dev_get_drvdata(dev); struct device *host = usb3->host_dev; - enum usb_role cur_role = renesas_usb3_role_switch_get(dev); + enum usb_role cur_role = renesas_usb3_role_switch_get(usb3->role_sw); if (cur_role == USB_ROLE_HOST && role == USB_ROLE_DEVICE) { device_release_driver(host); @@ -2438,19 +2438,19 @@ static void handle_role_switch_states(struct device *dev, } } -static int renesas_usb3_role_switch_set(struct device *dev, +static int renesas_usb3_role_switch_set(struct usb_role_switch *sw, enum usb_role role) { - struct renesas_usb3 *usb3 = dev_get_drvdata(dev); + struct renesas_usb3 *usb3 = usb_role_switch_get_drvdata(sw); - pm_runtime_get_sync(dev); + pm_runtime_get_sync(usb3_to_dev(usb3)); if (usb3->role_sw_by_connector) - handle_ext_role_switch_states(dev, role); + handle_ext_role_switch_states(usb3_to_dev(usb3), role); else - handle_role_switch_states(dev, role); + handle_role_switch_states(usb3_to_dev(usb3), role); - pm_runtime_put(dev); + pm_runtime_put(usb3_to_dev(usb3)); return 0; } @@ -2831,6 +2831,8 @@ static int renesas_usb3_probe(struct platform_device *pdev) renesas_usb3_role_switch_desc.fwnode = dev_fwnode(&pdev->dev); } + renesas_usb3_role_switch_desc.driver_data = usb3; + INIT_WORK(&usb3->role_work, renesas_usb3_role_work); usb3->role_sw = usb_role_switch_register(&pdev->dev, &renesas_usb3_role_switch_desc); @@ -2906,7 +2908,7 @@ static struct platform_driver renesas_usb3_driver = { .probe = renesas_usb3_probe, .remove = renesas_usb3_remove, .driver = { - .name = (char *)udc_name, + 
.name = udc_name, .pm = &renesas_usb3_pm_ops, .of_match_table = of_match_ptr(usb3_of_match), }, diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c index 21252fbc0319..aaca1b0a2f59 100644 --- a/drivers/usb/gadget/udc/s3c-hsudc.c +++ b/drivers/usb/gadget/udc/s3c-hsudc.c @@ -1285,7 +1285,8 @@ static int s3c_hsudc_probe(struct platform_device *pdev) ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsudc->supplies), hsudc->supplies); if (ret != 0) { - dev_err(dev, "failed to request supplies: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to request supplies: %d\n", ret); goto err_supplies; } diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index 634c2c19a176..52a6add961f4 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -26,7 +26,9 @@ #include <linux/reset.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> +#include <linux/usb/otg.h> #include <linux/usb/role.h> +#include <linux/usb/phy.h> #include <linux/workqueue.h> /* XUSB_DEV registers */ @@ -477,17 +479,21 @@ struct tegra_xudc { struct clk_bulk_data *clks; - enum usb_role device_mode; - struct usb_role_switch *usb_role_sw; + bool device_mode; struct work_struct usb_role_sw_work; - struct phy *usb3_phy; - struct phy *utmi_phy; + struct phy **usb3_phy; + struct phy *curr_usb3_phy; + struct phy **utmi_phy; + struct phy *curr_utmi_phy; struct tegra_xudc_save_regs saved_regs; bool suspended; bool powergated; + struct usb_phy **usbphy; + struct notifier_block vbus_nb; + struct completion disconnect_complete; bool selfpowered; @@ -517,6 +523,7 @@ struct tegra_xudc_soc { unsigned int num_supplies; const char * const *clock_names; unsigned int num_clks; + unsigned int num_phys; bool u1_enable; bool u2_enable; bool lpm_enable; @@ -598,19 +605,18 @@ static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc) pm_runtime_get_sync(xudc->dev); - err = phy_power_on(xudc->utmi_phy); + err = phy_power_on(xudc->curr_utmi_phy); if (err < 0) dev_err(xudc->dev, "utmi power on failed %d\n", err); - err = phy_power_on(xudc->usb3_phy); + err = phy_power_on(xudc->curr_usb3_phy); if (err < 0) dev_err(xudc->dev, "usb3 phy power on failed %d\n", err); dev_dbg(xudc->dev, "device mode on\n"); - tegra_xusb_padctl_set_vbus_override(xudc->padctl, true); - - xudc->device_mode = USB_ROLE_DEVICE; + phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, + USB_ROLE_DEVICE); } static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc) @@ -625,7 +631,7 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc) reinit_completion(&xudc->disconnect_complete); - tegra_xusb_padctl_set_vbus_override(xudc->padctl, false); + phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_NONE); pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT; @@ -643,8 +649,6 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc) xudc_writel(xudc, val, PORTSC); } - xudc->device_mode = USB_ROLE_NONE; - /* Wait for disconnect event. */ if (connected) wait_for_completion(&xudc->disconnect_complete); @@ -652,11 +656,11 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc) /* Make sure interrupt handler has completed before powergating. 
*/ synchronize_irq(xudc->irq); - err = phy_power_off(xudc->utmi_phy); + err = phy_power_off(xudc->curr_utmi_phy); if (err < 0) dev_err(xudc->dev, "utmi_phy power off failed %d\n", err); - err = phy_power_off(xudc->usb3_phy); + err = phy_power_off(xudc->curr_usb3_phy); if (err < 0) dev_err(xudc->dev, "usb3_phy power off failed %d\n", err); @@ -668,29 +672,57 @@ static void tegra_xudc_usb_role_sw_work(struct work_struct *work) struct tegra_xudc *xudc = container_of(work, struct tegra_xudc, usb_role_sw_work); - if (!xudc->usb_role_sw || - usb_role_switch_get_role(xudc->usb_role_sw) == USB_ROLE_DEVICE) + if (xudc->device_mode) tegra_xudc_device_mode_on(xudc); else tegra_xudc_device_mode_off(xudc); +} + +static int tegra_xudc_get_phy_index(struct tegra_xudc *xudc, + struct usb_phy *usbphy) +{ + unsigned int i; + for (i = 0; i < xudc->soc->num_phys; i++) { + if (xudc->usbphy[i] && usbphy == xudc->usbphy[i]) + return i; + } + + dev_info(xudc->dev, "phy index could not be found for shared USB PHY"); + return -1; } -static int tegra_xudc_usb_role_sw_set(struct device *dev, enum usb_role role) +static int tegra_xudc_vbus_notify(struct notifier_block *nb, + unsigned long action, void *data) { - struct tegra_xudc *xudc = dev_get_drvdata(dev); - unsigned long flags; + struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc, + vbus_nb); + struct usb_phy *usbphy = (struct usb_phy *)data; + int phy_index; - dev_dbg(dev, "%s role is %d\n", __func__, role); + dev_dbg(xudc->dev, "%s(): event is %d\n", __func__, usbphy->last_event); - spin_lock_irqsave(&xudc->lock, flags); + if ((xudc->device_mode && usbphy->last_event == USB_EVENT_VBUS) || + (!xudc->device_mode && usbphy->last_event != USB_EVENT_VBUS)) { + dev_dbg(xudc->dev, "Same role(%d) received. Ignore", + xudc->device_mode); + return NOTIFY_OK; + } - if (!xudc->suspended) - schedule_work(&xudc->usb_role_sw_work); + xudc->device_mode = (usbphy->last_event == USB_EVENT_VBUS) ? true : + false; - spin_unlock_irqrestore(&xudc->lock, flags); + phy_index = tegra_xudc_get_phy_index(xudc, usbphy); + dev_dbg(xudc->dev, "%s(): current phy index is %d\n", __func__, + phy_index); - return 0; + if (!xudc->suspended && phy_index != -1) { + xudc->curr_utmi_phy = xudc->utmi_phy[phy_index]; + xudc->curr_usb3_phy = xudc->usb3_phy[phy_index]; + schedule_work(&xudc->usb_role_sw_work); + } + + return NOTIFY_OK; } static void tegra_xudc_plc_reset_work(struct work_struct *work) @@ -708,9 +740,11 @@ static void tegra_xudc_plc_reset_work(struct work_struct *work) if (pls == PORTSC_PLS_INACTIVE) { dev_info(xudc->dev, "PLS = Inactive. 
Toggle VBUS\n"); - tegra_xusb_padctl_set_vbus_override(xudc->padctl, - false); - tegra_xusb_padctl_set_vbus_override(xudc->padctl, true); + phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, + USB_ROLE_NONE); + phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, + USB_ROLE_DEVICE); + xudc->wait_csc = false; } } @@ -729,8 +763,7 @@ static void tegra_xudc_port_reset_war_work(struct work_struct *work) spin_lock_irqsave(&xudc->lock, flags); - if ((xudc->device_mode == USB_ROLE_DEVICE) - && xudc->wait_for_sec_prc) { + if (xudc->device_mode && xudc->wait_for_sec_prc) { pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT; dev_dbg(xudc->dev, "pls = %x\n", pls); @@ -738,7 +771,8 @@ static void tegra_xudc_port_reset_war_work(struct work_struct *work) if (pls == PORTSC_PLS_DISABLED) { dev_dbg(xudc->dev, "toggle vbus\n"); /* PRC doesn't complete in 100ms, toggle the vbus */ - ret = tegra_phy_xusb_utmi_port_reset(xudc->utmi_phy); + ret = tegra_phy_xusb_utmi_port_reset( + xudc->curr_utmi_phy); if (ret == 1) xudc->wait_for_sec_prc = 0; } @@ -1927,6 +1961,7 @@ static int tegra_xudc_gadget_start(struct usb_gadget *gadget, unsigned long flags; u32 val; int ret; + unsigned int i; if (!driver) return -EINVAL; @@ -1962,6 +1997,10 @@ static int tegra_xudc_gadget_start(struct usb_gadget *gadget, xudc_writel(xudc, val, CTRL); } + for (i = 0; i < xudc->soc->num_phys; i++) + if (xudc->usbphy[i]) + otg_set_peripheral(xudc->usbphy[i]->otg, gadget); + xudc->driver = driver; unlock: dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret); @@ -1977,11 +2016,16 @@ static int tegra_xudc_gadget_stop(struct usb_gadget *gadget) struct tegra_xudc *xudc = to_xudc(gadget); unsigned long flags; u32 val; + unsigned int i; pm_runtime_get_sync(xudc->dev); spin_lock_irqsave(&xudc->lock, flags); + for (i = 0; i < xudc->soc->num_phys; i++) + if (xudc->usbphy[i]) + otg_set_peripheral(xudc->usbphy[i]->otg, NULL); + val = xudc_readl(xudc, CTRL); val &= ~(CTRL_IE | CTRL_ENABLE); xudc_writel(xudc, val, CTRL); @@ -3314,33 +3358,120 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc) xudc_writel(xudc, val, CFG_DEV_SSPI_XFER); } -static int tegra_xudc_phy_init(struct tegra_xudc *xudc) +static int tegra_xudc_phy_get(struct tegra_xudc *xudc) { - int err; + int err = 0, usb3; + unsigned int i; - err = phy_init(xudc->utmi_phy); - if (err < 0) { - dev_err(xudc->dev, "utmi phy init failed: %d\n", err); - return err; - } + xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys, + sizeof(*xudc->utmi_phy), GFP_KERNEL); + if (!xudc->utmi_phy) + return -ENOMEM; - err = phy_init(xudc->usb3_phy); - if (err < 0) { - dev_err(xudc->dev, "usb3 phy init failed: %d\n", err); - goto exit_utmi_phy; + xudc->usb3_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys, + sizeof(*xudc->usb3_phy), GFP_KERNEL); + if (!xudc->usb3_phy) + return -ENOMEM; + + xudc->usbphy = devm_kcalloc(xudc->dev, xudc->soc->num_phys, + sizeof(*xudc->usbphy), GFP_KERNEL); + if (!xudc->usbphy) + return -ENOMEM; + + xudc->vbus_nb.notifier_call = tegra_xudc_vbus_notify; + + for (i = 0; i < xudc->soc->num_phys; i++) { + char phy_name[] = "usb.-."; + + /* Get USB2 phy */ + snprintf(phy_name, sizeof(phy_name), "usb2-%d", i); + xudc->utmi_phy[i] = devm_phy_optional_get(xudc->dev, phy_name); + if (IS_ERR(xudc->utmi_phy[i])) { + err = PTR_ERR(xudc->utmi_phy[i]); + if (err != -EPROBE_DEFER) + dev_err(xudc->dev, "failed to get usb2-%d phy: %d\n", + i, err); + + goto clean_up; + } else if (xudc->utmi_phy[i]) { + /* Get usb-phy, if utmi phy is available 
*/ + xudc->usbphy[i] = devm_usb_get_phy_by_node(xudc->dev, + xudc->utmi_phy[i]->dev.of_node, + &xudc->vbus_nb); + if (IS_ERR(xudc->usbphy[i])) { + err = PTR_ERR(xudc->usbphy[i]); + dev_err(xudc->dev, "failed to get usbphy-%d: %d\n", + i, err); + goto clean_up; + } + } else if (!xudc->utmi_phy[i]) { + /* if utmi phy is not available, ignore USB3 phy get */ + continue; + } + + /* Get USB3 phy */ + usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i); + if (usb3 < 0) + continue; + + snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3); + xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name); + if (IS_ERR(xudc->usb3_phy[i])) { + err = PTR_ERR(xudc->usb3_phy[i]); + if (err != -EPROBE_DEFER) + dev_err(xudc->dev, "failed to get usb3-%d phy: %d\n", + usb3, err); + + goto clean_up; + } else if (xudc->usb3_phy[i]) + dev_dbg(xudc->dev, "usb3_phy-%d registered", usb3); } - return 0; + return err; + +clean_up: + for (i = 0; i < xudc->soc->num_phys; i++) { + xudc->usb3_phy[i] = NULL; + xudc->utmi_phy[i] = NULL; + xudc->usbphy[i] = NULL; + } -exit_utmi_phy: - phy_exit(xudc->utmi_phy); return err; } static void tegra_xudc_phy_exit(struct tegra_xudc *xudc) { - phy_exit(xudc->usb3_phy); - phy_exit(xudc->utmi_phy); + unsigned int i; + + for (i = 0; i < xudc->soc->num_phys; i++) { + phy_exit(xudc->usb3_phy[i]); + phy_exit(xudc->utmi_phy[i]); + } +} + +static int tegra_xudc_phy_init(struct tegra_xudc *xudc) +{ + int err; + unsigned int i; + + for (i = 0; i < xudc->soc->num_phys; i++) { + err = phy_init(xudc->utmi_phy[i]); + if (err < 0) { + dev_err(xudc->dev, "utmi phy init failed: %d\n", err); + goto exit_phy; + } + + err = phy_init(xudc->usb3_phy[i]); + if (err < 0) { + dev_err(xudc->dev, "usb3 phy init failed: %d\n", err); + goto exit_phy; + } + } + return 0; + +exit_phy: + tegra_xudc_phy_exit(xudc); + return err; } static const char * const tegra210_xudc_supply_names[] = { @@ -3368,6 +3499,7 @@ static struct tegra_xudc_soc tegra210_xudc_soc_data = { .num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names), .clock_names = tegra210_xudc_clock_names, .num_clks = ARRAY_SIZE(tegra210_xudc_clock_names), + .num_phys = 4, .u1_enable = false, .u2_enable = true, .lpm_enable = false, @@ -3380,6 +3512,7 @@ static struct tegra_xudc_soc tegra210_xudc_soc_data = { static struct tegra_xudc_soc tegra186_xudc_soc_data = { .clock_names = tegra186_xudc_clock_names, .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names), + .num_phys = 4, .u1_enable = true, .u2_enable = true, .lpm_enable = false, @@ -3457,7 +3590,6 @@ static int tegra_xudc_probe(struct platform_device *pdev) { struct tegra_xudc *xudc; struct resource *res; - struct usb_role_switch_desc role_sx_desc = { 0 }; unsigned int i; int err; @@ -3492,11 +3624,8 @@ static int tegra_xudc_probe(struct platform_device *pdev) } xudc->irq = platform_get_irq(pdev, 0); - if (xudc->irq < 0) { - dev_err(xudc->dev, "failed to get IRQ: %d\n", - xudc->irq); + if (xudc->irq < 0) return xudc->irq; - } err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0, dev_name(&pdev->dev), xudc); @@ -3546,19 +3675,9 @@ static int tegra_xudc_probe(struct platform_device *pdev) goto put_padctl; } - xudc->usb3_phy = devm_phy_optional_get(&pdev->dev, "usb3"); - if (IS_ERR(xudc->usb3_phy)) { - err = PTR_ERR(xudc->usb3_phy); - dev_err(xudc->dev, "failed to get usb3 phy: %d\n", err); - goto disable_regulator; - } - - xudc->utmi_phy = devm_phy_optional_get(&pdev->dev, "usb2"); - if (IS_ERR(xudc->utmi_phy)) { - err = PTR_ERR(xudc->utmi_phy); - dev_err(xudc->dev, "failed to get usb2 phy: 
%d\n", err); + err = tegra_xudc_phy_get(xudc); + if (err) goto disable_regulator; - } err = tegra_xudc_powerdomain_init(xudc); if (err) @@ -3587,24 +3706,6 @@ static int tegra_xudc_probe(struct platform_device *pdev) INIT_DELAYED_WORK(&xudc->port_reset_war_work, tegra_xudc_port_reset_war_work); - if (of_property_read_bool(xudc->dev->of_node, "usb-role-switch")) { - role_sx_desc.set = tegra_xudc_usb_role_sw_set; - role_sx_desc.fwnode = dev_fwnode(xudc->dev); - - xudc->usb_role_sw = usb_role_switch_register(xudc->dev, - &role_sx_desc); - if (IS_ERR(xudc->usb_role_sw)) { - err = PTR_ERR(xudc->usb_role_sw); - dev_err(xudc->dev, "Failed to register USB role SW: %d", - err); - goto free_eps; - } - } else { - /* Set the mode as device mode and this keeps phy always ON */ - dev_info(xudc->dev, "Set usb role to device mode always"); - schedule_work(&xudc->usb_role_sw_work); - } - pm_runtime_enable(&pdev->dev); xudc->gadget.ops = &tegra_xudc_gadget_ops; @@ -3639,15 +3740,12 @@ put_padctl: static int tegra_xudc_remove(struct platform_device *pdev) { struct tegra_xudc *xudc = platform_get_drvdata(pdev); + unsigned int i; pm_runtime_get_sync(xudc->dev); cancel_delayed_work(&xudc->plc_reset_work); - - if (xudc->usb_role_sw) { - usb_role_switch_unregister(xudc->usb_role_sw); - cancel_work_sync(&xudc->usb_role_sw_work); - } + cancel_work_sync(&xudc->usb_role_sw_work); usb_del_gadget_udc(&xudc->gadget); @@ -3658,8 +3756,10 @@ static int tegra_xudc_remove(struct platform_device *pdev) regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies); - phy_power_off(xudc->utmi_phy); - phy_power_off(xudc->usb3_phy); + for (i = 0; i < xudc->soc->num_phys; i++) { + phy_power_off(xudc->utmi_phy[i]); + phy_power_off(xudc->usb3_phy[i]); + } tegra_xudc_phy_exit(xudc); diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index 29d8e5f8bb58..b1cfc8279c3d 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -1399,7 +1399,6 @@ err: /** * xudc_stop - stops the device. * @gadget: pointer to the usb gadget structure - * @driver: pointer to usb gadget driver structure * * Return: zero always */ diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c index bd4f6ef534d9..1300c457d9ed 100644 --- a/drivers/usb/host/ehci-mv.c +++ b/drivers/usb/host/ehci-mv.c @@ -110,11 +110,12 @@ static int mv_ehci_probe(struct platform_device *pdev) struct resource *r; int retval = -ENODEV; u32 offset; + u32 status; if (usb_disabled()) return -ENODEV; - hcd = usb_create_hcd(&ehci_platform_hc_driver, &pdev->dev, "mv ehci"); + hcd = usb_create_hcd(&ehci_platform_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) return -ENOMEM; @@ -213,6 +214,14 @@ static int mv_ehci_probe(struct platform_device *pdev) device_wakeup_enable(hcd->self.controller); } + if (of_usb_get_phy_mode(pdev->dev.of_node) == USBPHY_INTERFACE_MODE_HSIC) { + status = ehci_readl(ehci, &ehci->regs->port_status[0]); + /* These "reserved" bits actually enable HSIC mode. 
*/ + status |= BIT(25); + status &= ~GENMASK(31, 30); + ehci_writel(ehci, status, &ehci->regs->port_status[0]); + } + dev_info(&pdev->dev, "successful find EHCI device with regs 0x%p irq %d" " working in %s mode\n", hcd->regs, hcd->irq, diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index b0882c13a1d1..1a48ab1bd3b2 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -384,7 +384,7 @@ MODULE_DEVICE_TABLE(pci, pci_ids); /* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver ehci_pci_driver = { - .name = (char *) hcd_name, + .name = hcd_name, .id_table = pci_ids, .probe = ehci_pci_probe, diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c index 769749ca5961..e4fc3f66d43b 100644 --- a/drivers/usb/host/ehci-platform.c +++ b/drivers/usb/host/ehci-platform.c @@ -29,6 +29,8 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/reset.h> +#include <linux/sys_soc.h> +#include <linux/timer.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/usb/ehci_pdriver.h> @@ -44,6 +46,9 @@ struct ehci_platform_priv { struct clk *clks[EHCI_MAX_CLKS]; struct reset_control *rsts; bool reset_on_resume; + bool quirk_poll; + struct timer_list poll_timer; + struct delayed_work poll_work; }; static const char hcd_name[] = "ehci-platform"; @@ -118,6 +123,111 @@ static struct usb_ehci_pdata ehci_platform_defaults = { .power_off = ehci_platform_power_off, }; +/** + * quirk_poll_check_port_status - Poll port_status if the device sticks + * @ehci: the ehci hcd pointer + * + * Since EHCI/OHCI controllers on R-Car Gen3 SoCs are possible to be getting + * stuck very rarely after a full/low usb device was disconnected. To + * detect such a situation, the controllers require a special way which poll + * the EHCI PORTSC register. + * + * Return: true if the controller's port_status indicated getting stuck + */ +static bool quirk_poll_check_port_status(struct ehci_hcd *ehci) +{ + u32 port_status = ehci_readl(ehci, &ehci->regs->port_status[0]); + + if (!(port_status & PORT_OWNER) && + (port_status & PORT_POWER) && + !(port_status & PORT_CONNECT) && + (port_status & PORT_LS_MASK)) + return true; + + return false; +} + +/** + * quirk_poll_rebind_companion - rebind comanion device to recover + * @ehci: the ehci hcd pointer + * + * Since EHCI/OHCI controllers on R-Car Gen3 SoCs are possible to be getting + * stuck very rarely after a full/low usb device was disconnected. To + * recover from such a situation, the controllers require changing the OHCI + * functional state. + */ +static void quirk_poll_rebind_companion(struct ehci_hcd *ehci) +{ + struct device *companion_dev; + struct usb_hcd *hcd = ehci_to_hcd(ehci); + + companion_dev = usb_of_get_companion_dev(hcd->self.controller); + if (!companion_dev) + return; + + device_release_driver(companion_dev); + if (device_attach(companion_dev) < 0) + ehci_err(ehci, "%s: failed\n", __func__); + + put_device(companion_dev); +} + +static void quirk_poll_work(struct work_struct *work) +{ + struct ehci_platform_priv *priv = + container_of(to_delayed_work(work), struct ehci_platform_priv, + poll_work); + struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd, + priv); + + /* check the status twice to reduce misdetection rate */ + if (!quirk_poll_check_port_status(ehci)) + return; + udelay(10); + if (!quirk_poll_check_port_status(ehci)) + return; + + ehci_dbg(ehci, "%s: detected getting stuck. 
rebind now!\n", __func__); + quirk_poll_rebind_companion(ehci); +} + +static void quirk_poll_timer(struct timer_list *t) +{ + struct ehci_platform_priv *priv = from_timer(priv, t, poll_timer); + struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd, + priv); + + if (quirk_poll_check_port_status(ehci)) { + /* + * Now scheduling the work for testing the port more. Note that + * updating the status is possible to be delayed when + * reconnection. So, this uses delayed work with 5 ms delay + * to avoid misdetection. + */ + schedule_delayed_work(&priv->poll_work, msecs_to_jiffies(5)); + } + + mod_timer(&priv->poll_timer, jiffies + HZ); +} + +static void quirk_poll_init(struct ehci_platform_priv *priv) +{ + INIT_DELAYED_WORK(&priv->poll_work, quirk_poll_work); + timer_setup(&priv->poll_timer, quirk_poll_timer, 0); + mod_timer(&priv->poll_timer, jiffies + HZ); +} + +static void quirk_poll_end(struct ehci_platform_priv *priv) +{ + del_timer_sync(&priv->poll_timer); + cancel_delayed_work(&priv->poll_work); +} + +static const struct soc_device_attribute quirk_poll_match[] = { + { .family = "R-Car Gen3" }, + { /* sentinel*/ } +}; + static int ehci_platform_probe(struct platform_device *dev) { struct usb_hcd *hcd; @@ -176,6 +286,9 @@ static int ehci_platform_probe(struct platform_device *dev) "has-transaction-translator")) hcd->has_tt = 1; + if (soc_device_match(quirk_poll_match)) + priv->quirk_poll = true; + for (clk = 0; clk < EHCI_MAX_CLKS; clk++) { priv->clks[clk] = of_clk_get(dev->dev.of_node, clk); if (IS_ERR(priv->clks[clk])) { @@ -247,6 +360,9 @@ static int ehci_platform_probe(struct platform_device *dev) device_enable_async_suspend(hcd->self.controller); platform_set_drvdata(dev, hcd); + if (priv->quirk_poll) + quirk_poll_init(priv); + return err; err_power: @@ -273,6 +389,9 @@ static int ehci_platform_remove(struct platform_device *dev) struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd); int clk; + if (priv->quirk_poll) + quirk_poll_end(priv); + usb_remove_hcd(hcd); if (pdata->power_off) @@ -297,9 +416,13 @@ static int ehci_platform_suspend(struct device *dev) struct usb_hcd *hcd = dev_get_drvdata(dev); struct usb_ehci_pdata *pdata = dev_get_platdata(dev); struct platform_device *pdev = to_platform_device(dev); + struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd); bool do_wakeup = device_may_wakeup(dev); int ret; + if (priv->quirk_poll) + quirk_poll_end(priv); + ret = ehci_suspend(hcd, do_wakeup); if (ret) return ret; @@ -331,6 +454,10 @@ static int ehci_platform_resume(struct device *dev) } ehci_resume(hcd, priv->reset_on_resume); + + if (priv->quirk_poll) + quirk_poll_init(priv); + return 0; } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c index d6433f206c17..10d51daa6a1b 100644 --- a/drivers/usb/host/ehci-tegra.c +++ b/drivers/usb/host/ehci-tegra.c @@ -282,7 +282,7 @@ done: struct dma_aligned_buffer { void *kmalloc_ptr; void *old_xfer_buffer; - u8 data[0]; + u8 data[]; }; static void free_dma_aligned_buffer(struct urb *urb) diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index ac5e967907d1..229b3de319e6 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -255,7 +255,7 @@ struct ehci_hcd { /* one per controller */ struct list_head tt_list; /* platform-specific data -- must come last */ - unsigned long priv[0] __aligned(sizeof(s64)); + unsigned long priv[] __aligned(sizeof(s64)); }; /* convert between an HCD pointer and the corresponding EHCI_HCD */ @@ -460,7 +460,7 @@ struct 
ehci_iso_sched { struct list_head td_list; unsigned span; unsigned first_packet; - struct ehci_iso_packet packet[0]; + struct ehci_iso_packet packet[]; }; /* diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c index 04733876c9c6..a8e1048278d0 100644 --- a/drivers/usb/host/fhci-hcd.c +++ b/drivers/usb/host/fhci-hcd.c @@ -396,6 +396,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, case PIPE_CONTROL: /* 1 td fro setup,1 for ack */ size = 2; + fallthrough; case PIPE_BULK: /* one td for every 4096 bytes(can be up to 8k) */ size += urb->transfer_buffer_length / 4096; diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h index 1b4db95e5c43..6cee40ec65b4 100644 --- a/drivers/usb/host/fotg210.h +++ b/drivers/usb/host/fotg210.h @@ -490,7 +490,7 @@ struct fotg210_iso_packet { struct fotg210_iso_sched { struct list_head td_list; unsigned span; - struct fotg210_iso_packet packet[0]; + struct fotg210_iso_packet packet[]; }; /* diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index f4e13a3fddee..22117a6aeb4a 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -288,7 +288,7 @@ MODULE_DEVICE_TABLE (pci, pci_ids); /* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver ohci_pci_driver = { - .name = (char *) hcd_name, + .name = hcd_name, .id_table = pci_ids, .probe = usb_hcd_pci_probe, diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h index b015b00774b2..27c26ca10bfd 100644 --- a/drivers/usb/host/ohci.h +++ b/drivers/usb/host/ohci.h @@ -337,7 +337,7 @@ typedef struct urb_priv { u16 length; // # tds in this request u16 td_cnt; // tds already serviced struct list_head pending; - struct td *td [0]; // all TDs in this request + struct td *td[]; // all TDs in this request } urb_priv_t; @@ -435,7 +435,7 @@ struct ohci_hcd { struct dentry *debug_dir; /* platform-specific data -- must come last */ - unsigned long priv[0] __aligned(sizeof(s64)); + unsigned long priv[] __aligned(sizeof(s64)); }; diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index 72a34a1eb618..adaf4063690a 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -1792,7 +1792,7 @@ struct platform_driver sl811h_driver = { .suspend = sl811h_suspend, .resume = sl811h_resume, .driver = { - .name = (char *) hcd_name, + .name = hcd_name, }, }; EXPORT_SYMBOL(sl811h_driver); diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c index 0fa3d72bae26..957c87efc746 100644 --- a/drivers/usb/host/uhci-pci.c +++ b/drivers/usb/host/uhci-pci.c @@ -294,7 +294,7 @@ static const struct pci_device_id uhci_pci_ids[] = { { MODULE_DEVICE_TABLE(pci, uhci_pci_ids); static struct pci_driver uhci_pci_driver = { - .name = (char *)hcd_name, + .name = hcd_name, .id_table = uhci_pci_ids, .probe = usb_hcd_pci_probe, diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c index 3c4abb5a1c3f..5546e7e013a8 100644 --- a/drivers/usb/host/xhci-histb.c +++ b/drivers/usb/host/xhci-histb.c @@ -219,8 +219,7 @@ static int xhci_histb_probe(struct platform_device *pdev) if (irq < 0) return irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - histb->ctrl = devm_ioremap_resource(&pdev->dev, res); + histb->ctrl = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(histb->ctrl)) return PTR_ERR(histb->ctrl); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 7a3a29e5e9d2..9eca1fe81061 100644 --- 
a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -55,6 +55,7 @@ static u8 usb_bos_descriptor [] = { static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, u16 wLength) { + struct xhci_port_cap *port_cap = NULL; int i, ssa_count; u32 temp; u16 desc_size, ssp_cap_size, ssa_size = 0; @@ -64,16 +65,24 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size; /* does xhci support USB 3.1 Enhanced SuperSpeed */ - if (xhci->usb3_rhub.min_rev >= 0x01) { + for (i = 0; i < xhci->num_port_caps; i++) { + if (xhci->port_caps[i].maj_rev == 0x03 && + xhci->port_caps[i].min_rev >= 0x01) { + usb3_1 = true; + port_cap = &xhci->port_caps[i]; + break; + } + } + + if (usb3_1) { /* does xhci provide a PSI table for SSA speed attributes? */ - if (xhci->usb3_rhub.psi_count) { + if (port_cap->psi_count) { /* two SSA entries for each unique PSI ID, RX and TX */ - ssa_count = xhci->usb3_rhub.psi_uid_count * 2; + ssa_count = port_cap->psi_uid_count * 2; ssa_size = ssa_count * sizeof(u32); ssp_cap_size -= 16; /* skip copying the default SSA */ } desc_size += ssp_cap_size; - usb3_1 = true; } memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength)); @@ -99,7 +108,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, } /* If PSI table exists, add the custom speed attributes from it */ - if (usb3_1 && xhci->usb3_rhub.psi_count) { + if (usb3_1 && port_cap->psi_count) { u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp; int offset; @@ -111,7 +120,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, /* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */ bm_attrib = (ssa_count - 1) & 0x1f; - bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5; + bm_attrib |= (port_cap->psi_uid_count - 1) << 5; put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]); if (wLength < desc_size + ssa_size) @@ -124,8 +133,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, * USB 3.1 requires two SSA entries (RX and TX) for every link */ offset = desc_size; - for (i = 0; i < xhci->usb3_rhub.psi_count; i++) { - psi = xhci->usb3_rhub.psi[i]; + for (i = 0; i < port_cap->psi_count; i++) { + psi = port_cap->psi[i]; psi &= ~USB_SSP_SUBLINK_SPEED_RSVD; psi_exp = XHCI_EXT_PORT_PSIE(psi); psi_mant = XHCI_EXT_PORT_PSIM(psi); @@ -558,6 +567,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd) */ static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index, bool on, unsigned long *flags) + __must_hold(&xhci->lock) { struct xhci_hub *rhub; struct xhci_port *port; @@ -608,6 +618,7 @@ static void xhci_port_set_test_mode(struct xhci_hcd *xhci, static int xhci_enter_test_mode(struct xhci_hcd *xhci, u16 test_mode, u16 wIndex, unsigned long *flags) + __must_hold(&xhci->lock) { int i, retval; @@ -1297,7 +1308,47 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, wIndex, link_state); goto error; } + + /* + * set link to U0, steps depend on current link state. + * U3: set link to U0 and wait for u3exit completion. + * U1/U2: no PLC complete event, only set link to U0. 
+ * Resume/Recovery: device initiated U0, only wait for + * completion + */ + if (link_state == USB_SS_PORT_LS_U0) { + u32 pls = temp & PORT_PLS_MASK; + bool wait_u0 = false; + + /* already in U0 */ + if (pls == XDEV_U0) + break; + if (pls == XDEV_U3 || + pls == XDEV_RESUME || + pls == XDEV_RECOVERY) { + wait_u0 = true; + reinit_completion(&bus_state->u3exit_done[wIndex]); + } + if (pls <= XDEV_U3) /* U1, U2, U3 */ + xhci_set_link_state(xhci, ports[wIndex], + USB_SS_PORT_LS_U0); + if (!wait_u0) { + if (pls > XDEV_U3) + goto error; + break; + } + spin_unlock_irqrestore(&xhci->lock, flags); + if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex], + msecs_to_jiffies(100))) + xhci_dbg(xhci, "missing U0 port change event for port %d\n", + wIndex); + spin_lock_irqsave(&xhci->lock, flags); + temp = readl(ports[wIndex]->addr); + break; + } + if (link_state == USB_SS_PORT_LS_U3) { + int retries = 16; slot_id = xhci_find_slot_id_by_port(hcd, xhci, wIndex + 1); if (slot_id) { @@ -1308,17 +1359,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, xhci_stop_device(xhci, slot_id, 1); spin_lock_irqsave(&xhci->lock, flags); } - } - - xhci_set_link_state(xhci, ports[wIndex], link_state); - - spin_unlock_irqrestore(&xhci->lock, flags); - msleep(20); /* wait device to enter */ - spin_lock_irqsave(&xhci->lock, flags); - - temp = readl(ports[wIndex]->addr); - if (link_state == USB_SS_PORT_LS_U3) + xhci_set_link_state(xhci, ports[wIndex], USB_SS_PORT_LS_U3); + spin_unlock_irqrestore(&xhci->lock, flags); + while (retries--) { + usleep_range(4000, 8000); + temp = readl(ports[wIndex]->addr); + if ((temp & PORT_PLS_MASK) == XDEV_U3) + break; + } + spin_lock_irqsave(&xhci->lock, flags); + temp = readl(ports[wIndex]->addr); bus_state->suspended_ports |= 1 << wIndex; + } break; case USB_PORT_FEAT_POWER: /* diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 3b1388fa2f36..9764122c9cdf 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1475,9 +1475,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Allow 3 retries for everything but isoc, set CErr = 3 */ if (!usb_endpoint_xfer_isoc(&ep->desc)) err_count = 3; - /* Some devices get this wrong */ - if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH) - max_packet = 512; + /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */ + if (usb_endpoint_xfer_bulk(&ep->desc)) { + if (udev->speed == USB_SPEED_HIGH) + max_packet = 512; + if (udev->speed == USB_SPEED_FULL) { + max_packet = rounddown_pow_of_two(max_packet); + max_packet = clamp_val(max_packet, 8, 64); + } + } /* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */ if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100) avg_trb_len = 8; @@ -1909,17 +1915,17 @@ no_bw: xhci->usb3_rhub.num_ports = 0; xhci->num_active_eps = 0; kfree(xhci->usb2_rhub.ports); - kfree(xhci->usb2_rhub.psi); kfree(xhci->usb3_rhub.ports); - kfree(xhci->usb3_rhub.psi); kfree(xhci->hw_ports); kfree(xhci->rh_bw); kfree(xhci->ext_caps); + for (i = 0; i < xhci->num_port_caps; i++) + kfree(xhci->port_caps[i].psi); + kfree(xhci->port_caps); + xhci->num_port_caps = 0; xhci->usb2_rhub.ports = NULL; - xhci->usb2_rhub.psi = NULL; xhci->usb3_rhub.ports = NULL; - xhci->usb3_rhub.psi = NULL; xhci->hw_ports = NULL; xhci->rh_bw = NULL; xhci->ext_caps = NULL; @@ -2120,6 +2126,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, u8 major_revision, minor_revision; struct xhci_hub 
*rhub; struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + struct xhci_port_cap *port_cap; temp = readl(addr); major_revision = XHCI_EXT_PORT_MAJOR(temp); @@ -2154,31 +2161,39 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, /* WTF? "Valid values are ‘1’ to MaxPorts" */ return; - rhub->psi_count = XHCI_EXT_PORT_PSIC(temp); - if (rhub->psi_count) { - rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi), - GFP_KERNEL, dev_to_node(dev)); - if (!rhub->psi) - rhub->psi_count = 0; + port_cap = &xhci->port_caps[xhci->num_port_caps++]; + if (xhci->num_port_caps > max_caps) + return; + + port_cap->maj_rev = major_revision; + port_cap->min_rev = minor_revision; + port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp); - rhub->psi_uid_count++; - for (i = 0; i < rhub->psi_count; i++) { - rhub->psi[i] = readl(addr + 4 + i); + if (port_cap->psi_count) { + port_cap->psi = kcalloc_node(port_cap->psi_count, + sizeof(*port_cap->psi), + GFP_KERNEL, dev_to_node(dev)); + if (!port_cap->psi) + port_cap->psi_count = 0; + + port_cap->psi_uid_count++; + for (i = 0; i < port_cap->psi_count; i++) { + port_cap->psi[i] = readl(addr + 4 + i); /* count unique ID values, two consecutive entries can * have the same ID if link is assymetric */ - if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) != - XHCI_EXT_PORT_PSIV(rhub->psi[i - 1]))) - rhub->psi_uid_count++; + if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) != + XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1]))) + port_cap->psi_uid_count++; xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n", - XHCI_EXT_PORT_PSIV(rhub->psi[i]), - XHCI_EXT_PORT_PSIE(rhub->psi[i]), - XHCI_EXT_PORT_PLT(rhub->psi[i]), - XHCI_EXT_PORT_PFD(rhub->psi[i]), - XHCI_EXT_PORT_LP(rhub->psi[i]), - XHCI_EXT_PORT_PSIM(rhub->psi[i])); + XHCI_EXT_PORT_PSIV(port_cap->psi[i]), + XHCI_EXT_PORT_PSIE(port_cap->psi[i]), + XHCI_EXT_PORT_PLT(port_cap->psi[i]), + XHCI_EXT_PORT_PFD(port_cap->psi[i]), + XHCI_EXT_PORT_LP(port_cap->psi[i]), + XHCI_EXT_PORT_PSIM(port_cap->psi[i])); } } /* cache usb2 port capabilities */ @@ -2213,6 +2228,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, continue; } hw_port->rhub = rhub; + hw_port->port_cap = port_cap; rhub->num_ports++; } /* FIXME: Should we disable ports not in the Extended Capabilities? */ @@ -2303,6 +2319,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) if (!xhci->ext_caps) return -ENOMEM; + xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps), + flags, dev_to_node(dev)); + if (!xhci->port_caps) + return -ENOMEM; + offset = cap_start; while (offset) { @@ -2531,6 +2552,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->usb3_rhub.bus_state.resume_done[i] = 0; /* Only the USB 2.0 completions will ever be used. 
*/ init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]); + init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]); } if (scratchpad_alloc(xhci, flags)) diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h index 5ac458b7d2e0..acd56517215a 100644 --- a/drivers/usb/host/xhci-mtk.h +++ b/drivers/usb/host/xhci-mtk.h @@ -95,7 +95,7 @@ struct mu3h_sch_ep_info { u32 pkts; u32 cs_count; u32 burst_mode; - u32 bw_budget_table[0]; + u32 bw_budget_table[]; }; #define MU3C_U3_PORT_MAX 4 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 4917c5b033fa..766b74723e64 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -49,6 +49,8 @@ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0 #define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13 +#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af +#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba @@ -135,7 +137,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_AMD_PLL_FIX; if (pdev->vendor == PCI_VENDOR_ID_AMD && - (pdev->device == 0x15e0 || + (pdev->device == 0x145c || + pdev->device == 0x15e0 || pdev->device == 0x15e1 || pdev->device == 0x43bb)) xhci->quirks |= XHCI_SUSPEND_DELAY; @@ -187,7 +190,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) { + pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) { xhci->quirks |= XHCI_PME_STUCK_QUIRK; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && @@ -214,7 +218,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI)) + pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI)) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if (pdev->vendor == PCI_VENDOR_ID_ETRON && @@ -241,6 +246,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x3432) xhci->quirks |= XHCI_BROKEN_STREAMS; + if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) + xhci->quirks |= XHCI_LPM_SUPPORT; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1042) xhci->quirks |= XHCI_BROKEN_STREAMS; @@ -302,6 +310,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd) if (!usb_hcd_is_primary_hcd(hcd)) return 0; + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) + xhci_pme_acpi_rtd3_enable(pdev); + xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn); /* Find any debug ports */ @@ -359,9 +370,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) HCC_MAX_PSA(xhci->hcc_params) >= 4) xhci->shared_hcd->can_do_streams = 1; - if (xhci->quirks & XHCI_PME_STUCK_QUIRK) - xhci_pme_acpi_rtd3_enable(dev); - /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ pm_runtime_put_noidle(&dev->dev); @@ -547,7 +555,7 @@ MODULE_DEVICE_TABLE(pci, pci_ids); /* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver xhci_pci_driver = { 
- .name = (char *) hcd_name, + .name = hcd_name, .id_table = pci_ids, .probe = xhci_pci_probe, diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index d90cd5ec09cf..1d4f6f85f0fe 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -219,8 +219,7 @@ static int xhci_plat_probe(struct platform_device *pdev) goto disable_runtime; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { ret = PTR_ERR(hcd->regs); goto put_hcd; @@ -445,6 +444,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); static struct platform_driver usb_xhci_driver = { .probe = xhci_plat_probe, .remove = xhci_plat_remove, + .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "xhci-hcd", .pm = &xhci_plat_pm_ops, diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index d23f7408c81f..a78787bb5133 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -955,6 +955,7 @@ void xhci_stop_endpoint_command_watchdog(struct timer_list *t) struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer); struct xhci_hcd *xhci = ep->xhci; unsigned long flags; + u32 usbsts; spin_lock_irqsave(&xhci->lock, flags); @@ -965,8 +966,11 @@ void xhci_stop_endpoint_command_watchdog(struct timer_list *t) xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit"); return; } + usbsts = readl(&xhci->op_regs->status); xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n"); + xhci_warn(xhci, "USBSTS:%s\n", xhci_decode_usbsts(usbsts)); + ep->ep_state &= ~EP_STOP_CMD_PENDING; xhci_halt(xhci); @@ -1677,6 +1681,7 @@ static void handle_port_status(struct xhci_hcd *xhci, (portsc & PORT_PLS_MASK) == XDEV_U1 || (portsc & PORT_PLS_MASK) == XDEV_U2)) { xhci_dbg(xhci, "resume SS port %d finished\n", port_id); + complete(&bus_state->u3exit_done[hcd_portnum]); /* We've just brought the device into U0/1/2 through either the * Resume state after a device remote wakeup, or through the * U3Exit state after a host-initiated resume. 
If it's a device @@ -2413,6 +2418,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, status = -EPIPE; break; case COMP_SPLIT_TRANSACTION_ERROR: + xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n", + slot_id, ep_index); + status = -EPROTO; + break; case COMP_USB_TRANSACTION_ERROR: xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n", slot_id, ep_index); diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 8163aefc6c6b..2eaf5c0af80c 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -24,6 +24,9 @@ #include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/slab.h> +#include <linux/usb/otg.h> +#include <linux/usb/phy.h> +#include <linux/usb/role.h> #include <soc/tegra/pmc.h> #include "xhci.h" @@ -203,6 +206,8 @@ struct tegra_xusb_soc { bool scale_ss_clock; bool has_ipfs; + bool lpm_support; + bool otg_reset_sspi; }; struct tegra_xusb_context { @@ -250,6 +255,14 @@ struct tegra_xusb { struct phy **phys; unsigned int num_phys; + struct usb_phy **usbphy; + unsigned int num_usb_phys; + int otg_usb2_port; + int otg_usb3_port; + bool host_mode; + struct notifier_block id_nb; + struct work_struct id_work; + /* Firmware loading related */ struct { size_t size; @@ -1081,6 +1094,205 @@ static int tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra) return err; } +static void tegra_xhci_set_port_power(struct tegra_xusb *tegra, bool main, + bool set) +{ + struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd); + struct usb_hcd *hcd = main ? xhci->main_hcd : xhci->shared_hcd; + unsigned int wait = (!main && !set) ? 1000 : 10; + u16 typeReq = set ? SetPortFeature : ClearPortFeature; + u16 wIndex = main ? tegra->otg_usb2_port + 1 : tegra->otg_usb3_port + 1; + u32 status; + u32 stat_power = main ? USB_PORT_STAT_POWER : USB_SS_PORT_STAT_POWER; + u32 status_val = set ? stat_power : 0; + + dev_dbg(tegra->dev, "%s():%s %s port power\n", __func__, + set ? "set" : "clear", main ? "HS" : "SS"); + + hcd->driver->hub_control(hcd, typeReq, USB_PORT_FEAT_POWER, wIndex, + NULL, 0); + + do { + tegra_xhci_hc_driver.hub_control(hcd, GetPortStatus, 0, wIndex, + (char *) &status, sizeof(status)); + if (status_val == (status & stat_power)) + break; + + if (!main && !set) + usleep_range(600, 700); + else + usleep_range(10, 20); + } while (--wait > 0); + + if (status_val != (status & stat_power)) + dev_info(tegra->dev, "failed to %s %s PP %d\n", + set ? "set" : "clear", + main ? "HS" : "SS", status); +} + +static struct phy *tegra_xusb_get_phy(struct tegra_xusb *tegra, char *name, + int port) +{ + unsigned int i, phy_count = 0; + + for (i = 0; i < tegra->soc->num_types; i++) { + if (!strncmp(tegra->soc->phy_types[i].name, "usb2", + strlen(name))) + return tegra->phys[phy_count+port]; + + phy_count += tegra->soc->phy_types[i].num; + } + + return NULL; +} + +static void tegra_xhci_id_work(struct work_struct *work) +{ + struct tegra_xusb *tegra = container_of(work, struct tegra_xusb, + id_work); + struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd); + struct tegra_xusb_mbox_msg msg; + struct phy *phy = tegra_xusb_get_phy(tegra, "usb2", + tegra->otg_usb2_port); + u32 status; + int ret; + + dev_dbg(tegra->dev, "host mode %s\n", tegra->host_mode ? 
"on" : "off"); + + mutex_lock(&tegra->lock); + + if (tegra->host_mode) + phy_set_mode_ext(phy, PHY_MODE_USB_OTG, USB_ROLE_HOST); + else + phy_set_mode_ext(phy, PHY_MODE_USB_OTG, USB_ROLE_NONE); + + mutex_unlock(&tegra->lock); + + if (tegra->host_mode) { + /* switch to host mode */ + if (tegra->otg_usb3_port >= 0) { + if (tegra->soc->otg_reset_sspi) { + /* set PP=0 */ + tegra_xhci_hc_driver.hub_control( + xhci->shared_hcd, GetPortStatus, + 0, tegra->otg_usb3_port+1, + (char *) &status, sizeof(status)); + if (status & USB_SS_PORT_STAT_POWER) + tegra_xhci_set_port_power(tegra, false, + false); + + /* reset OTG port SSPI */ + msg.cmd = MBOX_CMD_RESET_SSPI; + msg.data = tegra->otg_usb3_port+1; + + ret = tegra_xusb_mbox_send(tegra, &msg); + if (ret < 0) { + dev_info(tegra->dev, + "failed to RESET_SSPI %d\n", + ret); + } + } + + tegra_xhci_set_port_power(tegra, false, true); + } + + tegra_xhci_set_port_power(tegra, true, true); + + } else { + if (tegra->otg_usb3_port >= 0) + tegra_xhci_set_port_power(tegra, false, false); + + tegra_xhci_set_port_power(tegra, true, false); + } +} + +static int tegra_xusb_get_usb2_port(struct tegra_xusb *tegra, + struct usb_phy *usbphy) +{ + unsigned int i; + + for (i = 0; i < tegra->num_usb_phys; i++) { + if (tegra->usbphy[i] && usbphy == tegra->usbphy[i]) + return i; + } + + return -1; +} + +static int tegra_xhci_id_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct tegra_xusb *tegra = container_of(nb, struct tegra_xusb, + id_nb); + struct usb_phy *usbphy = (struct usb_phy *)data; + + dev_dbg(tegra->dev, "%s(): action is %d", __func__, usbphy->last_event); + + if ((tegra->host_mode && usbphy->last_event == USB_EVENT_ID) || + (!tegra->host_mode && usbphy->last_event != USB_EVENT_ID)) { + dev_dbg(tegra->dev, "Same role(%d) received. Ignore", + tegra->host_mode); + return NOTIFY_OK; + } + + tegra->otg_usb2_port = tegra_xusb_get_usb2_port(tegra, usbphy); + tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion( + tegra->padctl, + tegra->otg_usb2_port); + + tegra->host_mode = (usbphy->last_event == USB_EVENT_ID) ? true : false; + + schedule_work(&tegra->id_work); + + return NOTIFY_OK; +} + +static int tegra_xusb_init_usb_phy(struct tegra_xusb *tegra) +{ + unsigned int i; + + tegra->usbphy = devm_kcalloc(tegra->dev, tegra->num_usb_phys, + sizeof(*tegra->usbphy), GFP_KERNEL); + if (!tegra->usbphy) + return -ENOMEM; + + INIT_WORK(&tegra->id_work, tegra_xhci_id_work); + tegra->id_nb.notifier_call = tegra_xhci_id_notify; + + for (i = 0; i < tegra->num_usb_phys; i++) { + struct phy *phy = tegra_xusb_get_phy(tegra, "usb2", i); + + if (!phy) + continue; + + tegra->usbphy[i] = devm_usb_get_phy_by_node(tegra->dev, + phy->dev.of_node, + &tegra->id_nb); + if (!IS_ERR(tegra->usbphy[i])) { + dev_dbg(tegra->dev, "usbphy-%d registered", i); + otg_set_host(tegra->usbphy[i]->otg, &tegra->hcd->self); + } else { + /* + * usb-phy is optional, continue if its not available. 
+ */ + tegra->usbphy[i] = NULL; + } + } + + return 0; +} + +static void tegra_xusb_deinit_usb_phy(struct tegra_xusb *tegra) +{ + unsigned int i; + + cancel_work_sync(&tegra->id_work); + + for (i = 0; i < tegra->num_usb_phys; i++) + if (tegra->usbphy[i]) + otg_set_host(tegra->usbphy[i]->otg, NULL); +} + static int tegra_xusb_probe(struct platform_device *pdev) { struct tegra_xusb *tegra; @@ -1254,8 +1466,11 @@ static int tegra_xusb_probe(struct platform_device *pdev) goto put_powerdomains; } - for (i = 0; i < tegra->soc->num_types; i++) + for (i = 0; i < tegra->soc->num_types; i++) { + if (!strncmp(tegra->soc->phy_types[i].name, "usb2", 4)) + tegra->num_usb_phys = tegra->soc->phy_types[i].num; tegra->num_phys += tegra->soc->phy_types[i].num; + } tegra->phys = devm_kcalloc(&pdev->dev, tegra->num_phys, sizeof(*tegra->phys), GFP_KERNEL); @@ -1384,6 +1599,12 @@ static int tegra_xusb_probe(struct platform_device *pdev) goto remove_usb3; } + err = tegra_xusb_init_usb_phy(tegra); + if (err < 0) { + dev_err(&pdev->dev, "failed to init USB PHY: %d\n", err); + goto remove_usb3; + } + return 0; remove_usb3: @@ -1420,6 +1641,8 @@ static int tegra_xusb_remove(struct platform_device *pdev) struct tegra_xusb *tegra = platform_get_drvdata(pdev); struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd); + tegra_xusb_deinit_usb_phy(tegra); + usb_remove_hcd(xhci->shared_hcd); usb_put_hcd(xhci->shared_hcd); xhci->shared_hcd = NULL; @@ -1694,6 +1917,7 @@ static const struct tegra_xusb_soc tegra124_soc = { }, .scale_ss_clock = true, .has_ipfs = true, + .otg_reset_sspi = false, .mbox = { .cmd = 0xe4, .data_in = 0xe8, @@ -1733,6 +1957,7 @@ static const struct tegra_xusb_soc tegra210_soc = { }, .scale_ss_clock = false, .has_ipfs = true, + .otg_reset_sspi = true, .mbox = { .cmd = 0xe4, .data_in = 0xe8, @@ -1773,12 +1998,14 @@ static const struct tegra_xusb_soc tegra186_soc = { }, .scale_ss_clock = false, .has_ipfs = false, + .otg_reset_sspi = false, .mbox = { .cmd = 0xe4, .data_in = 0xe8, .data_out = 0xec, .owner = 0xf0, }, + .lpm_support = true, }; static const char * const tegra194_supply_names[] = { @@ -1802,12 +2029,14 @@ static const struct tegra_xusb_soc tegra194_soc = { }, .scale_ss_clock = false, .has_ipfs = false, + .otg_reset_sspi = false, .mbox = { .cmd = 0x68, .data_in = 0x6c, .data_out = 0x70, .owner = 0x74, }, + .lpm_support = true, }; MODULE_FIRMWARE("nvidia/tegra194/xusb.bin"); @@ -1832,7 +2061,11 @@ static struct platform_driver tegra_xusb_driver = { static void tegra_xhci_quirks(struct device *dev, struct xhci_hcd *xhci) { + struct tegra_xusb *tegra = dev_get_drvdata(dev); + xhci->quirks |= XHCI_PLAT; + if (tegra && tegra->soc->lpm_support) + xhci->quirks |= XHCI_LPM_SUPPORT; } static int tegra_xhci_setup(struct usb_hcd *hcd) diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h index 56eb867803a6..b19582b2a72c 100644 --- a/drivers/usb/host/xhci-trace.h +++ b/drivers/usb/host/xhci-trace.h @@ -289,23 +289,12 @@ DECLARE_EVENT_CLASS(xhci_log_urb, ), TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x", __entry->epnum, __entry->dir_in ? 
"in" : "out", - ({ char *s; - switch (__entry->type) { - case USB_ENDPOINT_XFER_INT: - s = "intr"; - break; - case USB_ENDPOINT_XFER_CONTROL: - s = "control"; - break; - case USB_ENDPOINT_XFER_BULK: - s = "bulk"; - break; - case USB_ENDPOINT_XFER_ISOC: - s = "isoc"; - break; - default: - s = "UNKNOWN"; - } s; }), __entry->urb, __entry->pipe, __entry->slot_id, + __print_symbolic(__entry->type, + { USB_ENDPOINT_XFER_INT, "intr" }, + { USB_ENDPOINT_XFER_CONTROL, "control" }, + { USB_ENDPOINT_XFER_BULK, "bulk" }, + { USB_ENDPOINT_XFER_ISOC, "isoc" }), + __entry->urb, __entry->pipe, __entry->slot_id, __entry->actual, __entry->length, __entry->num_mapped_sgs, __entry->num_sgs, __entry->stream, __entry->flags ) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index dbac0fa9748d..fe38275363e0 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1157,8 +1157,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) xhci_dbg(xhci, "Stop HCD\n"); xhci_halt(xhci); xhci_zero_64b_regs(xhci); - xhci_reset(xhci); + retval = xhci_reset(xhci); spin_unlock_irq(&xhci->lock); + if (retval) + return retval; xhci_cleanup_msix(xhci); xhci_dbg(xhci, "// Disabling event ring interrupts\n"); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 13d8838cd552..3289bb516201 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1642,7 +1642,7 @@ struct xhci_scratchpad { struct urb_priv { int num_tds; int num_tds_done; - struct xhci_td td[0]; + struct xhci_td td[]; }; /* @@ -1694,6 +1694,7 @@ struct xhci_bus_state { /* Which ports are waiting on RExit to U0 transition. */ unsigned long rexit_ports; struct completion rexit_done[USB_MAXCHILDREN]; + struct completion u3exit_done[USB_MAXCHILDREN]; }; @@ -1702,12 +1703,20 @@ struct xhci_bus_state { * Intel Lynx Point LP xHCI host. 
*/ #define XHCI_MAX_REXIT_TIMEOUT_MS 20 +struct xhci_port_cap { + u32 *psi; /* array of protocol speed ID entries */ + u8 psi_count; + u8 psi_uid_count; + u8 maj_rev; + u8 min_rev; +}; struct xhci_port { __le32 __iomem *addr; int hw_portnum; int hcd_portnum; struct xhci_hub *rhub; + struct xhci_port_cap *port_cap; }; struct xhci_hub { @@ -1719,9 +1728,6 @@ struct xhci_hub { /* supported prococol extended capabiliy values */ u8 maj_rev; u8 min_rev; - u32 *psi; /* array of protocol speed ID entries */ - u8 psi_count; - u8 psi_uid_count; }; /* There is one xhci_hcd structure per controller */ @@ -1880,6 +1886,9 @@ struct xhci_hcd { /* cached usb2 extened protocol capabilites */ u32 *ext_caps; unsigned int num_ext_caps; + /* cached extended protocol port capabilities */ + struct xhci_port_cap *port_caps; + unsigned int num_port_caps; /* Compliance Mode Recovery Data */ struct timer_list comp_mode_recovery_timer; u32 port_status_u0; @@ -1893,7 +1902,7 @@ struct xhci_hcd { void *dbc; /* platform-specific data -- must come last */ - unsigned long priv[0] __aligned(sizeof(s64)); + unsigned long priv[] __aligned(sizeof(s64)); }; /* Platform specific overrides to generic XHCI hc_driver ops */ @@ -2581,6 +2590,35 @@ static inline const char *xhci_decode_portsc(u32 portsc) return str; } +static inline const char *xhci_decode_usbsts(u32 usbsts) +{ + static char str[256]; + int ret = 0; + + if (usbsts == ~(u32)0) + return " 0xffffffff"; + if (usbsts & STS_HALT) + ret += sprintf(str + ret, " HCHalted"); + if (usbsts & STS_FATAL) + ret += sprintf(str + ret, " HSE"); + if (usbsts & STS_EINT) + ret += sprintf(str + ret, " EINT"); + if (usbsts & STS_PORT) + ret += sprintf(str + ret, " PCD"); + if (usbsts & STS_SAVE) + ret += sprintf(str + ret, " SSS"); + if (usbsts & STS_RESTORE) + ret += sprintf(str + ret, " RSS"); + if (usbsts & STS_SRE) + ret += sprintf(str + ret, " SRE"); + if (usbsts & STS_CNR) + ret += sprintf(str + ret, " CNR"); + if (usbsts & STS_HCE) + ret += sprintf(str + ret, " HCE"); + + return str; +} + static inline const char *xhci_decode_doorbell(u32 slot, u32 doorbell) { static char str[256]; diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 834b2494da73..833a460ee55a 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -137,6 +137,16 @@ config USB_APPLEDISPLAY Say Y here if you want to control the backlight of Apple Cinema Displays over USB. This driver provides a sysfs interface. +config APPLE_MFI_FASTCHARGE + tristate "Fast charge control for iOS devices" + select POWER_SUPPLY + help + Say Y here if you want to control whether iOS devices will + fast charge from the USB interface, as implemented in "MFi" + chargers. + + It is safe to say M here. 
+ source "drivers/usb/misc/sisusbvga/Kconfig" config USB_LD diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile index 0d416eb624bb..da39bddb0604 100644 --- a/drivers/usb/misc/Makefile +++ b/drivers/usb/misc/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_USB_EMI26) += emi26.o obj-$(CONFIG_USB_EMI62) += emi62.o obj-$(CONFIG_USB_EZUSB_FX2) += ezusb.o obj-$(CONFIG_USB_FTDI_ELAN) += ftdi-elan.o +obj-$(CONFIG_APPLE_MFI_FASTCHARGE) += apple-mfi-fastcharge.o obj-$(CONFIG_USB_IDMOUSE) += idmouse.o obj-$(CONFIG_USB_IOWARRIOR) += iowarrior.o obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o diff --git a/drivers/usb/misc/apple-mfi-fastcharge.c b/drivers/usb/misc/apple-mfi-fastcharge.c new file mode 100644 index 000000000000..b403094a6b3a --- /dev/null +++ b/drivers/usb/misc/apple-mfi-fastcharge.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Fast-charge control for Apple "MFi" devices + * + * Copyright (C) 2019 Bastien Nocera <hadess@hadess.net> + */ + +/* Standard include files */ +#include <linux/module.h> +#include <linux/power_supply.h> +#include <linux/slab.h> +#include <linux/usb.h> + +MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>"); +MODULE_DESCRIPTION("Fast-charge control for Apple \"MFi\" devices"); +MODULE_LICENSE("GPL"); + +#define TRICKLE_CURRENT_MA 0 +#define FAST_CURRENT_MA 2500 + +#define APPLE_VENDOR_ID 0x05ac /* Apple */ + +/* The product ID is defined as starting with 0x12nn, as per the + * "Choosing an Apple Device USB Configuration" section in + * release R9 (2012) of the "MFi Accessory Hardware Specification" + * + * To distinguish an Apple device, a USB host can check the device + * descriptor of attached USB devices for the following fields: + * ■ Vendor ID: 0x05AC + * ■ Product ID: 0x12nn + * + * Those checks will be done in .match() and .probe(). 
+ */ + +static const struct usb_device_id mfi_fc_id_table[] = { + { .idVendor = APPLE_VENDOR_ID, + .match_flags = USB_DEVICE_ID_MATCH_VENDOR }, + {}, +}; + +MODULE_DEVICE_TABLE(usb, mfi_fc_id_table); + +/* Driver-local specific stuff */ +struct mfi_device { + struct usb_device *udev; + struct power_supply *battery; + int charge_type; +}; + +static int apple_mfi_fc_set_charge_type(struct mfi_device *mfi, + const union power_supply_propval *val) +{ + int current_ma; + int retval; + __u8 request_type; + + if (mfi->charge_type == val->intval) { + dev_dbg(&mfi->udev->dev, "charge type %d already set\n", + mfi->charge_type); + return 0; + } + + switch (val->intval) { + case POWER_SUPPLY_CHARGE_TYPE_TRICKLE: + current_ma = TRICKLE_CURRENT_MA; + break; + case POWER_SUPPLY_CHARGE_TYPE_FAST: + current_ma = FAST_CURRENT_MA; + break; + default: + return -EINVAL; + } + + request_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE; + retval = usb_control_msg(mfi->udev, usb_sndctrlpipe(mfi->udev, 0), + 0x40, /* Vendor‐defined power request */ + request_type, + current_ma, /* wValue, current offset */ + current_ma, /* wIndex, current offset */ + NULL, 0, USB_CTRL_GET_TIMEOUT); + if (retval) { + dev_dbg(&mfi->udev->dev, "retval = %d\n", retval); + return retval; + } + + mfi->charge_type = val->intval; + + return 0; +} + +static int apple_mfi_fc_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct mfi_device *mfi = power_supply_get_drvdata(psy); + + dev_dbg(&mfi->udev->dev, "prop: %d\n", psp); + + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_TYPE: + val->intval = mfi->charge_type; + break; + case POWER_SUPPLY_PROP_SCOPE: + val->intval = POWER_SUPPLY_SCOPE_DEVICE; + break; + default: + return -ENODATA; + } + + return 0; +} + +static int apple_mfi_fc_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct mfi_device *mfi = power_supply_get_drvdata(psy); + int ret; + + dev_dbg(&mfi->udev->dev, "prop: %d\n", psp); + + ret = pm_runtime_get_sync(&mfi->udev->dev); + if (ret < 0) + return ret; + + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_TYPE: + ret = apple_mfi_fc_set_charge_type(mfi, val); + break; + default: + ret = -EINVAL; + } + + pm_runtime_mark_last_busy(&mfi->udev->dev); + pm_runtime_put_autosuspend(&mfi->udev->dev); + + return ret; +} + +static int apple_mfi_fc_property_is_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_TYPE: + return 1; + default: + return 0; + } +} + +static enum power_supply_property apple_mfi_fc_properties[] = { + POWER_SUPPLY_PROP_CHARGE_TYPE, + POWER_SUPPLY_PROP_SCOPE +}; + +static const struct power_supply_desc apple_mfi_fc_desc = { + .name = "apple_mfi_fastcharge", + .type = POWER_SUPPLY_TYPE_BATTERY, + .properties = apple_mfi_fc_properties, + .num_properties = ARRAY_SIZE(apple_mfi_fc_properties), + .get_property = apple_mfi_fc_get_property, + .set_property = apple_mfi_fc_set_property, + .property_is_writeable = apple_mfi_fc_property_is_writeable +}; + +static int mfi_fc_probe(struct usb_device *udev) +{ + struct power_supply_config battery_cfg = {}; + struct mfi_device *mfi = NULL; + int err, idProduct; + + idProduct = le16_to_cpu(udev->descriptor.idProduct); + /* See comment above mfi_fc_id_table[] */ + if (idProduct < 0x1200 || idProduct > 0x12ff) { + return -ENODEV; + } + + mfi = kzalloc(sizeof(struct mfi_device), GFP_KERNEL); + if (!mfi) { + err = -ENOMEM; + 
goto error; + } + + battery_cfg.drv_data = mfi; + + mfi->charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; + mfi->battery = power_supply_register(&udev->dev, + &apple_mfi_fc_desc, + &battery_cfg); + if (IS_ERR(mfi->battery)) { + dev_err(&udev->dev, "Can't register battery\n"); + err = PTR_ERR(mfi->battery); + goto error; + } + + mfi->udev = usb_get_dev(udev); + dev_set_drvdata(&udev->dev, mfi); + + return 0; + +error: + kfree(mfi); + return err; +} + +static void mfi_fc_disconnect(struct usb_device *udev) +{ + struct mfi_device *mfi; + + mfi = dev_get_drvdata(&udev->dev); + if (mfi->battery) + power_supply_unregister(mfi->battery); + dev_set_drvdata(&udev->dev, NULL); + usb_put_dev(mfi->udev); + kfree(mfi); +} + +static struct usb_device_driver mfi_fc_driver = { + .name = "apple-mfi-fastcharge", + .probe = mfi_fc_probe, + .disconnect = mfi_fc_disconnect, + .id_table = mfi_fc_id_table, + .generic_subclass = 1, +}; + +static int __init mfi_fc_driver_init(void) +{ + return usb_register_device_driver(&mfi_fc_driver, THIS_MODULE); +} + +static void __exit mfi_fc_driver_exit(void) +{ + usb_deregister_device_driver(&mfi_fc_driver); +} + +module_init(mfi_fc_driver_init); +module_exit(mfi_fc_driver_exit); diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index dce44fbf031f..dce20301e367 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -33,6 +33,14 @@ #define USB_DEVICE_ID_CODEMERCS_IOWPV2 0x1512 /* full speed iowarrior */ #define USB_DEVICE_ID_CODEMERCS_IOW56 0x1503 +/* fuller speed iowarrior */ +#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1504 +#define USB_DEVICE_ID_CODEMERCS_IOW28L 0x1505 +#define USB_DEVICE_ID_CODEMERCS_IOW100 0x1506 + +/* OEMed devices */ +#define USB_DEVICE_ID_CODEMERCS_IOW24SAG 0x158a +#define USB_DEVICE_ID_CODEMERCS_IOW56AM 0x158b /* Get a minor range for your devices from the usb maintainer */ #ifdef CONFIG_USB_DYNAMIC_MINORS @@ -133,6 +141,11 @@ static const struct usb_device_id iowarrior_ids[] = { {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV2)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56)}, + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24SAG)}, + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56AM)}, + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28)}, + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28L)}, + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW100)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, iowarrior_ids); @@ -357,6 +370,7 @@ static ssize_t iowarrior_write(struct file *file, } switch (dev->product_id) { case USB_DEVICE_ID_CODEMERCS_IOW24: + case USB_DEVICE_ID_CODEMERCS_IOW24SAG: case USB_DEVICE_ID_CODEMERCS_IOWPV1: case USB_DEVICE_ID_CODEMERCS_IOWPV2: case USB_DEVICE_ID_CODEMERCS_IOW40: @@ -371,6 +385,10 @@ static ssize_t iowarrior_write(struct file *file, goto exit; break; case USB_DEVICE_ID_CODEMERCS_IOW56: + case USB_DEVICE_ID_CODEMERCS_IOW56AM: + case USB_DEVICE_ID_CODEMERCS_IOW28: + case USB_DEVICE_ID_CODEMERCS_IOW28L: + case USB_DEVICE_ID_CODEMERCS_IOW100: /* The IOW56 uses asynchronous IO and more urbs */ if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) { /* Wait until we are below the limit for submitted urbs */ @@ -493,6 +511,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case IOW_WRITE: if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 
|| + dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24SAG || dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 || dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 || dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) { @@ -767,7 +786,11 @@ static int iowarrior_probe(struct usb_interface *interface, goto error; } - if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) { + if ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)) { res = usb_find_last_int_out_endpoint(iface_desc, &dev->int_out_endpoint); if (res) { @@ -780,7 +803,11 @@ static int iowarrior_probe(struct usb_interface *interface, /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56)) + ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) || + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100))) /* IOWarrior56 has wMaxPacketSize different from report size */ dev->report_size = 7; diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index 10c9e7f6273e..29fe5771c21b 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c @@ -424,10 +424,6 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, return err; } - hub->vdd = devm_regulator_get(dev, "vdd"); - if (IS_ERR(hub->vdd)) - return PTR_ERR(hub->vdd); - if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1)) hub->vendor_id = USB251XB_DEF_VENDOR_ID; @@ -640,6 +636,13 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, } #endif /* CONFIG_OF */ +static void usb251xb_regulator_disable_action(void *data) +{ + struct usb251xb *hub = data; + + regulator_disable(hub->vdd); +} + static int usb251xb_probe(struct usb251xb *hub) { struct device *dev = hub->dev; @@ -676,10 +679,19 @@ static int usb251xb_probe(struct usb251xb *hub) if (err) return err; + hub->vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(hub->vdd)) + return PTR_ERR(hub->vdd); + err = regulator_enable(hub->vdd); if (err) return err; + err = devm_add_action_or_reset(dev, + usb251xb_regulator_disable_action, hub); + if (err) + return err; + err = usb251xb_connect(hub); if (err) { dev_err(dev, "Failed to connect hub (%d)\n", err); diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index bc5ecd5ff565..39cb14164652 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -414,7 +414,7 @@ static ssize_t mon_text_read_t(struct file *file, char __user *buf, mon_text_read_head_t(rp, &ptr, ep); mon_text_read_statset(rp, &ptr, ep); - ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + ptr.cnt += scnprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, " %d", ep->length); mon_text_read_data(rp, &ptr, ep); @@ -462,7 +462,7 @@ static ssize_t mon_text_read_u(struct file *file, char __user *buf, } else { mon_text_read_statset(rp, &ptr, ep); } - ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + ptr.cnt += scnprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, " %d", ep->length); mon_text_read_data(rp, &ptr, ep); @@ 
-520,7 +520,7 @@ static void mon_text_read_head_t(struct mon_reader_text *rp, case USB_ENDPOINT_XFER_CONTROL: utype = 'C'; break; default: /* PIPE_BULK */ utype = 'B'; } - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, "%lx %u %c %c%c:%03u:%02u", ep->id, ep->tstamp, ep->type, utype, udir, ep->devnum, ep->epnum); @@ -538,7 +538,7 @@ static void mon_text_read_head_u(struct mon_reader_text *rp, case USB_ENDPOINT_XFER_CONTROL: utype = 'C'; break; default: /* PIPE_BULK */ utype = 'B'; } - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, "%lx %u %c %c%c:%d:%03u:%u", ep->id, ep->tstamp, ep->type, utype, udir, ep->busnum, ep->devnum, ep->epnum); @@ -549,7 +549,7 @@ static void mon_text_read_statset(struct mon_reader_text *rp, { if (ep->setup_flag == 0) { /* Setup packet is present and captured */ - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, " s %02x %02x %04x %04x %04x", ep->setup[0], ep->setup[1], @@ -557,10 +557,10 @@ static void mon_text_read_statset(struct mon_reader_text *rp, (ep->setup[5] << 8) | ep->setup[4], (ep->setup[7] << 8) | ep->setup[6]); } else if (ep->setup_flag != '-') { /* Unable to capture setup packet */ - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, " %c __ __ ____ ____ ____", ep->setup_flag); } else { /* No setup for this kind of URB */ - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, " %d", ep->status); } } @@ -568,7 +568,7 @@ static void mon_text_read_statset(struct mon_reader_text *rp, static void mon_text_read_intstat(struct mon_reader_text *rp, struct mon_text_ptr *p, const struct mon_event_text *ep) { - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, " %d:%d", ep->status, ep->interval); } @@ -576,10 +576,10 @@ static void mon_text_read_isostat(struct mon_reader_text *rp, struct mon_text_ptr *p, const struct mon_event_text *ep) { if (ep->type == 'S') { - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, " %d:%d:%d", ep->status, ep->interval, ep->start_frame); } else { - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, " %d:%d:%d:%d", ep->status, ep->interval, ep->start_frame, ep->error_count); } @@ -592,7 +592,7 @@ static void mon_text_read_isodesc(struct mon_reader_text *rp, int i; const struct mon_iso_desc *dp; - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, " %d", ep->numdesc); ndesc = ep->numdesc; if (ndesc > ISODESC_MAX) @@ -601,7 +601,7 @@ static void mon_text_read_isodesc(struct mon_reader_text *rp, ndesc = 0; dp = ep->isodesc; for (i = 0; i < ndesc; i++) { - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, " %d:%u:%u", dp->status, dp->offset, dp->length); dp++; } @@ -614,28 +614,28 @@ static void mon_text_read_data(struct mon_reader_text *rp, if ((data_len = ep->length) > 0) { if (ep->data_flag == 0) { - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, " ="); if (data_len >= DATA_MAX) data_len = DATA_MAX; for (i = 0; i < data_len; 
i++) { if (i % 4 == 0) { - p->cnt += snprintf(p->pbuf + p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, " "); } - p->cnt += snprintf(p->pbuf + p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, "%02x", ep->data[i]); } - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, "\n"); } else { - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, " %c\n", ep->data_flag); } } else { - p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, "\n"); + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, "\n"); } } diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c index 08e18448e8b8..04f666e85731 100644 --- a/drivers/usb/mtu3/mtu3_dr.c +++ b/drivers/usb/mtu3/mtu3_dr.c @@ -320,9 +320,9 @@ void ssusb_set_force_mode(struct ssusb_mtk *ssusb, mtu3_writel(ssusb->ippc_base, SSUSB_U2_CTRL(0), value); } -static int ssusb_role_sw_set(struct device *dev, enum usb_role role) +static int ssusb_role_sw_set(struct usb_role_switch *sw, enum usb_role role) { - struct ssusb_mtk *ssusb = dev_get_drvdata(dev); + struct ssusb_mtk *ssusb = usb_role_switch_get_drvdata(sw); bool to_host = false; if (role == USB_ROLE_HOST) @@ -334,9 +334,9 @@ static int ssusb_role_sw_set(struct device *dev, enum usb_role role) return 0; } -static enum usb_role ssusb_role_sw_get(struct device *dev) +static enum usb_role ssusb_role_sw_get(struct usb_role_switch *sw) { - struct ssusb_mtk *ssusb = dev_get_drvdata(dev); + struct ssusb_mtk *ssusb = usb_role_switch_get_drvdata(sw); enum usb_role role; role = ssusb->is_host ? USB_ROLE_HOST : USB_ROLE_DEVICE; @@ -356,6 +356,7 @@ static int ssusb_role_sw_register(struct otg_switch_mtk *otg_sx) role_sx_desc.set = ssusb_role_sw_set; role_sx_desc.get = ssusb_role_sw_get; role_sx_desc.fwnode = dev_fwnode(ssusb->dev); + role_sx_desc.driver_data = ssusb; otg_sx->role_sw = usb_role_switch_register(ssusb->dev, &role_sx_desc); return PTR_ERR_OR_ZERO(otg_sx->role_sw); diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index eb2ded1026ee..3b0d1c20ebe6 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -110,9 +110,11 @@ config USB_MUSB_UX500 config USB_MUSB_JZ4740 tristate "JZ4740" + depends on OF depends on MIPS || COMPILE_TEST depends on USB_MUSB_GADGET depends on USB=n || USB_OTG_BLACKLIST_HUB + select USB_ROLE_SWITCH config USB_MUSB_MEDIATEK tristate "MediaTek platforms" @@ -144,7 +146,7 @@ config USB_UX500_DMA config USB_INVENTRA_DMA bool 'Inventra' - depends on USB_MUSB_OMAP2PLUS || USB_MUSB_MEDIATEK + depends on USB_MUSB_OMAP2PLUS || USB_MUSB_MEDIATEK || USB_MUSB_JZ4740 help Enable DMA transfers using Mentor's engine. 
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c index bc0109f4700b..e64dd30e80e7 100644 --- a/drivers/usb/musb/jz4740.c +++ b/drivers/usb/musb/jz4740.c @@ -12,23 +12,29 @@ #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/usb/role.h> #include <linux/usb/usb_phy_generic.h> #include "musb_core.h" struct jz4740_glue { struct platform_device *pdev; + struct musb *musb; struct clk *clk; + struct usb_role_switch *role_sw; }; static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci) { unsigned long flags; - irqreturn_t retval = IRQ_NONE; + irqreturn_t retval = IRQ_NONE, retval_dma = IRQ_NONE; struct musb *musb = __hci; spin_lock_irqsave(&musb->lock, flags); + if (IS_ENABLED(CONFIG_USB_INVENTRA_DMA) && musb->dma_controller) + retval_dma = dma_controller_irq(irq, musb->dma_controller); + musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB); musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); @@ -46,7 +52,10 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci) spin_unlock_irqrestore(&musb->lock, flags); - return retval; + if (retval == IRQ_HANDLED || retval_dma == IRQ_HANDLED) + return IRQ_HANDLED; + + return IRQ_NONE; } static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = { @@ -66,11 +75,40 @@ static const struct musb_hdrc_config jz4740_musb_config = { .fifo_cfg_size = ARRAY_SIZE(jz4740_musb_fifo_cfg), }; +static int jz4740_musb_role_switch_set(struct usb_role_switch *sw, + enum usb_role role) +{ + struct jz4740_glue *glue = usb_role_switch_get_drvdata(sw); + struct usb_phy *phy = glue->musb->xceiv; + + switch (role) { + case USB_ROLE_NONE: + atomic_notifier_call_chain(&phy->notifier, USB_EVENT_NONE, phy); + break; + case USB_ROLE_DEVICE: + atomic_notifier_call_chain(&phy->notifier, USB_EVENT_VBUS, phy); + break; + case USB_ROLE_HOST: + atomic_notifier_call_chain(&phy->notifier, USB_EVENT_ID, phy); + break; + } + + return 0; +} + static int jz4740_musb_init(struct musb *musb) { struct device *dev = musb->controller->parent; + struct jz4740_glue *glue = dev_get_drvdata(dev); + struct usb_role_switch_desc role_sw_desc = { + .set = jz4740_musb_role_switch_set, + .driver_data = glue, + .fwnode = dev_fwnode(dev), + }; int err; + glue->musb = musb; + if (dev->of_node) musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0); else @@ -82,6 +120,12 @@ static int jz4740_musb_init(struct musb *musb) return err; } + glue->role_sw = usb_role_switch_register(dev, &role_sw_desc); + if (IS_ERR(glue->role_sw)) { + dev_err(dev, "Failed to register USB role switch"); + return PTR_ERR(glue->role_sw); + } + /* * Silicon does not implement ConfigData register. * Set dyn_fifo to avoid reading EP config from hardware. @@ -93,14 +137,24 @@ static int jz4740_musb_init(struct musb *musb) return 0; } -/* - * DMA has not been confirmed to work with CONFIG_USB_INVENTRA_DMA, - * so let's not set up the dma function pointers yet. 
- */ +static int jz4740_musb_exit(struct musb *musb) +{ + struct jz4740_glue *glue = dev_get_drvdata(musb->controller->parent); + + usb_role_switch_unregister(glue->role_sw); + + return 0; +} + static const struct musb_platform_ops jz4740_musb_ops = { .quirks = MUSB_DMA_INVENTRA | MUSB_INDEXED_EP, .fifo_mode = 2, .init = jz4740_musb_init, + .exit = jz4740_musb_exit, +#ifdef CONFIG_USB_INVENTRA_DMA + .dma_init = musbhs_dma_controller_create_noirq, + .dma_exit = musbhs_dma_controller_destroy, +#endif }; static const struct musb_hdrc_platform_data jz4740_musb_pdata = { @@ -109,10 +163,37 @@ static const struct musb_hdrc_platform_data jz4740_musb_pdata = { .platform_ops = &jz4740_musb_ops, }; +static struct musb_fifo_cfg jz4770_musb_fifo_cfg[] = { + { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, + { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, + { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, + { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, + { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, }, + { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, }, + { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, }, + { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, }, + { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, }, + { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, }, +}; + +static struct musb_hdrc_config jz4770_musb_config = { + .multipoint = 1, + .num_eps = 11, + .ram_bits = 11, + .fifo_cfg = jz4770_musb_fifo_cfg, + .fifo_cfg_size = ARRAY_SIZE(jz4770_musb_fifo_cfg), +}; + +static const struct musb_hdrc_platform_data jz4770_musb_pdata = { + .mode = MUSB_PERIPHERAL, /* TODO: support OTG */ + .config = &jz4770_musb_config, + .platform_ops = &jz4740_musb_ops, +}; + static int jz4740_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - const struct musb_hdrc_platform_data *pdata = &jz4740_musb_pdata; + const struct musb_hdrc_platform_data *pdata; struct platform_device *musb; struct jz4740_glue *glue; struct clk *clk; @@ -122,6 +203,12 @@ static int jz4740_probe(struct platform_device *pdev) if (!glue) return -ENOMEM; + pdata = of_device_get_match_data(dev); + if (!pdata) { + dev_err(dev, "missing platform data"); + return -EINVAL; + } + musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO); if (!musb) { dev_err(dev, "failed to allocate musb device"); @@ -142,6 +229,8 @@ static int jz4740_probe(struct platform_device *pdev) } musb->dev.parent = dev; + musb->dev.dma_mask = &musb->dev.coherent_dma_mask; + musb->dev.coherent_dma_mask = DMA_BIT_MASK(32); glue->pdev = musb; glue->clk = clk; @@ -186,20 +275,19 @@ static int jz4740_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_OF static const struct of_device_id jz4740_musb_of_match[] = { - { .compatible = "ingenic,jz4740-musb" }, + { .compatible = "ingenic,jz4740-musb", .data = &jz4740_musb_pdata }, + { .compatible = "ingenic,jz4770-musb", .data = &jz4770_musb_pdata }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, jz4740_musb_of_match); -#endif static struct platform_driver jz4740_driver = { .probe = jz4740_probe, .remove = jz4740_remove, .driver = { .name = "musb-jz4740", - .of_match_table = of_match_ptr(jz4740_musb_of_match), + .of_match_table = jz4740_musb_of_match, }, }; diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c index 6b88c2f5d970..6196b0e8d77d 100644 --- a/drivers/usb/musb/mediatek.c +++ b/drivers/usb/musb/mediatek.c @@ -115,9 +115,8 @@ static void mtk_musb_clks_disable(struct mtk_glue *glue) 
clk_disable_unprepare(glue->main); } -static int musb_usb_role_sx_set(struct device *dev, enum usb_role role) +static int mtk_otg_switch_set(struct mtk_glue *glue, enum usb_role role) { - struct mtk_glue *glue = dev_get_drvdata(dev); struct musb *musb = glue->musb; u8 devctl = readb(musb->mregs + MUSB_DEVCTL); enum usb_role new_role; @@ -168,9 +167,14 @@ static int musb_usb_role_sx_set(struct device *dev, enum usb_role role) return 0; } -static enum usb_role musb_usb_role_sx_get(struct device *dev) +static int musb_usb_role_sx_set(struct usb_role_switch *sw, enum usb_role role) { - struct mtk_glue *glue = dev_get_drvdata(dev); + return mtk_otg_switch_set(usb_role_switch_get_drvdata(sw), role); +} + +static enum usb_role musb_usb_role_sx_get(struct usb_role_switch *sw) +{ + struct mtk_glue *glue = usb_role_switch_get_drvdata(sw); return glue->role; } @@ -182,6 +186,7 @@ static int mtk_otg_switch_init(struct mtk_glue *glue) role_sx_desc.set = musb_usb_role_sx_set; role_sx_desc.get = musb_usb_role_sx_get; role_sx_desc.fwnode = dev_fwnode(glue->dev); + role_sx_desc.driver_data = glue; glue->role_sw = usb_role_switch_register(glue->dev, &role_sx_desc); return PTR_ERR_OR_ZERO(glue->role_sw); @@ -288,8 +293,7 @@ static int mtk_musb_set_mode(struct musb *musb, u8 mode) return -EINVAL; } - glue->role = new_role; - musb_usb_role_sx_set(dev, glue->role); + mtk_otg_switch_set(glue, new_role); return 0; } @@ -444,7 +448,7 @@ static int mtk_musb_probe(struct platform_device *pdev) struct platform_device_info pinfo; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - int ret = -ENOMEM; + int ret; glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL); if (!glue) diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index f616fb489542..d590110539ab 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2945,7 +2945,7 @@ static const struct dev_pm_ops musb_dev_pm_ops = { static struct platform_driver musb_driver = { .driver = { - .name = (char *)musb_driver_name, + .name = musb_driver_name, .bus = &platform_bus_type, .pm = MUSB_DEV_PM_OPS, .dev_groups = musb_groups, diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 886c9b602f8c..8736f4251a22 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -1436,10 +1436,7 @@ done: * We need to map sg if the transfer_buffer is * NULL. 
*/ - if (!urb->transfer_buffer) - qh->use_sg = true; - - if (qh->use_sg) { + if (!urb->transfer_buffer) { /* sg_miter_start is already done in musb_ep_program */ if (!sg_miter_next(&qh->sg_miter)) { dev_err(musb->controller, "error: sg list empty\n"); @@ -1447,9 +1444,8 @@ done: status = -EINVAL; goto done; } - urb->transfer_buffer = qh->sg_miter.addr; length = min_t(u32, length, qh->sg_miter.length); - musb_write_fifo(hw_ep, length, urb->transfer_buffer); + musb_write_fifo(hw_ep, length, qh->sg_miter.addr); qh->sg_miter.consumed = length; sg_miter_stop(&qh->sg_miter); } else { @@ -1458,11 +1454,6 @@ done: qh->segsize = length; - if (qh->use_sg) { - if (offset + length >= urb->transfer_buffer_length) - qh->use_sg = false; - } - musb_ep_select(mbase, epnum); musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); @@ -1977,8 +1968,10 @@ finish: urb->actual_length += xfer_len; qh->offset += xfer_len; if (done) { - if (qh->use_sg) + if (qh->use_sg) { qh->use_sg = false; + urb->transfer_buffer = NULL; + } if (urb->status == -EINPROGRESS) urb->status = status; @@ -2550,7 +2543,7 @@ static int musb_bus_resume(struct usb_hcd *hcd) struct musb_temp_buffer { void *kmalloc_ptr; void *old_xfer_buffer; - u8 data[0]; + u8 data[]; }; static void musb_free_temp_buffer(struct urb *urb) diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 5d449089e3ad..99890d1bbfcb 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -156,7 +156,7 @@ static u8 tusb_readb(void __iomem *addr, u32 offset) return val; } -static void tusb_writeb(void __iomem *addr, unsigned offset, u8 data) +static void tusb_writeb(void __iomem *addr, u32 offset, u8 data) { u16 tmp; diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index ff24fca0a2d9..4b3fa78995cf 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -184,4 +184,12 @@ config USB_ULPI_VIEWPORT Provides read/write operations to the ULPI phy register set for controllers with a viewport register (e.g. Chipidea/ARC controllers). +config JZ4770_PHY + tristate "Ingenic JZ4770 Transceiver Driver" + depends on MIPS || COMPILE_TEST + select USB_PHY + help + This driver provides PHY support for the USB controller found + on the JZ4770 SoC from Ingenic. 
+ endmenu diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile index df1d99010079..b352bdbe8712 100644 --- a/drivers/usb/phy/Makefile +++ b/drivers/usb/phy/Makefile @@ -24,3 +24,4 @@ obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o obj-$(CONFIG_USB_ULPI) += phy-ulpi.o obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o obj-$(CONFIG_KEYSTONE_USB_PHY) += phy-keystone.o +obj-$(CONFIG_JZ4770_PHY) += phy-jz4770.o diff --git a/drivers/usb/phy/phy-jz4770.c b/drivers/usb/phy/phy-jz4770.c new file mode 100644 index 000000000000..3ea1f5b9bcf8 --- /dev/null +++ b/drivers/usb/phy/phy-jz4770.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Ingenic JZ4770 USB PHY driver + * Copyright (c) Paul Cercueil <paul@crapouillou.net> + */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/usb/otg.h> +#include <linux/usb/phy.h> + +#define REG_USBPCR_OFFSET 0x00 +#define REG_USBRDT_OFFSET 0x04 +#define REG_USBVBFIL_OFFSET 0x08 +#define REG_USBPCR1_OFFSET 0x0c + +/* USBPCR */ +#define USBPCR_USB_MODE BIT(31) +#define USBPCR_AVLD_REG BIT(30) +#define USBPCR_INCRM BIT(27) +#define USBPCR_CLK12_EN BIT(26) +#define USBPCR_COMMONONN BIT(25) +#define USBPCR_VBUSVLDEXT BIT(24) +#define USBPCR_VBUSVLDEXTSEL BIT(23) +#define USBPCR_POR BIT(22) +#define USBPCR_SIDDQ BIT(21) +#define USBPCR_OTG_DISABLE BIT(20) +#define USBPCR_TXPREEMPHTUNE BIT(6) + +#define USBPCR_IDPULLUP_LSB 28 +#define USBPCR_IDPULLUP_MASK GENMASK(29, USBPCR_IDPULLUP_LSB) +#define USBPCR_IDPULLUP_ALWAYS (3 << USBPCR_IDPULLUP_LSB) +#define USBPCR_IDPULLUP_SUSPEND (1 << USBPCR_IDPULLUP_LSB) +#define USBPCR_IDPULLUP_OTG (0 << USBPCR_IDPULLUP_LSB) + +#define USBPCR_COMPDISTUNE_LSB 17 +#define USBPCR_COMPDISTUNE_MASK GENMASK(19, USBPCR_COMPDISTUNE_LSB) +#define USBPCR_COMPDISTUNE_DFT 4 + +#define USBPCR_OTGTUNE_LSB 14 +#define USBPCR_OTGTUNE_MASK GENMASK(16, USBPCR_OTGTUNE_LSB) +#define USBPCR_OTGTUNE_DFT 4 + +#define USBPCR_SQRXTUNE_LSB 11 +#define USBPCR_SQRXTUNE_MASK GENMASK(13, USBPCR_SQRXTUNE_LSB) +#define USBPCR_SQRXTUNE_DFT 3 + +#define USBPCR_TXFSLSTUNE_LSB 7 +#define USBPCR_TXFSLSTUNE_MASK GENMASK(10, USBPCR_TXFSLSTUNE_LSB) +#define USBPCR_TXFSLSTUNE_DFT 3 + +#define USBPCR_TXRISETUNE_LSB 4 +#define USBPCR_TXRISETUNE_MASK GENMASK(5, USBPCR_TXRISETUNE_LSB) +#define USBPCR_TXRISETUNE_DFT 3 + +#define USBPCR_TXVREFTUNE_LSB 0 +#define USBPCR_TXVREFTUNE_MASK GENMASK(3, USBPCR_TXVREFTUNE_LSB) +#define USBPCR_TXVREFTUNE_DFT 5 + +/* USBRDT */ +#define USBRDT_VBFIL_LD_EN BIT(25) +#define USBRDT_IDDIG_EN BIT(24) +#define USBRDT_IDDIG_REG BIT(23) + +#define USBRDT_USBRDT_LSB 0 +#define USBRDT_USBRDT_MASK GENMASK(22, USBRDT_USBRDT_LSB) + +/* USBPCR1 */ +#define USBPCR1_UHC_POWON BIT(5) + +struct jz4770_phy { + struct usb_phy phy; + struct usb_otg otg; + struct device *dev; + void __iomem *base; + struct clk *clk; + struct regulator *vcc_supply; +}; + +static inline struct jz4770_phy *otg_to_jz4770_phy(struct usb_otg *otg) +{ + return container_of(otg, struct jz4770_phy, otg); +} + +static inline struct jz4770_phy *phy_to_jz4770_phy(struct usb_phy *phy) +{ + return container_of(phy, struct jz4770_phy, phy); +} + +static int jz4770_phy_set_peripheral(struct usb_otg *otg, + struct usb_gadget *gadget) +{ + struct jz4770_phy *priv = otg_to_jz4770_phy(otg); + u32 reg; + + reg = readl(priv->base + REG_USBPCR_OFFSET); + reg &= ~USBPCR_USB_MODE; + reg |= USBPCR_VBUSVLDEXT | USBPCR_VBUSVLDEXTSEL | USBPCR_OTG_DISABLE; + writel(reg, 
priv->base + REG_USBPCR_OFFSET); + + return 0; +} + +static int jz4770_phy_set_host(struct usb_otg *otg, struct usb_bus *host) +{ + struct jz4770_phy *priv = otg_to_jz4770_phy(otg); + u32 reg; + + reg = readl(priv->base + REG_USBPCR_OFFSET); + reg &= ~(USBPCR_VBUSVLDEXT | USBPCR_VBUSVLDEXTSEL | USBPCR_OTG_DISABLE); + reg |= USBPCR_USB_MODE; + writel(reg, priv->base + REG_USBPCR_OFFSET); + + return 0; +} + +static int jz4770_phy_init(struct usb_phy *phy) +{ + struct jz4770_phy *priv = phy_to_jz4770_phy(phy); + int err; + u32 reg; + + err = regulator_enable(priv->vcc_supply); + if (err) { + dev_err(priv->dev, "Unable to enable VCC: %d", err); + return err; + } + + err = clk_prepare_enable(priv->clk); + if (err) { + dev_err(priv->dev, "Unable to start clock: %d", err); + return err; + } + + reg = USBPCR_AVLD_REG | USBPCR_COMMONONN | USBPCR_IDPULLUP_ALWAYS | + (USBPCR_COMPDISTUNE_DFT << USBPCR_COMPDISTUNE_LSB) | + (USBPCR_OTGTUNE_DFT << USBPCR_OTGTUNE_LSB) | + (USBPCR_SQRXTUNE_DFT << USBPCR_SQRXTUNE_LSB) | + (USBPCR_TXFSLSTUNE_DFT << USBPCR_TXFSLSTUNE_LSB) | + (USBPCR_TXRISETUNE_DFT << USBPCR_TXRISETUNE_LSB) | + (USBPCR_TXVREFTUNE_DFT << USBPCR_TXVREFTUNE_LSB) | + USBPCR_POR; + writel(reg, priv->base + REG_USBPCR_OFFSET); + + /* Wait for PHY to reset */ + usleep_range(30, 300); + writel(reg & ~USBPCR_POR, priv->base + REG_USBPCR_OFFSET); + usleep_range(300, 1000); + + return 0; +} + +static void jz4770_phy_shutdown(struct usb_phy *phy) +{ + struct jz4770_phy *priv = phy_to_jz4770_phy(phy); + + clk_disable_unprepare(priv->clk); + regulator_disable(priv->vcc_supply); +} + +static void jz4770_phy_remove(void *phy) +{ + usb_remove_phy(phy); +} + +static int jz4770_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct jz4770_phy *priv; + int err; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + priv->dev = dev; + priv->phy.dev = dev; + priv->phy.otg = &priv->otg; + priv->phy.label = "jz4770-phy"; + priv->phy.init = jz4770_phy_init; + priv->phy.shutdown = jz4770_phy_shutdown; + + priv->otg.state = OTG_STATE_UNDEFINED; + priv->otg.usb_phy = &priv->phy; + priv->otg.set_host = jz4770_phy_set_host; + priv->otg.set_peripheral = jz4770_phy_set_peripheral; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) { + dev_err(dev, "Failed to map registers"); + return PTR_ERR(priv->base); + } + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) { + err = PTR_ERR(priv->clk); + if (err != -EPROBE_DEFER) + dev_err(dev, "Failed to get clock"); + return err; + } + + priv->vcc_supply = devm_regulator_get(dev, "vcc"); + if (IS_ERR(priv->vcc_supply)) { + err = PTR_ERR(priv->vcc_supply); + if (err != -EPROBE_DEFER) + dev_err(dev, "failed to get regulator"); + return err; + } + + err = usb_add_phy(&priv->phy, USB_PHY_TYPE_USB2); + if (err) { + if (err != -EPROBE_DEFER) + dev_err(dev, "Unable to register PHY"); + return err; + } + + return devm_add_action_or_reset(dev, jz4770_phy_remove, &priv->phy); +} + +#ifdef CONFIG_OF +static const struct of_device_id jz4770_phy_of_matches[] = { + { .compatible = "ingenic,jz4770-phy" }, + { } +}; +MODULE_DEVICE_TABLE(of, jz4770_phy_of_matches); +#endif + +static struct platform_driver jz4770_phy_driver = { + .probe = jz4770_phy_probe, + .driver = { + .name = "jz4770-phy", + .of_match_table = of_match_ptr(jz4770_phy_of_matches), + }, +}; +module_platform_driver(jz4770_phy_driver); + +MODULE_AUTHOR("Paul Cercueil 
<paul@crapouillou.net>"); +MODULE_DESCRIPTION("Ingenic JZ4770 USB PHY driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index 037e8eee737d..cffe2aced488 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c @@ -12,12 +12,11 @@ #include <linux/delay.h> #include <linux/err.h> #include <linux/export.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/resource.h> #include <linux/slab.h> @@ -969,6 +968,10 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy, return -ENXIO; } + /* + * Note that UTMI pad registers are shared by all PHYs, therefore + * devm_platform_ioremap_resource() can't be used here. + */ tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!tegra_phy->pad_regs) { @@ -1087,6 +1090,10 @@ static int tegra_usb_phy_probe(struct platform_device *pdev) return -ENXIO; } + /* + * Note that PHY and USB controller are using shared registers, + * therefore devm_platform_ioremap_resource() can't be used here. + */ tegra_phy->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!tegra_phy->regs) { diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c index 63a00ff26655..5b17709821df 100644 --- a/drivers/usb/roles/class.c +++ b/drivers/usb/roles/class.c @@ -48,7 +48,7 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role) mutex_lock(&sw->lock); - ret = sw->set(sw->dev.parent, role); + ret = sw->set(sw, role); if (!ret) sw->role = role; @@ -75,7 +75,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw) mutex_lock(&sw->lock); if (sw->get) - role = sw->get(sw->dev.parent); + role = sw->get(sw); else role = sw->role; @@ -199,7 +199,7 @@ EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode); static umode_t usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n) { - struct device *dev = container_of(kobj, typeof(*dev), kobj); + struct device *dev = kobj_to_dev(kobj); struct usb_role_switch *sw = to_role_switch(dev); if (sw->allow_userspace_control) @@ -329,7 +329,9 @@ usb_role_switch_register(struct device *parent, sw->dev.fwnode = desc->fwnode; sw->dev.class = role_class; sw->dev.type = &usb_role_dev_type; - dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent)); + dev_set_drvdata(&sw->dev, desc->driver_data); + dev_set_name(&sw->dev, "%s-role-switch", + desc->name ? 
desc->name : dev_name(parent)); ret = device_register(&sw->dev); if (ret) { @@ -356,6 +358,27 @@ void usb_role_switch_unregister(struct usb_role_switch *sw) } EXPORT_SYMBOL_GPL(usb_role_switch_unregister); +/** + * usb_role_switch_set_drvdata - Assign private data pointer to a switch + * @sw: USB Role Switch + * @data: Private data pointer + */ +void usb_role_switch_set_drvdata(struct usb_role_switch *sw, void *data) +{ + dev_set_drvdata(&sw->dev, data); +} +EXPORT_SYMBOL_GPL(usb_role_switch_set_drvdata); + +/** + * usb_role_switch_get_drvdata - Get the private data pointer of a switch + * @sw: USB Role Switch + */ +void *usb_role_switch_get_drvdata(struct usb_role_switch *sw) +{ + return dev_get_drvdata(&sw->dev); +} +EXPORT_SYMBOL_GPL(usb_role_switch_get_drvdata); + static int __init usb_roles_init(void) { role_class = class_create(THIS_MODULE, "usb_role"); diff --git a/drivers/usb/roles/intel-xhci-usb-role-switch.c b/drivers/usb/roles/intel-xhci-usb-role-switch.c index 80d6559bbcb2..5c96e929acea 100644 --- a/drivers/usb/roles/intel-xhci-usb-role-switch.c +++ b/drivers/usb/roles/intel-xhci-usb-role-switch.c @@ -42,6 +42,7 @@ #define DRV_NAME "intel_xhci_usb_sw" struct intel_xhci_usb_data { + struct device *dev; struct usb_role_switch *role_sw; void __iomem *base; bool enable_sw_switch; @@ -51,9 +52,10 @@ static const struct software_node intel_xhci_usb_node = { "intel-xhci-usb-sw", }; -static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role) +static int intel_xhci_usb_set_role(struct usb_role_switch *sw, + enum usb_role role) { - struct intel_xhci_usb_data *data = dev_get_drvdata(dev); + struct intel_xhci_usb_data *data = usb_role_switch_get_drvdata(sw); unsigned long timeout; acpi_status status; u32 glk, val; @@ -66,11 +68,11 @@ static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role) */ status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk); if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) { - dev_err(dev, "Error could not acquire lock\n"); + dev_err(data->dev, "Error could not acquire lock\n"); return -EIO; } - pm_runtime_get_sync(dev); + pm_runtime_get_sync(data->dev); /* * Set idpin value as requested. 
@@ -112,7 +114,7 @@ static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role) do { val = readl(data->base + DUAL_ROLE_CFG1); if (!!(val & HOST_MODE) == (role == USB_ROLE_HOST)) { - pm_runtime_put(dev); + pm_runtime_put(data->dev); return 0; } @@ -120,21 +122,21 @@ static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role) usleep_range(5000, 10000); } while (time_before(jiffies, timeout)); - pm_runtime_put(dev); + pm_runtime_put(data->dev); - dev_warn(dev, "Timeout waiting for role-switch\n"); + dev_warn(data->dev, "Timeout waiting for role-switch\n"); return -ETIMEDOUT; } -static enum usb_role intel_xhci_usb_get_role(struct device *dev) +static enum usb_role intel_xhci_usb_get_role(struct usb_role_switch *sw) { - struct intel_xhci_usb_data *data = dev_get_drvdata(dev); + struct intel_xhci_usb_data *data = usb_role_switch_get_drvdata(sw); enum usb_role role; u32 val; - pm_runtime_get_sync(dev); + pm_runtime_get_sync(data->dev); val = readl(data->base + DUAL_ROLE_CFG0); - pm_runtime_put(dev); + pm_runtime_put(data->dev); if (!(val & SW_IDPIN)) role = USB_ROLE_HOST; @@ -175,7 +177,9 @@ static int intel_xhci_usb_probe(struct platform_device *pdev) sw_desc.get = intel_xhci_usb_get_role, sw_desc.allow_userspace_control = true, sw_desc.fwnode = software_node_fwnode(&intel_xhci_usb_node); + sw_desc.driver_data = data; + data->dev = dev; data->enable_sw_switch = !device_property_read_bool(dev, "sw_switch_disable"); diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index d3f420f3a083..c5ecdcd51ffc 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -205,6 +205,16 @@ static int ch341_get_divisor(speed_t speed) 16 * speed - 16 * CH341_CLKRATE / (clk_div * (div + 1))) div++; + /* + * Prefer lower base clock (fact = 0) if even divisor. + * + * Note that this makes the receiver more tolerant to errors. 
+ */ + if (fact == 1 && div % 2 == 0) { + div /= 2; + fact = 0; + } + return (0x100 - div) << 8 | fact << 2 | ps; } diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index 578ebdd86520..91055a191995 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -1472,7 +1472,7 @@ static int digi_read_oob_callback(struct urb *urb) struct usb_serial_port *port = urb->context; struct usb_serial *serial = port->serial; struct tty_struct *tty; - struct digi_port *priv = usb_get_serial_port_data(port); + struct digi_port *priv; unsigned char *buf = urb->transfer_buffer; int opcode, line, status, val; unsigned long flags; diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c index 43fa1f0716b7..dcda7fb164b4 100644 --- a/drivers/usb/serial/f81232.c +++ b/drivers/usb/serial/f81232.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Fintek F81232 USB to serial adaptor driver + * Fintek F81532A/534A/535/536 USB to 2/4/8/12 serial adaptor driver * * Copyright (C) 2012 Greg Kroah-Hartman (gregkh@linuxfoundation.org) * Copyright (C) 2012 Linux Foundation @@ -21,11 +22,45 @@ #include <linux/usb/serial.h> #include <linux/serial_reg.h> -static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x1934, 0x0706) }, +#define F81232_ID \ + { USB_DEVICE(0x1934, 0x0706) } /* 1 port UART device */ + +#define F81534A_SERIES_ID \ + { USB_DEVICE(0x2c42, 0x1602) }, /* In-Box 2 port UART device */ \ + { USB_DEVICE(0x2c42, 0x1604) }, /* In-Box 4 port UART device */ \ + { USB_DEVICE(0x2c42, 0x1605) }, /* In-Box 8 port UART device */ \ + { USB_DEVICE(0x2c42, 0x1606) }, /* In-Box 12 port UART device */ \ + { USB_DEVICE(0x2c42, 0x1608) }, /* Non-Flash type */ \ + { USB_DEVICE(0x2c42, 0x1632) }, /* 2 port UART device */ \ + { USB_DEVICE(0x2c42, 0x1634) }, /* 4 port UART device */ \ + { USB_DEVICE(0x2c42, 0x1635) }, /* 8 port UART device */ \ + { USB_DEVICE(0x2c42, 0x1636) } /* 12 port UART device */ + +#define F81534A_CTRL_ID \ + { USB_DEVICE(0x2c42, 0x16f8) } /* Global control device */ + +static const struct usb_device_id f81232_id_table[] = { + F81232_ID, { } /* Terminating entry */ }; -MODULE_DEVICE_TABLE(usb, id_table); + +static const struct usb_device_id f81534a_id_table[] = { + F81534A_SERIES_ID, + { } /* Terminating entry */ +}; + +static const struct usb_device_id f81534a_ctrl_id_table[] = { + F81534A_CTRL_ID, + { } /* Terminating entry */ +}; + +static const struct usb_device_id combined_id_table[] = { + F81232_ID, + F81534A_SERIES_ID, + F81534A_CTRL_ID, + { } /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(usb, combined_id_table); /* Maximum baudrate for F81232 */ #define F81232_MAX_BAUDRATE 1500000 @@ -35,6 +70,7 @@ MODULE_DEVICE_TABLE(usb, id_table); #define F81232_REGISTER_REQUEST 0xa0 #define F81232_GET_REGISTER 0xc0 #define F81232_SET_REGISTER 0x40 +#define F81534A_ACCESS_REG_RETRY 2 #define SERIAL_BASE_ADDRESS 0x0120 #define RECEIVE_BUFFER_REGISTER (0x00 + SERIAL_BASE_ADDRESS) @@ -61,6 +97,22 @@ MODULE_DEVICE_TABLE(usb, id_table); #define F81232_CLK_14_77_MHZ (BIT(1) | BIT(0)) #define F81232_CLK_MASK GENMASK(1, 0) +#define F81534A_MODE_REG 0x107 +#define F81534A_TRIGGER_MASK GENMASK(3, 2) +#define F81534A_TRIGGER_MULTIPLE_4X BIT(3) +#define F81534A_FIFO_128BYTE (BIT(1) | BIT(0)) + +/* Serial port self GPIO control, 2bytes [control&output data][input data] */ +#define F81534A_GPIO_REG 0x10e +#define F81534A_GPIO_MODE2_DIR BIT(6) /* 1: input, 0: output */ +#define F81534A_GPIO_MODE1_DIR BIT(5) +#define 
F81534A_GPIO_MODE0_DIR BIT(4) +#define F81534A_GPIO_MODE2_OUTPUT BIT(2) +#define F81534A_GPIO_MODE1_OUTPUT BIT(1) +#define F81534A_GPIO_MODE0_OUTPUT BIT(0) + +#define F81534A_CTRL_CMD_ENABLE_PORT 0x116 + struct f81232_private { struct mutex lock; u8 modem_control; @@ -322,10 +374,38 @@ exit: __func__, retval); } +static char f81232_handle_lsr(struct usb_serial_port *port, u8 lsr) +{ + struct f81232_private *priv = usb_get_serial_port_data(port); + char tty_flag = TTY_NORMAL; + + if (!(lsr & UART_LSR_BRK_ERROR_BITS)) + return tty_flag; + + if (lsr & UART_LSR_BI) { + tty_flag = TTY_BREAK; + port->icount.brk++; + usb_serial_handle_break(port); + } else if (lsr & UART_LSR_PE) { + tty_flag = TTY_PARITY; + port->icount.parity++; + } else if (lsr & UART_LSR_FE) { + tty_flag = TTY_FRAME; + port->icount.frame++; + } + + if (lsr & UART_LSR_OE) { + port->icount.overrun++; + schedule_work(&priv->lsr_work); + tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); + } + + return tty_flag; +} + static void f81232_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; - struct f81232_private *priv = usb_get_serial_port_data(port); unsigned char *data = urb->transfer_buffer; char tty_flag; unsigned int i; @@ -341,29 +421,8 @@ static void f81232_process_read_urb(struct urb *urb) /* bulk-in data: [LSR(1Byte)+DATA(1Byte)][LSR(1Byte)+DATA(1Byte)]... */ for (i = 0; i < urb->actual_length; i += 2) { - tty_flag = TTY_NORMAL; lsr = data[i]; - - if (lsr & UART_LSR_BRK_ERROR_BITS) { - if (lsr & UART_LSR_BI) { - tty_flag = TTY_BREAK; - port->icount.brk++; - usb_serial_handle_break(port); - } else if (lsr & UART_LSR_PE) { - tty_flag = TTY_PARITY; - port->icount.parity++; - } else if (lsr & UART_LSR_FE) { - tty_flag = TTY_FRAME; - port->icount.frame++; - } - - if (lsr & UART_LSR_OE) { - port->icount.overrun++; - schedule_work(&priv->lsr_work); - tty_insert_flip_char(&port->port, 0, - TTY_OVERRUN); - } - } + tty_flag = f81232_handle_lsr(port, lsr); if (port->port.console && port->sysrq) { if (usb_serial_handle_sysrq_char(port, data[i + 1])) @@ -376,6 +435,47 @@ static void f81232_process_read_urb(struct urb *urb) tty_flip_buffer_push(&port->port); } +static void f81534a_process_read_urb(struct urb *urb) +{ + struct usb_serial_port *port = urb->context; + unsigned char *data = urb->transfer_buffer; + char tty_flag; + unsigned int i; + u8 lsr; + u8 len; + + if (urb->actual_length < 3) { + dev_err(&port->dev, "short message received: %d\n", + urb->actual_length); + return; + } + + len = data[0]; + if (len != urb->actual_length) { + dev_err(&port->dev, "malformed message received: %d (%d)\n", + urb->actual_length, len); + return; + } + + /* bulk-in data: [LEN][Data.....][LSR] */ + lsr = data[len - 1]; + tty_flag = f81232_handle_lsr(port, lsr); + + if (port->port.console && port->sysrq) { + for (i = 1; i < len - 1; ++i) { + if (!usb_serial_handle_sysrq_char(port, data[i])) { + tty_insert_flip_char(&port->port, data[i], + tty_flag); + } + } + } else { + tty_insert_flip_string_fixed_flag(&port->port, &data[1], + tty_flag, len - 2); + } + + tty_flip_buffer_push(&port->port); +} + static void f81232_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; @@ -659,6 +759,24 @@ static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port) return 0; } +static int f81534a_open(struct tty_struct *tty, struct usb_serial_port *port) +{ + int status; + u8 mask; + u8 val; + + val = F81534A_TRIGGER_MULTIPLE_4X | F81534A_FIFO_128BYTE; + mask = 
F81534A_TRIGGER_MASK | F81534A_FIFO_128BYTE; + + status = f81232_set_mask_register(port, F81534A_MODE_REG, mask, val); + if (status) { + dev_err(&port->dev, "failed to set MODE_REG: %d\n", status); + return status; + } + + return f81232_open(tty, port); +} + static void f81232_close(struct usb_serial_port *port) { struct f81232_private *port_priv = usb_get_serial_port_data(port); @@ -678,6 +796,20 @@ static void f81232_dtr_rts(struct usb_serial_port *port, int on) f81232_set_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS); } +static bool f81232_tx_empty(struct usb_serial_port *port) +{ + int status; + u8 tmp; + + status = f81232_get_register(port, LINE_STATUS_REGISTER, &tmp); + if (!status) { + if ((tmp & UART_LSR_TEMT) != UART_LSR_TEMT) + return false; + } + + return true; +} + static int f81232_carrier_raised(struct usb_serial_port *port) { u8 msr; @@ -728,11 +860,98 @@ static void f81232_lsr_worker(struct work_struct *work) dev_warn(&port->dev, "read LSR failed: %d\n", status); } +static int f81534a_ctrl_set_register(struct usb_interface *intf, u16 reg, + u16 size, void *val) +{ + struct usb_device *dev = interface_to_usbdev(intf); + int retry = F81534A_ACCESS_REG_RETRY; + int status; + u8 *tmp; + + tmp = kmemdup(val, size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + while (retry--) { + status = usb_control_msg(dev, + usb_sndctrlpipe(dev, 0), + F81232_REGISTER_REQUEST, + F81232_SET_REGISTER, + reg, + 0, + tmp, + size, + USB_CTRL_SET_TIMEOUT); + if (status < 0) { + status = usb_translate_errors(status); + if (status == -EIO) + continue; + } else if (status != size) { + /* Retry on short transfers */ + status = -EIO; + continue; + } else { + status = 0; + } + + break; + } + + if (status) { + dev_err(&intf->dev, "failed to set register 0x%x: %d\n", + reg, status); + } + + kfree(tmp); + return status; +} + +static int f81534a_ctrl_enable_all_ports(struct usb_interface *intf, bool en) +{ + unsigned char enable[2] = {0}; + int status; + + /* + * Enable all available serial ports, define as following: + * bit 15 : Reset behavior (when HUB got soft reset) + * 0: maintain all serial port enabled state. + * 1: disable all serial port. + * bit 0~11 : Serial port enable bit. 
+ */ + if (en) { + enable[0] = 0xff; + enable[1] = 0x8f; + } + + status = f81534a_ctrl_set_register(intf, F81534A_CTRL_CMD_ENABLE_PORT, + sizeof(enable), enable); + if (status) + dev_err(&intf->dev, "failed to enable ports: %d\n", status); + + return status; +} + +static int f81534a_ctrl_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + return f81534a_ctrl_enable_all_ports(intf, true); +} + +static void f81534a_ctrl_disconnect(struct usb_interface *intf) +{ + f81534a_ctrl_enable_all_ports(intf, false); +} + +static int f81534a_ctrl_resume(struct usb_interface *intf) +{ + return f81534a_ctrl_enable_all_ports(intf, true); +} + static int f81232_port_probe(struct usb_serial_port *port) { struct f81232_private *priv; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); + priv = devm_kzalloc(&port->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -748,14 +967,17 @@ static int f81232_port_probe(struct usb_serial_port *port) return 0; } -static int f81232_port_remove(struct usb_serial_port *port) +static int f81534a_port_probe(struct usb_serial_port *port) { - struct f81232_private *priv; + int status; - priv = usb_get_serial_port_data(port); - kfree(priv); + /* tri-state with pull-high, default RS232 Mode */ + status = f81232_set_register(port, F81534A_GPIO_REG, + F81534A_GPIO_MODE2_DIR); + if (status) + return status; - return 0; + return f81232_port_probe(port); } static int f81232_suspend(struct usb_serial *serial, pm_message_t message) @@ -799,7 +1021,7 @@ static struct usb_serial_driver f81232_device = { .owner = THIS_MODULE, .name = "f81232", }, - .id_table = id_table, + .id_table = f81232_id_table, .num_ports = 1, .bulk_in_size = 256, .bulk_out_size = 256, @@ -813,22 +1035,82 @@ static struct usb_serial_driver f81232_device = { .tiocmget = f81232_tiocmget, .tiocmset = f81232_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, + .tx_empty = f81232_tx_empty, .process_read_urb = f81232_process_read_urb, .read_int_callback = f81232_read_int_callback, .port_probe = f81232_port_probe, - .port_remove = f81232_port_remove, + .suspend = f81232_suspend, + .resume = f81232_resume, +}; + +static struct usb_serial_driver f81534a_device = { + .driver = { + .owner = THIS_MODULE, + .name = "f81534a", + }, + .id_table = f81534a_id_table, + .num_ports = 1, + .open = f81534a_open, + .close = f81232_close, + .dtr_rts = f81232_dtr_rts, + .carrier_raised = f81232_carrier_raised, + .get_serial = f81232_get_serial_info, + .break_ctl = f81232_break_ctl, + .set_termios = f81232_set_termios, + .tiocmget = f81232_tiocmget, + .tiocmset = f81232_tiocmset, + .tiocmiwait = usb_serial_generic_tiocmiwait, + .tx_empty = f81232_tx_empty, + .process_read_urb = f81534a_process_read_urb, + .read_int_callback = f81232_read_int_callback, + .port_probe = f81534a_port_probe, .suspend = f81232_suspend, .resume = f81232_resume, }; static struct usb_serial_driver * const serial_drivers[] = { &f81232_device, + &f81534a_device, NULL, }; -module_usb_serial_driver(serial_drivers, id_table); +static struct usb_driver f81534a_ctrl_driver = { + .name = "f81534a_ctrl", + .id_table = f81534a_ctrl_id_table, + .probe = f81534a_ctrl_probe, + .disconnect = f81534a_ctrl_disconnect, + .resume = f81534a_ctrl_resume, +}; + +static int __init f81232_init(void) +{ + int status; + + status = usb_register_driver(&f81534a_ctrl_driver, THIS_MODULE, + KBUILD_MODNAME); + if (status) + return status; + + status = usb_serial_register_drivers(serial_drivers, KBUILD_MODNAME, + combined_id_table); + if (status) { + 
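+ /* Serial driver registration failed, so drop the control driver registered above */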
usb_deregister(&f81534a_ctrl_driver); + return status; + } + + return 0; +} + +static void __exit f81232_exit(void) +{ + usb_serial_deregister_drivers(serial_drivers); + usb_deregister(&f81534a_ctrl_driver); +} + +module_init(f81232_init); +module_exit(f81232_exit); -MODULE_DESCRIPTION("Fintek F81232 USB to serial adaptor driver"); +MODULE_DESCRIPTION("Fintek F81232/532A/534A/535/536 USB to serial driver"); MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>"); MODULE_AUTHOR("Peter Hong <peter_hong@fintek.com.tw>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 1be8bea372a2..5cdf180cda23 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -417,7 +417,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb) /* * Make sure URB is marked as free before checking the throttled flag * to avoid racing with unthrottle() on another CPU. Matches the - * smp_mb() in unthrottle(). + * smp_mb__after_atomic() in unthrottle(). */ smp_mb__after_atomic(); @@ -489,7 +489,7 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty) * Matches the smp_mb__after_atomic() in * usb_serial_generic_read_bulk_callback(). */ - smp_mb(); + smp_mb__after_atomic(); usb_serial_generic_submit_read_urbs(port, GFP_KERNEL); } @@ -609,12 +609,10 @@ EXPORT_SYMBOL_GPL(usb_serial_handle_break); * @tty: tty for the port * @status: new carrier detect status, nonzero if active */ -void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, +void usb_serial_handle_dcd_change(struct usb_serial_port *port, struct tty_struct *tty, unsigned int status) { - struct tty_port *port = &usb_port->port; - - dev_dbg(&usb_port->dev, "%s - status %d\n", __func__, status); + dev_dbg(&port->dev, "%s - status %d\n", __func__, status); if (tty) { struct tty_ldisc *ld = tty_ldisc_ref(tty); @@ -627,7 +625,7 @@ void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, } if (status) - wake_up_interruptible(&port->open_wait); + wake_up_interruptible(&port->port.open_wait); else if (tty && !C_CLOCAL(tty)) tty_hangup(tty); } diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 5737add6a2a4..4cca0b836f43 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -710,7 +710,7 @@ static void edge_interrupt_callback(struct urb *urb) /* grab the txcredits for the ports if available */ position = 2; portNumber = 0; - while ((position < length) && + while ((position < length - 1) && (portNumber < edge_serial->serial->num_ports)) { txCredits = data[position] | (data[position+1] << 8); if (txCredits) { diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h index c38e87ac5ea9..0d1a5bb4636e 100644 --- a/drivers/usb/serial/io_usbvend.h +++ b/drivers/usb/serial/io_usbvend.h @@ -593,7 +593,7 @@ struct ti_i2c_desc { __u8 Type; // Type of descriptor __le16 Size; // Size of data only not including header __u8 CheckSum; // Checksum (8 bit sum of data only) - __u8 Data[0]; // Data starts here + __u8 Data[]; // Data starts here } __attribute__((packed)); // for 5152 devices only (type 2 record) @@ -601,7 +601,7 @@ struct ti_i2c_desc { struct ti_i2c_firmware_rec { __u8 Ver_Major; // Firmware Major version number __u8 Ver_Minor; // Firmware Minor version number - __u8 Data[0]; // Download starts here + __u8 Data[]; // Download starts here } __attribute__((packed)); diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c index 79d0586e2b33..172261a908d8 
100644 --- a/drivers/usb/serial/ir-usb.c +++ b/drivers/usb/serial/ir-usb.c @@ -448,7 +448,7 @@ static void ir_set_termios(struct tty_struct *tty, usb_sndbulkpipe(udev, port->bulk_out_endpointAddress), transfer_buffer, 1, &actual_length, 5000); if (ret || actual_length != 1) { - if (actual_length != 1) + if (!ret) ret = -EIO; dev_err(&port->dev, "failed to change line speed: %d\n", ret); } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 084cc2fff3ae..8bfffca3e4ae 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1183,6 +1183,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */ .driver_info = NCTRL(0) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110b, 0xff), /* Telit ME910G1 (ECM) */ + .driver_info = NCTRL(0) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), @@ -1990,8 +1992,14 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ + { USB_DEVICE_INTERFACE_CLASS(0x1435, 0xd191, 0xff), /* Wistron Neweb D19Q1 */ + .driver_info = RSVD(1) | RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x1690, 0x7588, 0xff), /* ASKEY WWHC050 */ + .driver_info = RSVD(1) | RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */ .driver_info = RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2033, 0xff), /* BroadMobi BM806U */ + .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */ .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index aab737e1e7b6..c5a2995dfa2e 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -99,6 +99,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index a019ea7e6e0e..52db5519aaf0 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -130,6 +130,7 @@ #define HP_LM920_PRODUCT_ID 0x026b #define HP_TD620_PRODUCT_ID 0x0956 #define HP_LD960_PRODUCT_ID 0x0b39 +#define HP_LD381_PRODUCT_ID 0x0f7f #define HP_LCM220_PRODUCT_ID 0x3139 #define HP_LCM960_PRODUCT_ID 0x3239 #define HP_LD220_PRODUCT_ID 0x3524 diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index ef23acc9b9ce..73075b9351c5 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -219,7 +219,7 @@ struct ti_write_data_bytes { u8 bDataCounter; __be16 wBaseAddrHi; __be16 wBaseAddrLo; - u8 bData[0]; + u8 bData[]; } __packed; struct ti_read_data_request { @@ -234,7 +234,7 @@ struct ti_read_data_bytes { __u8 bCmdCode; 
__u8 bModuleId; __u8 bErrorCode; - __u8 bData[0]; + __u8 bData[]; } __packed; /* Interrupt struct */ diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index dc7a65b9ec98..27e3bb58c872 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -288,7 +288,7 @@ static void serial_close(struct tty_struct *tty, struct file *filp) /** * serial_cleanup - free resources post close/hangup - * @port: port to free up + * @tty: tty to clean up * * Do the resource freeing and refcount dropping for the port. * Avoid freeing the console. diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 95bba3ba6ac6..3670fda02c34 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -45,6 +45,7 @@ struct uas_dev_info { struct scsi_cmnd *cmnd[MAX_CMNDS]; spinlock_t lock; struct work_struct work; + struct work_struct scan_work; /* for async scanning */ }; enum { @@ -114,6 +115,17 @@ out: spin_unlock_irqrestore(&devinfo->lock, flags); } +static void uas_scan_work(struct work_struct *work) +{ + struct uas_dev_info *devinfo = + container_of(work, struct uas_dev_info, scan_work); + struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf); + + dev_dbg(&devinfo->intf->dev, "starting scan\n"); + scsi_scan_host(shost); + dev_dbg(&devinfo->intf->dev, "scan complete\n"); +} + static void uas_add_work(struct uas_cmd_info *cmdinfo) { struct scsi_pointer *scp = (void *)cmdinfo; @@ -982,6 +994,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) init_usb_anchor(&devinfo->data_urbs); spin_lock_init(&devinfo->lock); INIT_WORK(&devinfo->work, uas_do_work); + INIT_WORK(&devinfo->scan_work, uas_scan_work); result = uas_configure_endpoints(devinfo); if (result) @@ -998,7 +1011,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) if (result) goto free_streams; - scsi_scan_host(shost); + /* Submit the delayed_work for SCSI-device scanning */ + schedule_work(&devinfo->scan_work); + return result; free_streams: @@ -1166,6 +1181,12 @@ static void uas_disconnect(struct usb_interface *intf) usb_kill_anchored_urbs(&devinfo->data_urbs); uas_zap_pending(devinfo, DID_NO_CONNECT); + /* + * Prevent SCSI scanning (if it hasn't started yet) + * or wait for the SCSI-scanning routine to stop. 
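+ * cancel_work_sync() covers both cases: it removes the work if it is still queued, or waits for a running scan to finish.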
+ */ + cancel_work_sync(&devinfo->scan_work); + scsi_remove_host(shost); uas_free_streams(devinfo); scsi_host_put(shost); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 1cd9b6305b06..1880f3e13f57 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1258,6 +1258,12 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999, USB_SC_RBC, USB_PR_BULK, NULL, 0 ), +UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100, + "Samsung", + "Flash Drive FIT", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64), + /* aeb */ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, "Feiya", diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 9a79cd9762f3..94a64729dc27 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -121,12 +121,12 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks"); .initFunction = init_function, \ } -static struct us_unusual_dev us_unusual_dev_list[] = { +static const struct us_unusual_dev us_unusual_dev_list[] = { # include "unusual_devs.h" { } /* Terminating entry */ }; -static struct us_unusual_dev for_dynamic_ids = +static const struct us_unusual_dev for_dynamic_ids = USUAL_DEV(USB_SC_SCSI, USB_PR_BULK); #undef UNUSUAL_DEV @@ -583,7 +583,7 @@ EXPORT_SYMBOL_GPL(usb_stor_adjust_quirks); /* Get the unusual_devs entries and the string descriptors */ static int get_device_info(struct us_data *us, const struct usb_device_id *id, - struct us_unusual_dev *unusual_dev) + const struct us_unusual_dev *unusual_dev) { struct usb_device *dev = us->pusb_dev; struct usb_interface_descriptor *idesc = @@ -933,7 +933,7 @@ static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf) int usb_stor_probe1(struct us_data **pus, struct usb_interface *intf, const struct usb_device_id *id, - struct us_unusual_dev *unusual_dev, + const struct us_unusual_dev *unusual_dev, struct scsi_host_template *sht) { struct Scsi_Host *host; @@ -1092,7 +1092,7 @@ static struct scsi_host_template usb_stor_host_template; static int storage_probe(struct usb_interface *intf, const struct usb_device_id *id) { - struct us_unusual_dev *unusual_dev; + const struct us_unusual_dev *unusual_dev; struct us_data *us; int result; int size; diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h index 85052cd66839..5850d624cac7 100644 --- a/drivers/usb/storage/usb.h +++ b/drivers/usb/storage/usb.h @@ -93,7 +93,8 @@ struct us_data { struct mutex dev_mutex; /* protect pusb_dev */ struct usb_device *pusb_dev; /* this usb_device */ struct usb_interface *pusb_intf; /* this interface */ - struct us_unusual_dev *unusual_dev; /* device-filter entry */ + const struct us_unusual_dev *unusual_dev; + /* device-filter entry */ unsigned long fflags; /* fixed flags from filter */ unsigned long dflags; /* dynamic atomic bitflags */ unsigned int send_bulk_pipe; /* cached pipe values */ @@ -185,7 +186,7 @@ extern int usb_stor_post_reset(struct usb_interface *iface); extern int usb_stor_probe1(struct us_data **pus, struct usb_interface *intf, const struct usb_device_id *id, - struct us_unusual_dev *unusual_dev, + const struct us_unusual_dev *unusual_dev, struct scsi_host_template *sht); extern int usb_stor_probe2(struct us_data *us); extern void usb_stor_disconnect(struct usb_interface *intf); diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c index cfd12e523678..529512827d8f 100644 --- a/drivers/usb/storage/usual-tables.c +++ b/drivers/usb/storage/usual-tables.c @@ -40,7 
+40,7 @@ .driver_info = (flags) \ } -struct usb_device_id usb_storage_usb_ids[] = { +const struct usb_device_id usb_storage_usb_ids[] = { # include "unusual_devs.h" { } /* Terminating entry */ }; @@ -68,7 +68,7 @@ struct ignore_entry { .bcdmax = bcdDeviceMax, \ } -static struct ignore_entry ignore_ids[] = { +static const struct ignore_entry ignore_ids[] = { # include "unusual_alauda.h" # include "unusual_cypress.h" # include "unusual_datafab.h" @@ -92,7 +92,7 @@ int usb_usual_ignore_device(struct usb_interface *intf) { struct usb_device *udev; unsigned vid, pid, bcd; - struct ignore_entry *p; + const struct ignore_entry *p; udev = interface_to_usbdev(intf); vid = le16_to_cpu(udev->descriptor.idVendor); diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c index 2e45eb479386..c823122f9cb7 100644 --- a/drivers/usb/typec/bus.c +++ b/drivers/usb/typec/bus.c @@ -30,17 +30,10 @@ static int typec_altmode_set_state(struct typec_altmode *adev, { bool is_port = is_typec_port(adev->dev.parent); struct altmode *port_altmode; - int ret; port_altmode = is_port ? to_altmode(adev) : to_altmode(adev)->partner; - ret = typec_altmode_set_mux(port_altmode, conf, data); - if (ret) - return ret; - - blocking_notifier_call_chain(&port_altmode->nh, conf, NULL); - - return 0; + return typec_altmode_set_mux(port_altmode, conf, data); } /* -------------------------------------------------------------------------- */ @@ -82,9 +75,6 @@ int typec_altmode_notify(struct typec_altmode *adev, if (ret) return ret; - blocking_notifier_call_chain(is_port ? &altmode->nh : &partner->nh, - conf, data); - if (partner->adev.ops && partner->adev.ops->notify) return partner->adev.ops->notify(&partner->adev, conf, data); diff --git a/drivers/usb/typec/bus.h b/drivers/usb/typec/bus.h index 0c9661c96473..8ba8112d2740 100644 --- a/drivers/usb/typec/bus.h +++ b/drivers/usb/typec/bus.h @@ -22,8 +22,6 @@ struct altmode { struct altmode *partner; struct altmode *plug[2]; - - struct blocking_notifier_head nh; }; #define to_altmode(d) container_of(d, struct altmode, adev) diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index 7c44e930602f..8d894bdff77d 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -206,69 +206,6 @@ static void typec_altmode_put_partner(struct altmode *altmode) put_device(&adev->dev); } -static void *typec_port_match(struct device_connection *con, int ep, void *data) -{ - struct device *dev; - - /* - * FIXME: Check does the fwnode supports the requested SVID. If it does - * we need to return ERR_PTR(-PROBE_DEFER) when there is no device. - */ - if (con->fwnode) - return class_find_device_by_fwnode(typec_class, con->fwnode); - - dev = class_find_device_by_name(typec_class, con->endpoint[ep]); - - return dev ? dev : ERR_PTR(-EPROBE_DEFER); -} - -struct typec_altmode * -typec_altmode_register_notifier(struct device *dev, u16 svid, u8 mode, - struct notifier_block *nb) -{ - struct typec_device_id id = { svid, mode, }; - struct device *altmode_dev; - struct device *port_dev; - struct altmode *altmode; - int ret; - - /* Find the port linked to the caller */ - port_dev = device_connection_find_match(dev, NULL, NULL, - typec_port_match); - if (IS_ERR_OR_NULL(port_dev)) - return port_dev ? 
ERR_CAST(port_dev) : ERR_PTR(-ENODEV); - - /* Find the altmode with matching svid */ - altmode_dev = device_find_child(port_dev, &id, altmode_match); - - put_device(port_dev); - - if (!altmode_dev) - return ERR_PTR(-ENODEV); - - altmode = to_altmode(to_typec_altmode(altmode_dev)); - - /* Register notifier */ - ret = blocking_notifier_chain_register(&altmode->nh, nb); - if (ret) { - put_device(altmode_dev); - return ERR_PTR(ret); - } - - return &altmode->adev; -} -EXPORT_SYMBOL_GPL(typec_altmode_register_notifier); - -void typec_altmode_unregister_notifier(struct typec_altmode *adev, - struct notifier_block *nb) -{ - struct altmode *altmode = to_altmode(adev); - - blocking_notifier_chain_unregister(&altmode->nh, nb); - put_device(&adev->dev); -} -EXPORT_SYMBOL_GPL(typec_altmode_unregister_notifier); - /** * typec_altmode_update_active - Report Enter/Exit mode * @adev: Handle to the alternate mode @@ -432,7 +369,28 @@ static struct attribute *typec_altmode_attrs[] = { &dev_attr_vdo.attr, NULL }; -ATTRIBUTE_GROUPS(typec_altmode); + +static umode_t typec_altmode_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct typec_altmode *adev = to_typec_altmode(kobj_to_dev(kobj)); + + if (attr == &dev_attr_active.attr) + if (!adev->ops || !adev->ops->activate) + return 0444; + + return attr->mode; +} + +static struct attribute_group typec_altmode_group = { + .is_visible = typec_altmode_attr_is_visible, + .attrs = typec_altmode_attrs, +}; + +static const struct attribute_group *typec_altmode_groups[] = { + &typec_altmode_group, + NULL +}; static int altmode_id_get(struct device *dev) { @@ -517,9 +475,7 @@ typec_register_altmode(struct device *parent, dev_set_name(&alt->adev.dev, "%s.%u", dev_name(parent), id); /* Link partners and plugs with the ports */ - if (is_port) - BLOCKING_INIT_NOTIFIER_HEAD(&alt->nh); - else + if (!is_port) typec_altmode_set_partner(alt); /* The partners are bind to drivers */ @@ -859,7 +815,7 @@ struct typec_cable *typec_cable_get(struct typec_port *port) EXPORT_SYMBOL_GPL(typec_cable_get); /** - * typec_cable_get - Decrement the reference count on USB Type-C cable + * typec_cable_put - Decrement the reference count on USB Type-C cable * @cable: The USB Type-C cable */ void typec_cable_put(struct typec_cable *cable) @@ -1091,11 +1047,6 @@ static ssize_t power_role_store(struct device *dev, struct typec_port *port = to_typec_port(dev); int ret; - if (!port->cap->pd_revision) { - dev_dbg(dev, "USB Power Delivery not supported\n"); - return -EOPNOTSUPP; - } - if (!port->ops || !port->ops->pr_set) { dev_dbg(dev, "power role swapping not supported\n"); return -EOPNOTSUPP; @@ -1293,6 +1244,25 @@ static ssize_t usb_power_delivery_revision_show(struct device *dev, } static DEVICE_ATTR_RO(usb_power_delivery_revision); +static ssize_t orientation_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct typec_port *p = to_typec_port(dev); + enum typec_orientation orientation = typec_get_orientation(p); + + switch (orientation) { + case TYPEC_ORIENTATION_NORMAL: + return sprintf(buf, "%s\n", "normal"); + case TYPEC_ORIENTATION_REVERSE: + return sprintf(buf, "%s\n", "reverse"); + case TYPEC_ORIENTATION_NONE: + default: + return sprintf(buf, "%s\n", "unknown"); + } +} +static DEVICE_ATTR_RO(orientation); + static struct attribute *typec_attrs[] = { &dev_attr_data_role.attr, &dev_attr_power_operation_mode.attr, @@ -1303,9 +1273,54 @@ static struct attribute *typec_attrs[] = { &dev_attr_usb_typec_revision.attr, 
&dev_attr_vconn_source.attr, &dev_attr_port_type.attr, + &dev_attr_orientation.attr, NULL, }; -ATTRIBUTE_GROUPS(typec); + +static umode_t typec_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct typec_port *port = to_typec_port(kobj_to_dev(kobj)); + + if (attr == &dev_attr_data_role.attr) { + if (port->cap->data != TYPEC_PORT_DRD || + !port->ops || !port->ops->dr_set) + return 0444; + } else if (attr == &dev_attr_power_role.attr) { + if (port->cap->type != TYPEC_PORT_DRP || + !port->ops || !port->ops->pr_set) + return 0444; + } else if (attr == &dev_attr_vconn_source.attr) { + if (!port->cap->pd_revision || + !port->ops || !port->ops->vconn_set) + return 0444; + } else if (attr == &dev_attr_preferred_role.attr) { + if (port->cap->type != TYPEC_PORT_DRP || + !port->ops || !port->ops->try_role) + return 0444; + } else if (attr == &dev_attr_port_type.attr) { + if (!port->ops || !port->ops->port_type_set) + return 0; + if (port->cap->type != TYPEC_PORT_DRP) + return 0444; + } else if (attr == &dev_attr_orientation.attr) { + if (port->cap->orientation_aware) + return 0444; + return 0; + } + + return attr->mode; +} + +static struct attribute_group typec_group = { + .is_visible = typec_attr_is_visible, + .attrs = typec_attrs, +}; + +static const struct attribute_group *typec_groups[] = { + &typec_group, + NULL +}; static int typec_uevent(struct device *dev, struct kobj_uevent_env *env) { @@ -1495,13 +1510,13 @@ int typec_set_orientation(struct typec_port *port, { int ret; - if (port->sw) { - ret = port->sw->set(port->sw, orientation); - if (ret) - return ret; - } + ret = typec_switch_set(port->sw, orientation); + if (ret) + return ret; port->orientation = orientation; + sysfs_notify(&port->dev.kobj, NULL, "orientation"); + kobject_uevent(&port->dev.kobj, KOBJ_CHANGE); return 0; } @@ -1533,7 +1548,7 @@ int typec_set_mode(struct typec_port *port, int mode) state.mode = mode; - return port->mux ? port->mux->set(port->mux, &state) : 0; + return typec_mux_set(port->mux, &state); } EXPORT_SYMBOL_GPL(typec_set_mode); diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c index 5baf0f416c73..52ad277e4565 100644 --- a/drivers/usb/typec/mux.c +++ b/drivers/usb/typec/mux.c @@ -17,11 +17,6 @@ #include "bus.h" -static int name_match(struct device *dev, const void *name) -{ - return !strcmp((const char *)name, dev_name(dev)); -} - static bool dev_name_ends_with(struct device *dev, const char *suffix) { const char *name = dev_name(dev); @@ -44,41 +39,36 @@ static void *typec_switch_match(struct device_connection *con, int ep, { struct device *dev; - if (con->fwnode) { - if (con->id && !fwnode_property_present(con->fwnode, con->id)) - return NULL; + if (con->id && !fwnode_property_present(con->fwnode, con->id)) + return NULL; - dev = class_find_device(&typec_mux_class, NULL, con->fwnode, - switch_fwnode_match); - } else { - dev = class_find_device(&typec_mux_class, NULL, - con->endpoint[ep], name_match); - } + dev = class_find_device(&typec_mux_class, NULL, con->fwnode, + switch_fwnode_match); return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER); } /** - * typec_switch_get - Find USB Type-C orientation switch - * @dev: The caller device + * fwnode_typec_switch_get - Find USB Type-C orientation switch + * @fwnode: The caller device node * * Finds a switch linked with @dev. Returns a reference to the switch on * success, NULL if no matching connection was found, or * ERR_PTR(-EPROBE_DEFER) when a connection was found but the switch * has not been enumerated yet. 
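* The caller must release the reference with typec_switch_put() when done with it.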
*/ -struct typec_switch *typec_switch_get(struct device *dev) +struct typec_switch *fwnode_typec_switch_get(struct fwnode_handle *fwnode) { struct typec_switch *sw; - sw = device_connection_find_match(dev, "orientation-switch", NULL, + sw = fwnode_connection_find_match(fwnode, "orientation-switch", NULL, typec_switch_match); if (!IS_ERR_OR_NULL(sw)) WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); return sw; } -EXPORT_SYMBOL_GPL(typec_switch_get); +EXPORT_SYMBOL_GPL(fwnode_typec_switch_get); /** * typec_put_switch - Release USB Type-C orientation switch @@ -137,7 +127,8 @@ typec_switch_register(struct device *parent, sw->dev.class = &typec_mux_class; sw->dev.type = &typec_switch_dev_type; sw->dev.driver_data = desc->drvdata; - dev_set_name(&sw->dev, "%s-switch", dev_name(parent)); + dev_set_name(&sw->dev, "%s-switch", + desc->name ? desc->name : dev_name(parent)); ret = device_add(&sw->dev); if (ret) { @@ -150,6 +141,16 @@ typec_switch_register(struct device *parent, } EXPORT_SYMBOL_GPL(typec_switch_register); +int typec_switch_set(struct typec_switch *sw, + enum typec_orientation orientation) +{ + if (IS_ERR_OR_NULL(sw)) + return 0; + + return sw->set(sw, orientation); +} +EXPORT_SYMBOL_GPL(typec_switch_set); + /** * typec_switch_unregister - Unregister USB Type-C orientation switch * @sw: USB Type-C orientation switch @@ -191,13 +192,6 @@ static void *typec_mux_match(struct device_connection *con, int ep, void *data) u16 *val; int i; - if (!con->fwnode) { - dev = class_find_device(&typec_mux_class, NULL, - con->endpoint[ep], name_match); - - return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER); - } - /* * Check has the identifier already been "consumed". If it * has, no need to do any extra connection identification. @@ -247,8 +241,8 @@ find_mux: } /** - * typec_mux_get - Find USB Type-C Multiplexer - * @dev: The caller device + * fwnode_typec_mux_get - Find USB Type-C Multiplexer + * @fwnode: The caller device node * @desc: Alt Mode description * * Finds a mux linked to the caller. This function is primarily meant for the @@ -256,19 +250,19 @@ find_mux: * matching connection was found, or ERR_PTR(-EPROBE_DEFER) when a connection * was found but the mux has not been enumerated yet. */ -struct typec_mux *typec_mux_get(struct device *dev, - const struct typec_altmode_desc *desc) +struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode, + const struct typec_altmode_desc *desc) { struct typec_mux *mux; - mux = device_connection_find_match(dev, "mode-switch", (void *)desc, + mux = fwnode_connection_find_match(fwnode, "mode-switch", (void *)desc, typec_mux_match); if (!IS_ERR_OR_NULL(mux)) WARN_ON(!try_module_get(mux->dev.parent->driver->owner)); return mux; } -EXPORT_SYMBOL_GPL(typec_mux_get); +EXPORT_SYMBOL_GPL(fwnode_typec_mux_get); /** * typec_mux_put - Release handle to a Multiplexer @@ -285,6 +279,15 @@ void typec_mux_put(struct typec_mux *mux) } EXPORT_SYMBOL_GPL(typec_mux_put); +int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state) +{ + if (IS_ERR_OR_NULL(mux)) + return 0; + + return mux->set(mux, state); +} +EXPORT_SYMBOL_GPL(typec_mux_set); + static void typec_mux_release(struct device *dev) { kfree(to_typec_mux(dev)); @@ -326,7 +329,8 @@ typec_mux_register(struct device *parent, const struct typec_mux_desc *desc) mux->dev.class = &typec_mux_class; mux->dev.type = &typec_mux_dev_type; mux->dev.driver_data = desc->drvdata; - dev_set_name(&mux->dev, "%s-mux", dev_name(parent)); + dev_set_name(&mux->dev, "%s-mux", + desc->name ? 
desc->name : dev_name(parent)); ret = device_add(&mux->dev); if (ret) { diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig index 01ed0d5e10e8..77eb97b2aa86 100644 --- a/drivers/usb/typec/mux/Kconfig +++ b/drivers/usb/typec/mux/Kconfig @@ -9,4 +9,13 @@ config TYPEC_MUX_PI3USB30532 Say Y or M if your system has a Pericom PI3USB30532 Type-C cross switch / mux chip found on some devices with a Type-C port. +config TYPEC_MUX_INTEL_PMC + tristate "Intel PMC mux control" + depends on INTEL_PMC_IPC + select USB_ROLE_SWITCH + help + Driver for USB muxes controlled by Intel PMC FW. Intel PMC FW can + control the USB role switch and also the multiplexer/demultiplexer + switches used with USB Type-C Alternate Modes. + endmenu diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile index 1332e469b8a0..280a6f553115 100644 --- a/drivers/usb/typec/mux/Makefile +++ b/drivers/usb/typec/mux/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o +obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c new file mode 100644 index 000000000000..f5c5e0aef66f --- /dev/null +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -0,0 +1,434 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Intel PMC USB mux control + * + * Copyright (C) 2020 Intel Corporation + * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com> + */ + +#include <linux/acpi.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/usb/role.h> +#include <linux/usb/typec_mux.h> +#include <linux/usb/typec_dp.h> +#include <linux/usb/typec_tbt.h> + +#include <asm/intel_pmc_ipc.h> + +#define PMC_USBC_CMD 0xa7 + +/* "Usage" OOB Message field values */ +enum { + PMC_USB_CONNECT, + PMC_USB_DISCONNECT, + PMC_USB_SAFE_MODE, + PMC_USB_ALT_MODE, + PMC_USB_DP_HPD, +}; + +#define PMC_USB_MSG_USB2_PORT_SHIFT 0 +#define PMC_USB_MSG_USB3_PORT_SHIFT 4 +#define PMC_USB_MSG_UFP_SHIFT 4 +#define PMC_USB_MSG_ORI_HSL_SHIFT 5 +#define PMC_USB_MSG_ORI_AUX_SHIFT 6 + +/* Alt Mode Request */ +struct altmode_req { + u8 usage; + u8 mode_type; + u8 mode_id; + u8 reserved; + u32 mode_data; +} __packed; + +#define PMC_USB_MODE_TYPE_SHIFT 4 + +enum { + PMC_USB_MODE_TYPE_USB, + PMC_USB_MODE_TYPE_DP, + PMC_USB_MODE_TYPE_TBT, +}; + +/* Common Mode Data bits */ +#define PMC_USB_ALTMODE_ACTIVE_CABLE BIT(2) + +#define PMC_USB_ALTMODE_ORI_SHIFT 1 +#define PMC_USB_ALTMODE_UFP_SHIFT 3 +#define PMC_USB_ALTMODE_ORI_AUX_SHIFT 4 +#define PMC_USB_ALTMODE_ORI_HSL_SHIFT 5 + +/* DP specific Mode Data bits */ +#define PMC_USB_ALTMODE_DP_MODE_SHIFT 8 + +/* TBT specific Mode Data bits */ +#define PMC_USB_ALTMODE_TBT_TYPE BIT(17) +#define PMC_USB_ALTMODE_CABLE_TYPE BIT(18) +#define PMC_USB_ALTMODE_ACTIVE_LINK BIT(20) +#define PMC_USB_ALTMODE_FORCE_LSR BIT(23) +#define PMC_USB_ALTMODE_CABLE_SPD(_s_) (((_s_) & GENMASK(2, 0)) << 25) +#define PMC_USB_ALTMODE_CABLE_USB31 1 +#define PMC_USB_ALTMODE_CABLE_10GPS 2 +#define PMC_USB_ALTMODE_CABLE_20GPS 3 +#define PMC_USB_ALTMODE_TBT_GEN(_g_) (((_g_) & GENMASK(1, 0)) << 28) + +/* Display HPD Request bits */ +#define PMC_USB_DP_HPD_IRQ BIT(5) +#define PMC_USB_DP_HPD_LVL BIT(6) + +struct pmc_usb; + +struct pmc_usb_port { + int num; + struct pmc_usb *pmc; + struct typec_mux *typec_mux; + struct typec_switch *typec_sw; + struct usb_role_switch *usb_sw; + + enum typec_orientation orientation; + enum usb_role role; + + u8 
usb2_port; + u8 usb3_port; +}; + +struct pmc_usb { + u8 num_ports; + struct device *dev; + struct pmc_usb_port *port; +}; + +static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len) +{ + u8 response[4]; + + /* + * Error bit will always be 0 with the USBC command. + * Status can be checked from the response message. + */ + intel_pmc_ipc_command(PMC_USBC_CMD, 0, msg, len, + (void *)response, 1); + + if (response[2]) { + if (response[2] & BIT(1)) + return -EIO; + return -EBUSY; + } + + return 0; +} + +static int +pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_mux_state *state) +{ + struct typec_displayport_data *data = state->data; + u8 msg[2] = { }; + + msg[0] = PMC_USB_DP_HPD; + msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; + + msg[1] = PMC_USB_DP_HPD_IRQ; + + if (data->status & DP_STATUS_HPD_STATE) + msg[1] |= PMC_USB_DP_HPD_LVL; + + return pmc_usb_command(port, msg, sizeof(msg)); +} + +static int +pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state) +{ + struct typec_displayport_data *data = state->data; + struct altmode_req req = { }; + + if (data->status & DP_STATUS_IRQ_HPD) + return pmc_usb_mux_dp_hpd(port, state); + + req.usage = PMC_USB_ALT_MODE; + req.usage |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; + req.mode_type = PMC_USB_MODE_TYPE_DP << PMC_USB_MODE_TYPE_SHIFT; + + req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT; + req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT; + req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_AUX_SHIFT; + req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_HSL_SHIFT; + + req.mode_data |= (state->mode - TYPEC_STATE_MODAL) << + PMC_USB_ALTMODE_DP_MODE_SHIFT; + + return pmc_usb_command(port, (void *)&req, sizeof(req)); +} + +static int +pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state) +{ + struct typec_thunderbolt_data *data = state->data; + u8 cable_speed = TBT_CABLE_SPEED(data->cable_mode); + struct altmode_req req = { }; + + req.usage = PMC_USB_ALT_MODE; + req.usage |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; + req.mode_type = PMC_USB_MODE_TYPE_TBT << PMC_USB_MODE_TYPE_SHIFT; + + req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT; + req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT; + req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_AUX_SHIFT; + req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_HSL_SHIFT; + + if (TBT_ADAPTER(data->device_mode) == TBT_ADAPTER_TBT3) + req.mode_data |= PMC_USB_ALTMODE_TBT_TYPE; + + if (data->cable_mode & TBT_CABLE_OPTICAL) + req.mode_data |= PMC_USB_ALTMODE_CABLE_TYPE; + + if (data->cable_mode & TBT_CABLE_LINK_TRAINING) + req.mode_data |= PMC_USB_ALTMODE_ACTIVE_LINK; + + if (data->enter_vdo & TBT_ENTER_MODE_ACTIVE_CABLE) + req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE; + + req.mode_data |= PMC_USB_ALTMODE_CABLE_SPD(cable_speed); + + return pmc_usb_command(port, (void *)&req, sizeof(req)); +} + +static int pmc_usb_mux_safe_state(struct pmc_usb_port *port) +{ + u8 msg; + + msg = PMC_USB_SAFE_MODE; + msg |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; + + return pmc_usb_command(port, &msg, sizeof(msg)); +} + +static int pmc_usb_connect(struct pmc_usb_port *port) +{ + u8 msg[2]; + + msg[0] = PMC_USB_CONNECT; + msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; + + msg[1] = port->usb2_port << PMC_USB_MSG_USB2_PORT_SHIFT; + msg[1] |= (port->orientation - 1) << PMC_USB_MSG_ORI_HSL_SHIFT; + msg[1] |= 
(port->orientation - 1) << PMC_USB_MSG_ORI_AUX_SHIFT; + + return pmc_usb_command(port, msg, sizeof(msg)); +} + +static int pmc_usb_disconnect(struct pmc_usb_port *port) +{ + u8 msg[2]; + + msg[0] = PMC_USB_DISCONNECT; + msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; + + msg[1] = port->usb2_port << PMC_USB_MSG_USB2_PORT_SHIFT; + + return pmc_usb_command(port, msg, sizeof(msg)); +} + +static int +pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state) +{ + struct pmc_usb_port *port = typec_mux_get_drvdata(mux); + + if (!state->alt) + return 0; + + if (state->mode == TYPEC_STATE_SAFE) + return pmc_usb_mux_safe_state(port); + + switch (state->alt->svid) { + case USB_TYPEC_TBT_SID: + return pmc_usb_mux_tbt(port, state); + case USB_TYPEC_DP_SID: + return pmc_usb_mux_dp(port, state); + } + + return -EOPNOTSUPP; +} + +static int pmc_usb_set_orientation(struct typec_switch *sw, + enum typec_orientation orientation) +{ + struct pmc_usb_port *port = typec_switch_get_drvdata(sw); + + if (port->orientation == orientation) + return 0; + + port->orientation = orientation; + + if (port->role) { + if (orientation == TYPEC_ORIENTATION_NONE) + return pmc_usb_disconnect(port); + else + return pmc_usb_connect(port); + } + + return 0; +} + +static int pmc_usb_set_role(struct usb_role_switch *sw, enum usb_role role) +{ + struct pmc_usb_port *port = usb_role_switch_get_drvdata(sw); + + if (port->role == role) + return 0; + + port->role = role; + + if (port->orientation) { + if (role == USB_ROLE_NONE) + return pmc_usb_disconnect(port); + else + return pmc_usb_connect(port); + } + + return 0; +} + +static int pmc_usb_register_port(struct pmc_usb *pmc, int index, + struct fwnode_handle *fwnode) +{ + struct pmc_usb_port *port = &pmc->port[index]; + struct usb_role_switch_desc desc = { }; + struct typec_switch_desc sw_desc = { }; + struct typec_mux_desc mux_desc = { }; + int ret; + + ret = fwnode_property_read_u8(fwnode, "usb2-port", &port->usb2_port); + if (ret) + return ret; + + ret = fwnode_property_read_u8(fwnode, "usb3-port", &port->usb3_port); + if (ret) + return ret; + + port->num = index; + port->pmc = pmc; + + sw_desc.fwnode = fwnode; + sw_desc.drvdata = port; + sw_desc.name = fwnode_get_name(fwnode); + sw_desc.set = pmc_usb_set_orientation; + + port->typec_sw = typec_switch_register(pmc->dev, &sw_desc); + if (IS_ERR(port->typec_sw)) + return PTR_ERR(port->typec_sw); + + mux_desc.fwnode = fwnode; + mux_desc.drvdata = port; + mux_desc.name = fwnode_get_name(fwnode); + mux_desc.set = pmc_usb_mux_set; + + port->typec_mux = typec_mux_register(pmc->dev, &mux_desc); + if (IS_ERR(port->typec_mux)) { + ret = PTR_ERR(port->typec_mux); + goto err_unregister_switch; + } + + desc.fwnode = fwnode; + desc.driver_data = port; + desc.name = fwnode_get_name(fwnode); + desc.set = pmc_usb_set_role; + + port->usb_sw = usb_role_switch_register(pmc->dev, &desc); + if (IS_ERR(port->usb_sw)) { + ret = PTR_ERR(port->usb_sw); + goto err_unregister_mux; + } + + return 0; + +err_unregister_mux: + typec_mux_unregister(port->typec_mux); + +err_unregister_switch: + typec_switch_unregister(port->typec_sw); + + return ret; +} + +static int pmc_usb_probe(struct platform_device *pdev) +{ + struct fwnode_handle *fwnode = NULL; + struct pmc_usb *pmc; + int i = 0; + int ret; + + pmc = devm_kzalloc(&pdev->dev, sizeof(*pmc), GFP_KERNEL); + if (!pmc) + return -ENOMEM; + + device_for_each_child_node(&pdev->dev, fwnode) + pmc->num_ports++; + + pmc->port = devm_kcalloc(&pdev->dev, pmc->num_ports, + sizeof(struct pmc_usb_port), 
GFP_KERNEL); + if (!pmc->port) + return -ENOMEM; + + pmc->dev = &pdev->dev; + + /* + * For every physical USB connector (USB2 and USB3 combo) there is a + * child ACPI device node under the PMC mux ACPI device object. + */ + for (i = 0; i < pmc->num_ports; i++) { + fwnode = device_get_next_child_node(pmc->dev, fwnode); + if (!fwnode) + break; + + ret = pmc_usb_register_port(pmc, i, fwnode); + if (ret) + goto err_remove_ports; + } + + platform_set_drvdata(pdev, pmc); + + return 0; + +err_remove_ports: + for (i = 0; i < pmc->num_ports; i++) { + typec_switch_unregister(pmc->port[i].typec_sw); + typec_mux_unregister(pmc->port[i].typec_mux); + } + + return ret; +} + +static int pmc_usb_remove(struct platform_device *pdev) +{ + struct pmc_usb *pmc = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < pmc->num_ports; i++) { + typec_switch_unregister(pmc->port[i].typec_sw); + typec_mux_unregister(pmc->port[i].typec_mux); + } + + return 0; +} + +static const struct acpi_device_id pmc_usb_acpi_ids[] = { + { "INTC105C", }, + { } +}; +MODULE_DEVICE_TABLE(acpi, pmc_usb_acpi_ids); + +static struct platform_driver pmc_usb_driver = { + .driver = { + .name = "intel_pmc_usb", + .acpi_match_table = ACPI_PTR(pmc_usb_acpi_ids), + }, + .probe = pmc_usb_probe, + .remove = pmc_usb_remove, +}; + +module_platform_driver(pmc_usb_driver); + +MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel PMC USB mux control"); diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index f3087ef8265c..de3576e6530a 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -373,6 +373,14 @@ struct pd_rx_event { ((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \ (port)->port_type == TYPEC_PORT_DRP) +#define tcpm_data_role_for_source(port) \ + ((port)->typec_caps.data == TYPEC_PORT_UFP ? \ + TYPEC_DEVICE : TYPEC_HOST) + +#define tcpm_data_role_for_sink(port) \ + ((port)->typec_caps.data == TYPEC_PORT_DFP ? 
\ + TYPEC_HOST : TYPEC_DEVICE) + static enum tcpm_state tcpm_default_state(struct tcpm_port *port) { if (port->port_type == TYPEC_PORT_DRP) { @@ -788,10 +796,30 @@ static int tcpm_set_roles(struct tcpm_port *port, bool attached, else orientation = TYPEC_ORIENTATION_REVERSE; - if (data == TYPEC_HOST) - usb_role = USB_ROLE_HOST; - else - usb_role = USB_ROLE_DEVICE; + if (port->typec_caps.data == TYPEC_PORT_DRD) { + if (data == TYPEC_HOST) + usb_role = USB_ROLE_HOST; + else + usb_role = USB_ROLE_DEVICE; + } else if (port->typec_caps.data == TYPEC_PORT_DFP) { + if (data == TYPEC_HOST) { + if (role == TYPEC_SOURCE) + usb_role = USB_ROLE_HOST; + else + usb_role = USB_ROLE_NONE; + } else { + return -ENOTSUPP; + } + } else { + if (data == TYPEC_DEVICE) { + if (role == TYPEC_SINK) + usb_role = USB_ROLE_DEVICE; + else + usb_role = USB_ROLE_NONE; + } else { + return -ENOTSUPP; + } + } ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation); if (ret < 0) @@ -1817,7 +1845,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, tcpm_set_state(port, SOFT_RESET, 0); break; case PD_CTRL_DR_SWAP: - if (port->port_type != TYPEC_PORT_DRP) { + if (port->typec_caps.data != TYPEC_PORT_DRD) { tcpm_queue_message(port, PD_MSG_CTRL_REJECT); break; } @@ -2618,7 +2646,8 @@ static int tcpm_src_attach(struct tcpm_port *port) if (ret < 0) return ret; - ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST); + ret = tcpm_set_roles(port, true, TYPEC_SOURCE, + tcpm_data_role_for_source(port)); if (ret < 0) return ret; @@ -2740,7 +2769,8 @@ static int tcpm_snk_attach(struct tcpm_port *port) if (ret < 0) return ret; - ret = tcpm_set_roles(port, true, TYPEC_SINK, TYPEC_DEVICE); + ret = tcpm_set_roles(port, true, TYPEC_SINK, + tcpm_data_role_for_sink(port)); if (ret < 0) return ret; @@ -2766,7 +2796,8 @@ static int tcpm_acc_attach(struct tcpm_port *port) if (port->attached) return 0; - ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST); + ret = tcpm_set_roles(port, true, TYPEC_SOURCE, + tcpm_data_role_for_source(port)); if (ret < 0) return ret; @@ -3293,7 +3324,7 @@ static void run_state_machine(struct tcpm_port *port) tcpm_set_vconn(port, true); tcpm_set_vbus(port, false); tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE, - TYPEC_HOST); + tcpm_data_role_for_source(port)); tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER); break; case SRC_HARD_RESET_VBUS_ON: @@ -3308,7 +3339,7 @@ static void run_state_machine(struct tcpm_port *port) if (port->pd_capable) tcpm_set_charge(port, false); tcpm_set_roles(port, port->self_powered, TYPEC_SINK, - TYPEC_DEVICE); + tcpm_data_role_for_sink(port)); /* * VBUS may or may not toggle, depending on the adapter. 
* If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON @@ -3649,8 +3680,12 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, case SRC_SEND_CAPABILITIES: case SRC_READY: if (tcpm_port_is_disconnected(port) || - !tcpm_port_is_source(port)) - tcpm_set_state(port, SRC_UNATTACHED, 0); + !tcpm_port_is_source(port)) { + if (port->port_type == TYPEC_PORT_SRC) + tcpm_set_state(port, SRC_UNATTACHED, 0); + else + tcpm_set_state(port, SNK_UNATTACHED, 0); + } break; case SNK_UNATTACHED: if (tcpm_port_is_sink(port)) @@ -3969,7 +4004,7 @@ static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data) mutex_lock(&port->swap_lock); mutex_lock(&port->lock); - if (port->port_type != TYPEC_PORT_DRP) { + if (port->typec_caps.data != TYPEC_PORT_DRD) { ret = -EINVAL; goto port_unlock; } @@ -4711,6 +4746,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */ port->typec_caps.driver_data = port; port->typec_caps.ops = &tcpm_ops; + port->typec_caps.orientation_aware = 1; port->partner_desc.identity = &port->partner_ident; port->port_type = port->typec_caps.type; diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c index 0f1273ae086c..048381c058a5 100644 --- a/drivers/usb/typec/ucsi/displayport.c +++ b/drivers/usb/typec/ucsi/displayport.c @@ -271,6 +271,9 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt) return; dp = typec_altmode_get_drvdata(alt); + if (!dp) + return; + dp->data.conf = 0; dp->data.status = 0; dp->initialized = false; @@ -285,6 +288,8 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con, struct typec_altmode *alt; struct ucsi_dp *dp; + mutex_lock(&con->lock); + /* We can't rely on the firmware with the capabilities. 
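* Patch the capability VDO below so that DP signaling, a receptacle and the supported pin assignments are always advertised.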
*/ desc->vdo |= DP_CAP_DP_SIGNALING | DP_CAP_RECEPTACLE; @@ -293,12 +298,15 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con, desc->vdo |= all_assignments << 16; alt = typec_port_register_altmode(con->port, desc); - if (IS_ERR(alt)) + if (IS_ERR(alt)) { + mutex_unlock(&con->lock); return alt; + } dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL); if (!dp) { typec_unregister_altmode(alt); + mutex_unlock(&con->lock); return ERR_PTR(-ENOMEM); } @@ -311,5 +319,7 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con, alt->ops = &ucsi_displayport_ops; typec_altmode_set_drvdata(alt, dp); + mutex_unlock(&con->lock); + return alt; } diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index d5a6aac86327..ddf2ad3752de 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -270,9 +270,16 @@ static int ucsi_register_altmode(struct ucsi_connector *con, switch (desc->svid) { case USB_TYPEC_DP_SID: - case USB_TYPEC_NVIDIA_VLINK_SID: alt = ucsi_register_displayport(con, override, i, desc); break; + case USB_TYPEC_NVIDIA_VLINK_SID: + if (desc->vdo == USB_TYPEC_NVIDIA_VLINK_DBG_VDO) + alt = typec_port_register_altmode(con->port, + desc); + else + alt = ucsi_register_displayport(con, override, + i, desc); + break; default: alt = typec_port_register_altmode(con->port, desc); break; @@ -400,7 +407,7 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient) struct typec_altmode_desc desc; struct ucsi_altmode alt[2]; u64 command; - int num = 1; + int num; int ret; int len; int j; @@ -475,7 +482,8 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient) while (adev[i]) { if (recipient == UCSI_RECIPIENT_SOP && (adev[i]->svid == USB_TYPEC_DP_SID || - adev[i]->svid == USB_TYPEC_NVIDIA_VLINK_SID)) { + (adev[i]->svid == USB_TYPEC_NVIDIA_VLINK_SID && + adev[i]->vdo != USB_TYPEC_NVIDIA_VLINK_DBG_VDO))) { pdev = typec_altmode_get_partner(adev[i]); ucsi_displayport_remove_partner((void *)pdev); } diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index e434b9c9a9eb..8e831108f481 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -119,12 +119,14 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num); #define UCSI_SET_PDR_ACCEPT_ROLE_SWAPS BIT(25) /* GET_ALTERNATE_MODES command bits */ +#define UCSI_ALTMODE_RECIPIENT(_r_) (((_r_) >> 16) & 0x7) #define UCSI_GET_ALTMODE_RECIPIENT(_r_) ((u64)(_r_) << 16) #define UCSI_RECIPIENT_CON 0 #define UCSI_RECIPIENT_SOP 1 #define UCSI_RECIPIENT_SOP_P 2 #define UCSI_RECIPIENT_SOP_PP 3 #define UCSI_GET_ALTMODE_CONNECTOR_NUMBER(_r_) ((u64)(_r_) << 24) +#define UCSI_ALTMODE_OFFSET(_r_) (((_r_) >> 32) & 0xff) #define UCSI_GET_ALTMODE_OFFSET(_r_) ((u64)(_r_) << 32) #define UCSI_GET_ALTMODE_NUM_ALTMODES(_r_) ((u64)(_r_) << 40) @@ -340,4 +342,11 @@ static inline void ucsi_displayport_remove_partner(struct typec_altmode *adev) { } #endif /* CONFIG_TYPEC_DP_ALTMODE */ +/* + * NVIDIA VirtualLink (svid 0x955) has two altmode. 
VirtualLink + * DP mode with vdo=0x1 and NVIDIA test mode with vdo=0x3 + */ +#define USB_TYPEC_NVIDIA_VLINK_DP_VDO 0x1 +#define USB_TYPEC_NVIDIA_VLINK_DBG_VDO 0x3 + #endif /* __DRIVER_USB_TYPEC_UCSI_H */ diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index a5b8530490db..bff96d64dddf 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -125,6 +125,10 @@ struct version_format { #define CCG_FW_BUILD_NVIDIA (('n' << 8) | 'v') #define CCG_OLD_FW_VERSION (CCG_VERSION(0x31) | CCG_VERSION_PATCH(10)) +/* Altmode offset for NVIDIA Function Test Board (FTB) */ +#define NVIDIA_FTB_DP_OFFSET (2) +#define NVIDIA_FTB_DBG_OFFSET (3) + struct version_info { struct version_format base; struct version_format app; @@ -477,24 +481,65 @@ static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc, *cmd |= UCSI_SET_NEW_CAM_SET_AM(cam); } +/* + * Change the order of the vdo values reported by the NVIDIA test device FTB + * (Function Test Board), which lists the altmode with vdo=0x3 first and then + * vdo=0x1. The current logic assigns the mode value based on the order in the + * altmode list, which causes a mismatch of CON and SOP altmodes, since the + * NVIDIA GPU connector reports vdo=0x1 first and then vdo=0x3. + */ +static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc, + struct ucsi_altmode *alt) +{ + switch (UCSI_ALTMODE_OFFSET(uc->last_cmd_sent)) { + case NVIDIA_FTB_DP_OFFSET: + if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO) + alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO | + DP_CAP_DP_SIGNALING | DP_CAP_USB | + DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_E)); + break; + case NVIDIA_FTB_DBG_OFFSET: + if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DP_VDO) + alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DBG_VDO; + break; + default: + break; + } +} + static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset, void *val, size_t val_len) { struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi); - int ret; u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset); + struct ucsi_altmode *alt; + int ret; ret = ccg_read(uc, reg, val, val_len); if (ret) return ret; - if (offset == UCSI_MESSAGE_IN) { - if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_GET_CURRENT_CAM && - uc->has_multiple_dp) { + if (offset != UCSI_MESSAGE_IN) + return ret; + + switch (UCSI_COMMAND(uc->last_cmd_sent)) { + case UCSI_GET_CURRENT_CAM: + if (uc->has_multiple_dp) ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val); + break; + case UCSI_GET_ALTERNATE_MODES: + if (UCSI_ALTMODE_RECIPIENT(uc->last_cmd_sent) == + UCSI_RECIPIENT_SOP) { + alt = val; + if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID) + ucsi_ccg_nvidia_altmode(uc, alt); } - uc->last_cmd_sent = 0; + break; + default: + break; } + uc->last_cmd_sent = 0; return ret; } @@ -1219,6 +1264,7 @@ static int ccg_restart(struct ucsi_ccg *uc) return status; } + pm_runtime_enable(uc->dev); return 0; } @@ -1234,6 +1280,7 @@ static void ccg_update_firmware(struct work_struct *work) if (flash_mode != FLASH_NOT_NEEDED) { ucsi_unregister(uc->ucsi); + pm_runtime_disable(uc->dev); free_irq(uc->irq, uc); ccg_fw_update(uc, flash_mode); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index e158159671fa..18e205eeb9af 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -1414,10 +1414,6 @@ static int vhost_net_release(struct inode *inode, struct file *f) static struct socket *get_raw_socket(int fd) { - struct { - struct sockaddr_ll sa; - char buf[MAX_ADDR_LEN]; - } uaddr; int r; struct socket *sock = sockfd_lookup(fd, &r); @@ -1430,11 +1426,7 @@ static struct socket 
*get_raw_socket(int fd) goto err; } - r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0); - if (r < 0) - goto err; - - if (uaddr.sa.sll_family != AF_PACKET) { + if (sock->sk->sk_family != AF_PACKET) { r = -EPFNOSUPPORT; goto err; } diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index 403707a3e503..0093bbd0d326 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -456,6 +456,13 @@ config BACKLIGHT_RAVE_SP help Support for backlight control on RAVE SP device. +config BACKLIGHT_LED + tristate "Generic LED based Backlight Driver" + depends on LEDS_CLASS && OF + help + If you have a LCD backlight adjustable by LED class driver, say Y + to enable this driver. + endif # BACKLIGHT_CLASS_DEVICE endmenu diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile index 6f8777037c37..0c1a1524627a 100644 --- a/drivers/video/backlight/Makefile +++ b/drivers/video/backlight/Makefile @@ -57,3 +57,4 @@ obj-$(CONFIG_BACKLIGHT_TPS65217) += tps65217_bl.o obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o obj-$(CONFIG_BACKLIGHT_ARCXCNN) += arcxcnn_bl.o obj-$(CONFIG_BACKLIGHT_RAVE_SP) += rave-sp-backlight.o +obj-$(CONFIG_BACKLIGHT_LED) += led_bl.o diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c new file mode 100644 index 000000000000..3f66549997c8 --- /dev/null +++ b/drivers/video/backlight/led_bl.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Texas Instruments Incorporated - http://www.ti.com/ + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * Based on pwm_bl.c + */ + +#include <linux/backlight.h> +#include <linux/leds.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +struct led_bl_data { + struct device *dev; + struct backlight_device *bl_dev; + struct led_classdev **leds; + bool enabled; + int nb_leds; + unsigned int *levels; + unsigned int default_brightness; + unsigned int max_brightness; +}; + +static void led_bl_set_brightness(struct led_bl_data *priv, int level) +{ + int i; + int bkl_brightness; + + if (priv->levels) + bkl_brightness = priv->levels[level]; + else + bkl_brightness = level; + + for (i = 0; i < priv->nb_leds; i++) + led_set_brightness(priv->leds[i], bkl_brightness); + + priv->enabled = true; +} + +static void led_bl_power_off(struct led_bl_data *priv) +{ + int i; + + if (!priv->enabled) + return; + + for (i = 0; i < priv->nb_leds; i++) + led_set_brightness(priv->leds[i], LED_OFF); + + priv->enabled = false; +} + +static int led_bl_update_status(struct backlight_device *bl) +{ + struct led_bl_data *priv = bl_get_data(bl); + int brightness = bl->props.brightness; + + if (bl->props.power != FB_BLANK_UNBLANK || + bl->props.fb_blank != FB_BLANK_UNBLANK || + bl->props.state & BL_CORE_FBBLANK) + brightness = 0; + + if (brightness > 0) + led_bl_set_brightness(priv, brightness); + else + led_bl_power_off(priv); + + return 0; +} + +static const struct backlight_ops led_bl_ops = { + .update_status = led_bl_update_status, +}; + +static int led_bl_get_leds(struct device *dev, + struct led_bl_data *priv) +{ + int i, nb_leds, ret; + struct device_node *node = dev->of_node; + struct led_classdev **leds; + unsigned int max_brightness; + unsigned int default_brightness; + + ret = of_count_phandle_with_args(node, "leds", NULL); + if (ret < 0) { + dev_err(dev, "Unable to get led count\n"); + return -EINVAL; + } + + nb_leds = ret; + if (nb_leds < 1) { + dev_err(dev, "At least one LED must be specified!\n"); + return 
-EINVAL; + } + + leds = devm_kzalloc(dev, sizeof(struct led_classdev *) * nb_leds, + GFP_KERNEL); + if (!leds) + return -ENOMEM; + + for (i = 0; i < nb_leds; i++) { + leds[i] = devm_of_led_get(dev, i); + if (IS_ERR(leds[i])) + return PTR_ERR(leds[i]); + } + + /* check that the LEDs all have the same brightness range */ + max_brightness = leds[0]->max_brightness; + for (i = 1; i < nb_leds; i++) { + if (max_brightness != leds[i]->max_brightness) { + dev_err(dev, "LEDs must have identical ranges\n"); + return -EINVAL; + } + } + + /* get the default brightness from the first LED from the list */ + default_brightness = leds[0]->brightness; + + priv->nb_leds = nb_leds; + priv->leds = leds; + priv->max_brightness = max_brightness; + priv->default_brightness = default_brightness; + + return 0; +} + +static int led_bl_parse_levels(struct device *dev, + struct led_bl_data *priv) +{ + struct device_node *node = dev->of_node; + int num_levels; + u32 value; + int ret; + + if (!node) + return -ENODEV; + + num_levels = of_property_count_u32_elems(node, "brightness-levels"); + if (num_levels > 1) { + int i; + unsigned int db; + u32 *levels = NULL; + + levels = devm_kzalloc(dev, sizeof(u32) * num_levels, + GFP_KERNEL); + if (!levels) + return -ENOMEM; + + ret = of_property_read_u32_array(node, "brightness-levels", + levels, + num_levels); + if (ret < 0) + return ret; + + /* + * Try to map actual LED brightness to backlight brightness + * level + */ + db = priv->default_brightness; + for (i = 0 ; i < num_levels; i++) { + if ((i && db > levels[i-1]) && db <= levels[i]) + break; + } + priv->default_brightness = i; + priv->max_brightness = num_levels - 1; + priv->levels = levels; + } else if (num_levels >= 0) + dev_warn(dev, "Not enough levels defined\n"); + + ret = of_property_read_u32(node, "default-brightness-level", &value); + if (!ret && value <= priv->max_brightness) + priv->default_brightness = value; + else if (!ret && value > priv->max_brightness) + dev_warn(dev, "Invalid default brightness. Ignoring it\n"); + + return 0; +} + +static int led_bl_probe(struct platform_device *pdev) +{ + struct backlight_properties props; + struct led_bl_data *priv; + int ret, i; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + priv->dev = &pdev->dev; + + ret = led_bl_get_leds(&pdev->dev, priv); + if (ret) + return ret; + + ret = led_bl_parse_levels(&pdev->dev, priv); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to parse DT data\n"); + return ret; + } + + memset(&props, 0, sizeof(struct backlight_properties)); + props.type = BACKLIGHT_RAW; + props.max_brightness = priv->max_brightness; + props.brightness = priv->default_brightness; + props.power = (priv->default_brightness > 0) ? 
FB_BLANK_POWERDOWN : + FB_BLANK_UNBLANK; + priv->bl_dev = backlight_device_register(dev_name(&pdev->dev), + &pdev->dev, priv, &led_bl_ops, &props); + if (IS_ERR(priv->bl_dev)) { + dev_err(&pdev->dev, "Failed to register backlight\n"); + return PTR_ERR(priv->bl_dev); + } + + for (i = 0; i < priv->nb_leds; i++) + led_sysfs_disable(priv->leds[i]); + + backlight_update_status(priv->bl_dev); + + return 0; +} + +static int led_bl_remove(struct platform_device *pdev) +{ + struct led_bl_data *priv = platform_get_drvdata(pdev); + struct backlight_device *bl = priv->bl_dev; + int i; + + backlight_device_unregister(bl); + + led_bl_power_off(priv); + for (i = 0; i < priv->nb_leds; i++) + led_sysfs_enable(priv->leds[i]); + + return 0; +} + +static const struct of_device_id led_bl_of_match[] = { + { .compatible = "led-backlight" }, + { } +}; + +MODULE_DEVICE_TABLE(of, led_bl_of_match); + +static struct platform_driver led_bl_driver = { + .driver = { + .name = "led-backlight", + .of_match_table = of_match_ptr(led_bl_of_match), + }, + .probe = led_bl_probe, + .remove = led_bl_remove, +}; + +module_platform_driver(led_bl_driver); + +MODULE_DESCRIPTION("LED based Backlight Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:led-backlight"); diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index de7b8382aba9..998b0de1812f 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -1316,6 +1316,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font) static int vgacon_resize(struct vc_data *c, unsigned int width, unsigned int height, unsigned int user) { + if ((width << 1) * height > vga_vram_size) + return -EINVAL; + if (width % 2 || width > screen_info.orig_video_cols || height > (screen_info.orig_video_lines * vga_default_font_height)/ c->vc_font.height) diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 7bfe365d9372..341458fd95ca 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -959,8 +959,8 @@ out_iput: iput(vb->vb_dev_info.inode); out_kern_unmount: kern_unmount(balloon_mnt); -#endif out_del_vqs: +#endif vdev->config->del_vqs(vdev); out_free_vb: kfree(vb); diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 867c7ebd3f10..58b96baa8d48 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -2203,10 +2203,10 @@ void vring_del_virtqueue(struct virtqueue *_vq) vq->split.queue_size_in_bytes, vq->split.vring.desc, vq->split.queue_dma_addr); - - kfree(vq->split.desc_state); } } + if (!vq->packed_ring) + kfree(vq->split.desc_state); list_del(&_vq->list); kfree(vq); } diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index cec868f8db3f..9ea2b43d4b01 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -207,6 +207,7 @@ config DA9063_WATCHDOG config DA9062_WATCHDOG tristate "Dialog DA9062/61 Watchdog" depends on MFD_DA9062 || COMPILE_TEST + depends on I2C select WATCHDOG_CORE help Support for the watchdog in the DA9062 and DA9061 PMICs. @@ -841,6 +842,7 @@ config MEDIATEK_WATCHDOG tristate "Mediatek SoCs watchdog support" depends on ARCH_MEDIATEK || COMPILE_TEST select WATCHDOG_CORE + select RESET_CONTROLLER help Say Y here to include support for the watchdog timer in Mediatek SoCs. 
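The led-backlight driver added above maps the first LED's boot-time brightness onto an index in the optional brightness-levels table (see led_bl_parse_levels()). A standalone userspace sketch of that mapping follows; the levels[] table and the brightness value are made up for illustration, not taken from any real device tree:

#include <stdio.h>

int main(void)
{
	const unsigned int levels[] = { 0, 4, 8, 16, 32, 64, 128, 255 };
	const int num_levels = sizeof(levels) / sizeof(levels[0]);
	unsigned int db = 100;	/* pretend the LED comes up at brightness 100 */
	int i;

	/*
	 * Same search as led_bl_parse_levels(): find the level whose
	 * table entry first reaches the LED's current brightness.
	 */
	for (i = 0; i < num_levels; i++) {
		if ((i && db > levels[i - 1]) && db <= levels[i])
			break;
	}
	if (i == num_levels)	/* no bucket matched; clamp for the demo */
		i = num_levels - 1;

	printf("default backlight level %d of %d (LED brightness %u)\n",
	       i, num_levels - 1, levels[i]);
	return 0;
}

In the driver the resulting index becomes the default backlight level and num_levels - 1 becomes max_brightness, which is why the separate default-brightness-level property is only honoured when it does not exceed that maximum.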
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c index 47eefe072b40..0ad15d55071c 100644 --- a/drivers/watchdog/da9062_wdt.c +++ b/drivers/watchdog/da9062_wdt.c @@ -16,6 +16,7 @@ #include <linux/jiffies.h> #include <linux/mfd/da9062/registers.h> #include <linux/mfd/da9062/core.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/of.h> @@ -31,6 +32,7 @@ static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 }; struct da9062_watchdog { struct da9062 *hw; struct watchdog_device wdtdev; + bool use_sw_pm; }; static unsigned int da9062_wdt_timeout_to_sel(unsigned int secs) @@ -95,13 +97,6 @@ static int da9062_wdt_stop(struct watchdog_device *wdd) struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd); int ret; - ret = da9062_reset_watchdog_timer(wdt); - if (ret) { - dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n", - ret); - return ret; - } - ret = regmap_update_bits(wdt->hw->regmap, DA9062AA_CONTROL_D, DA9062AA_TWDSCALE_MASK, @@ -200,6 +195,8 @@ static int da9062_wdt_probe(struct platform_device *pdev) if (!wdt) return -ENOMEM; + wdt->use_sw_pm = device_property_present(dev, "dlg,use-sw-pm"); + wdt->hw = chip; wdt->wdtdev.info = &da9062_watchdog_info; @@ -226,6 +223,10 @@ static int da9062_wdt_probe(struct platform_device *pdev) static int __maybe_unused da9062_wdt_suspend(struct device *dev) { struct watchdog_device *wdd = dev_get_drvdata(dev); + struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd); + + if (!wdt->use_sw_pm) + return 0; if (watchdog_active(wdd)) return da9062_wdt_stop(wdd); @@ -236,6 +237,10 @@ static int __maybe_unused da9062_wdt_suspend(struct device *dev) static int __maybe_unused da9062_wdt_resume(struct device *dev) { struct watchdog_device *wdd = dev_get_drvdata(dev); + struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd); + + if (!wdt->use_sw_pm) + return 0; if (watchdog_active(wdd)) return da9062_wdt_start(wdd); diff --git a/drivers/watchdog/iTCO_vendor.h b/drivers/watchdog/iTCO_vendor.h index 0f7373ba10d5..69e92e692ae0 100644 --- a/drivers/watchdog/iTCO_vendor.h +++ b/drivers/watchdog/iTCO_vendor.h @@ -1,10 +1,12 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* iTCO Vendor Specific Support hooks */ #ifdef CONFIG_ITCO_VENDOR_SUPPORT +extern int iTCO_vendorsupport; extern void iTCO_vendor_pre_start(struct resource *, unsigned int); extern void iTCO_vendor_pre_stop(struct resource *); extern int iTCO_vendor_check_noreboot_on(void); #else +#define iTCO_vendorsupport 0 #define iTCO_vendor_pre_start(acpibase, heartbeat) {} #define iTCO_vendor_pre_stop(acpibase) {} #define iTCO_vendor_check_noreboot_on() 1 diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c index 4f1b96f59349..cf0eaa04b064 100644 --- a/drivers/watchdog/iTCO_vendor_support.c +++ b/drivers/watchdog/iTCO_vendor_support.c @@ -39,8 +39,10 @@ /* Broken BIOS */ #define BROKEN_BIOS 911 -static int vendorsupport; -module_param(vendorsupport, int, 0); +int iTCO_vendorsupport; +EXPORT_SYMBOL(iTCO_vendorsupport); + +module_param_named(vendorsupport, iTCO_vendorsupport, int, 0); MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default=" "0 (none), 1=SuperMicro Pent3, 911=Broken SMI BIOS"); @@ -152,7 +154,7 @@ static void broken_bios_stop(struct resource *smires) void iTCO_vendor_pre_start(struct resource *smires, unsigned int heartbeat) { - switch (vendorsupport) { + switch (iTCO_vendorsupport) { case SUPERMICRO_OLD_BOARD: supermicro_old_pre_start(smires); break; @@ -165,7 +167,7 
@@ EXPORT_SYMBOL(iTCO_vendor_pre_start); void iTCO_vendor_pre_stop(struct resource *smires) { - switch (vendorsupport) { + switch (iTCO_vendorsupport) { case SUPERMICRO_OLD_BOARD: supermicro_old_pre_stop(smires); break; @@ -178,7 +180,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_stop); int iTCO_vendor_check_noreboot_on(void) { - switch (vendorsupport) { + switch (iTCO_vendorsupport) { case SUPERMICRO_OLD_BOARD: return 0; default: @@ -189,13 +191,13 @@ EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on); static int __init iTCO_vendor_init_module(void) { - if (vendorsupport == SUPERMICRO_NEW_BOARD) { + if (iTCO_vendorsupport == SUPERMICRO_NEW_BOARD) { pr_warn("Option vendorsupport=%d is no longer supported, " "please use the w83627hf_wdt driver instead\n", SUPERMICRO_NEW_BOARD); return -EINVAL; } - pr_info("vendor-support=%d\n", vendorsupport); + pr_info("vendor-support=%d\n", iTCO_vendorsupport); return 0; } diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 156360e37714..e707c4797f76 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -459,13 +459,25 @@ static int iTCO_wdt_probe(struct platform_device *pdev) if (!p->tco_res) return -ENODEV; - p->smi_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI); - if (!p->smi_res) - return -ENODEV; - p->iTCO_version = pdata->version; p->pci_dev = to_pci_dev(dev->parent); + p->smi_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI); + if (p->smi_res) { + /* The TCO logic uses the TCO_EN bit in the SMI_EN register */ + if (!devm_request_region(dev, p->smi_res->start, + resource_size(p->smi_res), + pdev->name)) { + pr_err("I/O address 0x%04llx already in use, device disabled\n", + (u64)SMI_EN(p)); + return -EBUSY; + } + } else if (iTCO_vendorsupport || + turn_SMI_watchdog_clear_off >= p->iTCO_version) { + pr_err("SMI I/O resource is missing\n"); + return -ENODEV; + } + iTCO_wdt_no_reboot_bit_setup(p, pdata); /* @@ -492,14 +504,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev) /* Set the NO_REBOOT bit to prevent later reboots, just for sure */ p->update_no_reboot_bit(p->no_reboot_priv, true); - /* The TCO logic uses the TCO_EN bit in the SMI_EN register */ - if (!devm_request_region(dev, p->smi_res->start, - resource_size(p->smi_res), - pdev->name)) { - pr_err("I/O address 0x%04llx already in use, device disabled\n", - (u64)SMI_EN(p)); - return -EBUSY; - } if (turn_SMI_watchdog_clear_off >= p->iTCO_version) { /* * Bit 13: TCO_EN -> 0 diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c index b069349b52f5..3065dd670a18 100644 --- a/drivers/watchdog/wdat_wdt.c +++ b/drivers/watchdog/wdat_wdt.c @@ -54,6 +54,13 @@ module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); +#define WDAT_DEFAULT_TIMEOUT 30 + +static int timeout = WDAT_DEFAULT_TIMEOUT; +module_param(timeout, int, 0); +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default=" + __MODULE_STRING(WDAT_DEFAULT_TIMEOUT) ")"); + static int wdat_wdt_read(struct wdat_wdt *wdat, const struct wdat_instruction *instr, u32 *value) { @@ -389,7 +396,7 @@ static int wdat_wdt_probe(struct platform_device *pdev) memset(&r, 0, sizeof(r)); r.start = gas->address; - r.end = r.start + gas->access_width - 1; + r.end = r.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1; if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { r.flags = IORESOURCE_MEM; } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { @@ -438,6 +445,22 @@ 
static int wdat_wdt_probe(struct platform_device *pdev) platform_set_drvdata(pdev, wdat); + /* + * Set initial timeout so that userspace has time to configure the + * watchdog properly after it has opened the device. In some cases + * the BIOS default is too short and causes immediate reboot. + */ + if (timeout * 1000 < wdat->wdd.min_hw_heartbeat_ms || + timeout * 1000 > wdat->wdd.max_hw_heartbeat_ms) { + dev_warn(dev, "Invalid timeout %d given, using %d\n", + timeout, WDAT_DEFAULT_TIMEOUT); + timeout = WDAT_DEFAULT_TIMEOUT; + } + + ret = wdat_wdt_set_timeout(&wdat->wdd, timeout); + if (ret) + return ret; + watchdog_set_nowayout(&wdat->wdd, nowayout); return devm_watchdog_register_device(dev, &wdat->wdd); } diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c index f192b6f42da9..ec975decb5de 100644 --- a/drivers/xen/cpu_hotplug.c +++ b/drivers/xen/cpu_hotplug.c @@ -94,7 +94,7 @@ static int setup_cpu_watcher(struct notifier_block *notifier, for_each_possible_cpu(cpu) { if (vcpu_online(cpu) == 0) { - (void)cpu_down(cpu); + device_offline(get_cpu_device(cpu)); set_cpu_present(cpu, false); } } diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c index 70650b248de5..17240c5325a3 100644 --- a/drivers/xen/preempt.c +++ b/drivers/xen/preempt.c @@ -33,7 +33,9 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void) * cpu. */ __this_cpu_write(xen_in_preemptible_hcall, false); - _cond_resched(); + local_irq_enable(); + cond_resched(); + local_irq_disable(); __this_cpu_write(xen_in_preemptible_hcall, true); } } diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h index ce1077e32466..7c95516a860f 100644 --- a/drivers/xen/xen-pciback/pciback.h +++ b/drivers/xen/xen-pciback/pciback.h @@ -52,7 +52,7 @@ struct xen_pcibk_dev_data { unsigned int ack_intr:1; /* .. 
and ACK-ing */ unsigned long handled; unsigned int irq; /* Saved in case device transitions to MSI/MSI-X */ - char irq_name[0]; /* xen-pcibk[000:04:00.0] */ + char irq_name[]; /* xen-pcibk[000:04:00.0] */ }; /* Used by XenBus and xen_pcibk_ops.c */ diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c index d239fc3c5e3d..eb5151fc8efa 100644 --- a/drivers/xen/xenbus/xenbus_comms.c +++ b/drivers/xen/xenbus/xenbus_comms.c @@ -313,6 +313,8 @@ static int process_msg(void) req->msg.type = state.msg.type; req->msg.len = state.msg.len; req->body = state.body; + /* write body, then update state */ + virt_wmb(); req->state = xb_req_state_got_reply; req->cb(req); } else @@ -395,6 +397,8 @@ static int process_writes(void) if (state.req->state == xb_req_state_aborted) kfree(state.req); else { + /* write err, then update state */ + virt_wmb(); state.req->state = xb_req_state_got_reply; wake_up(&state.req->wq); } diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 66975da4f3b6..8c4d05b687b7 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -239,9 +239,9 @@ int xenbus_dev_probe(struct device *_dev) goto fail; } - spin_lock(&dev->reclaim_lock); + down(&dev->reclaim_sem); err = drv->probe(dev, id); - spin_unlock(&dev->reclaim_lock); + up(&dev->reclaim_sem); if (err) goto fail_put; @@ -271,9 +271,9 @@ int xenbus_dev_remove(struct device *_dev) free_otherend_watch(dev); if (drv->remove) { - spin_lock(&dev->reclaim_lock); + down(&dev->reclaim_sem); drv->remove(dev); - spin_unlock(&dev->reclaim_lock); + up(&dev->reclaim_sem); } module_put(drv->driver.owner); @@ -473,7 +473,7 @@ int xenbus_probe_node(struct xen_bus_type *bus, goto fail; dev_set_name(&xendev->dev, "%s", devname); - spin_lock_init(&xendev->reclaim_lock); + sema_init(&xendev->reclaim_sem, 1); /* Register with generic device framework. */ err = device_register(&xendev->dev); diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c index 791f6fe01e91..9b2fbe69bccc 100644 --- a/drivers/xen/xenbus/xenbus_probe_backend.c +++ b/drivers/xen/xenbus/xenbus_probe_backend.c @@ -45,6 +45,7 @@ #include <linux/mm.h> #include <linux/notifier.h> #include <linux/export.h> +#include <linux/semaphore.h> #include <asm/page.h> #include <asm/pgtable.h> @@ -257,10 +258,10 @@ static int backend_reclaim_memory(struct device *dev, void *data) drv = to_xenbus_driver(dev->driver); if (drv && drv->reclaim_memory) { xdev = to_xenbus_device(dev); - if (!spin_trylock(&xdev->reclaim_lock)) + if (down_trylock(&xdev->reclaim_sem)) return 0; drv->reclaim_memory(xdev); - spin_unlock(&xdev->reclaim_lock); + up(&xdev->reclaim_sem); } return 0; } diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index ddc18da61834..3a06eb699f33 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -191,8 +191,11 @@ static bool xenbus_ok(void) static bool test_reply(struct xb_req_data *req) { - if (req->state == xb_req_state_got_reply || !xenbus_ok()) + if (req->state == xb_req_state_got_reply || !xenbus_ok()) { + /* read req->state before all other fields */ + virt_rmb(); return true; + } /* Make sure to reread req->state each time. 
*/ barrier(); @@ -202,7 +205,7 @@ static bool test_reply(struct xb_req_data *req) static void *read_reply(struct xb_req_data *req) { - while (req->state != xb_req_state_got_reply) { + do { wait_event(req->wq, test_reply(req)); if (!xenbus_ok()) @@ -216,7 +219,7 @@ static void *read_reply(struct xb_req_data *req) if (req->err) return ERR_PTR(req->err); - } + } while (req->state != xb_req_state_got_reply); return req->body; } |
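The xenbus changes above pair a producer-side virt_wmb() in xenbus_comms.c (fill in the reply body, then flip req->state to xb_req_state_got_reply) with a consumer-side virt_rmb() in xenbus_xs.c (observe the state, then read the body), and turn read_reply() into a do-while so a reply that completes before the first wait is still consumed. A minimal userspace sketch of the same publish/consume ordering, using C11 fences in place of the kernel barriers; the struct and function names are illustrative only:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct reply {
	char body[32];
	atomic_int got_reply;	/* plays the role of req->state */
};

static struct reply r;

static void *producer(void *arg)
{
	(void)arg;
	snprintf(r.body, sizeof(r.body), "reply payload");
	atomic_thread_fence(memory_order_release);	/* ~ virt_wmb() */
	atomic_store_explicit(&r.got_reply, 1, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	while (!atomic_load_explicit(&r.got_reply, memory_order_relaxed))
		;					/* ~ wait_event() */
	atomic_thread_fence(memory_order_acquire);	/* ~ virt_rmb() */
	printf("%s\n", r.body);				/* body is now stable */
	pthread_join(t, NULL);
	return 0;
}

Without the paired barriers, a reader on a weakly ordered CPU could observe the state flag set yet still see a stale reply body; that is the window the xenbus hunks close.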